'kernel-2_6_7-1_planetlab'.
struct pci_dev *pdev;
...
- if (!pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
+ if (pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
card->playback_enabled = 1;
} else {
card->playback_enabled = 0;
printk(KERN_WARN "%s: Playback disabled due to DMA limitations.\n",
card->name);
}
- if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
+ if (pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
card->record_enabled = 1;
} else {
card->record_enabled = 0;
modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
preaction=<preaction type> preop=<preop type> start_now=x
- nowayout=x
The timeout is the number of seconds to the action, and the pretimeout
is the number of seconds before the reset that the pre-timeout panic will
occur.
If start_now is set to 1, the watchdog timer will start running as
soon as the driver is loaded.
-If nowayout is set to 1, the watchdog timer will not stop when the
-watchdog device is closed. The default value of nowayout is true
-if the CONFIG_WATCHDOG_NOWAYOUT option is enabled, or false if not.
-
When compiled into the kernel, the kernel command line is available
for configuring the watchdog:
ipmi_watchdog.preaction=<preaction type>
ipmi_watchdog.preop=<preop type>
ipmi_watchdog.start_now=x
- ipmi_watchdog.nowayout=x
The options are the same as the module parameter options.
10/03/2003
Revised Feb 12, 2004 by Martine Silbermann
email: Martine.Silbermann@hp.com
- Revised Jun 25, 2004 by Tom L Nguyen
1. About this guide
-This guide describes the basics of Message Signaled Interrupts (MSI),
-the advantages of using MSI over traditional interrupt mechanisms,
-and how to enable your driver to use MSI or MSI-X. Also included is
-a Frequently Asked Questions.
+This guide describes the basics of Message Signaled Interrupts(MSI), the
+advantages of using MSI over traditional interrupt mechanisms, and how
+to enable your driver to use MSI or MSI-X. Also included is a Frequently
+Asked Questions.
2. Copyright 2003 Intel Corporation
the MSI/MSI-X capability structure in its PCI capability list. The
device function may implement both the MSI capability structure and
the MSI-X capability structure; however, the bus driver should not
-enable both.
+enable both, but instead enable only the MSI-X capability structure.
The MSI capability structure contains Message Control register,
Message Address register and Message Data register. These registers
support for better interrupt performance.
Using MSI enables the device functions to support two or more
-vectors, which can be configured to target different CPU's to
+vectors, which can be configured to target different CPU's to
increase scalability.
5. Configuring a driver to use MSI/MSI-X
By default, the kernel will not enable MSI/MSI-X on all devices that
-support this capability. The CONFIG_PCI_MSI kernel option
+support this capability. The CONFIG_PCI_USE_VECTOR kernel option
must be selected to enable MSI/MSI-X support.
-5.1 Including MSI/MSI-X support into the kernel
+5.1 Including MSI support into the kernel
-To allow MSI/MSI-X capable device drivers to selectively enable
-MSI/MSI-X (using pci_enable_msi()/pci_enable_msix() as described
-below), the VECTOR based scheme needs to be enabled by setting
-CONFIG_PCI_MSI during kernel config.
+To allow MSI-Capable device drivers to selectively enable MSI (using
+pci_enable_msi as described below), the VECTOR based scheme needs to
+be enabled by setting CONFIG_PCI_USE_VECTOR.
Since the target of the inbound message is the local APIC, providing
-CONFIG_X86_LOCAL_APIC must be enabled as well as CONFIG_PCI_MSI.
+CONFIG_PCI_USE_VECTOR is dependent on whether CONFIG_X86_LOCAL_APIC
+is enabled or not.
-5.2 Configuring for MSI support
-
-Due to the non-contiguous fashion in vector assignment of the
-existing Linux kernel, this version does not support multiple
-messages regardless of whether a device function is capable of supporting
-more than one vector. To enable MSI on a device function's MSI
-capability structure requires a device driver to call the function
-pci_enable_msi() explicitly.
-
-5.2.1 API pci_enable_msi
-
-int pci_enable_msi(struct pci_dev *dev)
+int pci_enable_msi(struct pci_dev *)
With this new API, any existing device driver, which would like to have
-MSI enabled on its device function, must call this API to enable MSI
-A successful call will initialize the MSI capability structure
-with ONE vector, regardless of whether a device function is
+MSI enabled on its device function, must call this explicitly. A
+successful call will initialize the MSI/MSI-X capability structure
+with ONE vector, regardless of whether the device function is
capable of supporting multiple messages. This vector replaces the
pre-assigned dev->irq with a new MSI vector. To avoid the conflict
of new assigned vector with existing pre-assigned vector requires
-a device driver to call this API before calling request_irq().
-
-5.2.2 API pci_disable_msi
-
-void pci_disable_msi(struct pci_dev *dev)
-
-This API should always be used to undo the effect of pci_enable_msi()
-when a device driver is unloading. This API restores dev->irq with
-the pre-assigned IOAPIC vector and switches a device's interrupt
-mode to PCI pin-irq assertion/INTx emulation mode.
-
-Note that a device driver should always call free_irq() on MSI vector
-it has done request_irq() on before calling this API. Failure to do
-so results a BUG_ON() and a device will be left with MSI enabled and
-leaks its vector.
-
-5.2.3 MSI mode vs. legacy mode diagram
+the device driver to call this API before calling request_irq(...).
The below diagram shows the events, which switches the interrupt
mode on the MSI-capable device function between MSI mode and
| | <=============== | |
| MSI MODE | | PIN-IRQ ASSERTION MODE |
| | ===============> | |
- ------------ pci_disable_msi ------------------------
-
-
-Figure 1.0 MSI Mode vs. Legacy Mode
+ ------------ free_irq ------------------------
-In Figure 1.0, a device operates by default in legacy mode. Legacy
-in this context means PCI pin-irq assertion or PCI-Express INTx
-emulation. A successful MSI request (using pci_enable_msi()) switches
-a device's interrupt mode to MSI mode. A pre-assigned IOAPIC vector
-stored in dev->irq will be saved by the PCI subsystem and a new
-assigned MSI vector will replace dev->irq.
-
-To return back to its default mode, a device driver should always call
-pci_disable_msi() to undo the effect of pci_enable_msi(). Note that a
-device driver should always call free_irq() on MSI vector it has done
-request_irq() on before calling pci_disable_msi(). Failure to do so
-results a BUG_ON() and a device will be left with MSI enabled and
-leaks its vector. Otherwise, the PCI subsystem restores a device's
-dev->irq with a pre-assigned IOAPIC vector and marks released
-MSI vector as unused.
-
-Once being marked as unused, there is no guarantee that the PCI
-subsystem will reserve this MSI vector for a device. Depending on
-the availability of current PCI vector resources and the number of
-MSI/MSI-X requests from other drivers, this MSI may be re-assigned.
+5.2 Configuring for MSI support
-For the case where the PCI subsystem re-assigned this MSI vector to
-another driver, a request to switch back to MSI mode may result
-in being assigned a different MSI vector or a failure if no more
-vectors are available.
+Due to the non-contiguous fashion in vector assignment of the
+existing Linux kernel, this version does not support multiple
+messages regardless of whether the device function is capable of supporting
+more than one vector. The bus driver initializes only entry 0 of
+this capability if pci_enable_msi(...) is called successfully by
+the device driver.
5.3 Configuring for MSI-X support
-Due to the ability of the system software to configure each vector of
-the MSI-X capability structure with an independent message address
-and message data, the non-contiguous fashion in vector assignment of
-the existing Linux kernel has no impact on supporting multiple
-messages on an MSI-X capable device functions. To enable MSI-X on
-a device function's MSI-X capability structure requires its device
-driver to call the function pci_enable_msix() explicitly.
-
-The function pci_enable_msix(), once invoked, enables either
-all or nothing, depending on the current availability of PCI vector
-resources. If the PCI vector resources are available for the number
-of vectors requested by a device driver, this function will configure
-the MSI-X table of the MSI-X capability structure of a device with
-requested messages. To emphasize this reason, for example, a device
-may be capable for supporting the maximum of 32 vectors while its
-software driver usually may request 4 vectors. It is recommended
-that the device driver should call this function once during the
+Both the MSI capability structure and the MSI-X capability structure
+share the same above semantics; however, due to the ability of the
+system software to configure each vector of the MSI-X capability
+structure with an independent message address and message data, the
+non-contiguous fashion in vector assignment of the existing Linux
+kernel has no impact on supporting multiple messages on an MSI-X
+capable device functions. By default, as mentioned above, ONE vector
+should be always allocated to the MSI-X capability structure at
+entry 0. The bus driver does not initialize other entries of the
+MSI-X table.
+
+Note that the PCI subsystem should have full control of a MSI-X
+table that resides in Memory Space. The software device driver
+should not access this table.
+
+To request for additional vectors, the device software driver should
+call function msi_alloc_vectors(). It is recommended that the
+software driver should call this function once during the
initialization phase of the device driver.
-Unlike the function pci_enable_msi(), the function pci_enable_msix()
-does not replace the pre-assigned IOAPIC dev->irq with a new MSI
-vector because the PCI subsystem writes the 1:1 vector-to-entry mapping
-into the field vector of each element contained in a second argument.
-Note that the pre-assigned IO-APIC dev->irq is valid only if the device
-operates in PIN-IRQ assertion mode. In MSI-X mode, any attempt of
-using dev->irq by the device driver to request for interrupt service
-may result in unpredictable behavior.
-
-For each MSI-X vector granted, a device driver is responsible to call
-other functions like request_irq(), enable_irq(), etc. to enable
-this vector with its corresponding interrupt service handler. It is
-a device driver's choice to assign all vectors with the same
-interrupt service handler or each vector with a unique interrupt
-service handler.
-
-5.3.1 Handling MMIO address space of MSI-X Table
-
-The PCI 3.0 specification has implementation notes that MMIO address
-space for a device's MSI-X structure should be isolated so that the
-software system can set different page for controlling accesses to
-the MSI-X structure. The implementation of MSI patch requires the PCI
-subsystem, not a device driver, to maintain full control of the MSI-X
-table/MSI-X PBA and MMIO address space of the MSI-X table/MSI-X PBA.
-A device driver is prohibited from requesting the MMIO address space
-of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem will fail
-enabling MSI-X on its hardware device when it calls the function
-pci_enable_msix().
-
-5.3.2 Handling MSI-X allocation
-
-Determining the number of MSI-X vectors allocated to a function is
-dependent on the number of MSI capable devices and MSI-X capable
-devices populated in the system. The policy of allocating MSI-X
-vectors to a function is defined as the following:
-
-#of MSI-X vectors allocated to a function = (x - y)/z where
-
-x = The number of available PCI vector resources by the time
- the device driver calls pci_enable_msix(). The PCI vector
- resources is the sum of the number of unassigned vectors
- (new) and the number of released vectors when any MSI/MSI-X
- device driver switches its hardware device back to a legacy
- mode or is hot-removed. The number of unassigned vectors
- may exclude some vectors reserved, as defined in parameter
- NR_HP_RESERVED_VECTORS, for the case where the system is
- capable of supporting hot-add/hot-remove operations. Users
- may change the value defined in NR_HP_RESERVED_VECTORS to
- meet their specific needs.
-
-y = The number of MSI capable devices populated in the system.
- This policy ensures that each MSI capable device has its
- vector reserved to avoid the case where some MSI-X capable
- drivers may attempt to claim all available vector resources.
-
-z = The number of MSI-X capable devices populated in the system.
- This policy ensures that maximum (x - y) is distributed
- evenly among MSI-X capable devices.
-
-Note that the PCI subsystem scans y and z during a bus enumeration.
-When the PCI subsystem completes configuring MSI/MSI-X capability
-structure of a device as requested by its device driver, y/z is
-decremented accordingly.
-
-5.3.3 Handling MSI-X shortages
-
-For the case where fewer MSI-X vectors are allocated to a function
-than requested, the function pci_enable_msix() will return the
-maximum number of MSI-X vectors available to the caller. A device
-driver may re-send its request with fewer or equal vectors indicated
-in a return. For example, if a device driver requests 5 vectors, but
-the number of available vectors is 3 vectors, a value of 3 will be a
-return as a result of pci_enable_msix() call. A function could be
-designed for its driver to use only 3 MSI-X table entries as
-different combinations as ABC--, A-B-C, A--CB, etc. Note that this
-patch does not support multiple entries with the same vector. Such
-attempt by a device driver to use 5 MSI-X table entries with 3 vectors
-as ABBCC, AABCC, BCCBA, etc will result as a failure by the function
-pci_enable_msix(). Below are the reasons why supporting multiple
-entries with the same vector is an undesirable solution.
-
- - The PCI subsystem can not determine which entry, which
- generated the message, to mask/unmask MSI while handling
- software driver ISR. Attempting to walk through all MSI-X
- table entries (2048 max) to mask/unmask any match vector
- is an undesirable solution.
-
- - Walk through all MSI-X table entries (2048 max) to handle
- SMP affinity of any match vector is an undesirable solution.
-
-5.3.4 API pci_enable_msix
-
-int pci_enable_msix(struct pci_dev *dev, u32 *entries, int nvec)
-
-This API enables a device driver to request the PCI subsystem
-for enabling MSI-X messages on its hardware device. Depending on
-the availability of PCI vectors resources, the PCI subsystem enables
-either all or nothing.
+The function msi_alloc_vectors(), once invoked, enables either
+all or nothing, depending on the current availability of vector
+resources. If no vector resources are available, the device function
+still works with ONE vector. If the vector resources are available
+for the number of vectors requested by the driver, this function
+will reconfigure the MSI-X capability structure of the device with
+additional messages, starting from entry 1. To emphasize this
+reason, for example, the device may be capable for supporting the
+maximum of 32 vectors while its software driver usually may request
+4 vectors.
+
+For each vector, after this successful call, the device driver is
+responsible to call other functions like request_irq(), enable_irq(),
+etc. to enable this vector with its corresponding interrupt service
+handler. It is the device driver's choice to have all vectors shared
+the same interrupt service handler or each vector with a unique
+interrupt service handler.
+
+In addition to the function msi_alloc_vectors(), another function
+msi_free_vectors() is provided to allow the software driver to
+release a number of vectors back to the vector resources. Once
+invoked, the PCI subsystem disables (masks) each vector released.
+These vectors are no longer valid for the hardware device and its
+software driver to use. As with free_irq, it is recommended that the
+device driver should also call msi_free_vectors to release all
+additional vectors previously requested.
+
+int msi_alloc_vectors(struct pci_dev *dev, int *vector, int nvec)
+
+This API enables the software driver to request the PCI subsystem
+for additional messages. Depending on the number of vectors
+available, the PCI subsystem enables either all or nothing.
Argument dev points to the device (pci_dev) structure.
-
-Argument entries is a pointer of unsigned integer type. The number of
-elements is indicated in argument nvec. The content of each element
-will be mapped to the following struct defined in /driver/pci/msi.h.
-
-struct msix_entry {
- u16 vector; /* kernel uses to write alloc vector */
- u16 entry; /* driver uses to specify entry */
-};
-
-A device driver is responsible for initializing the field entry of
-each element with unique entry supported by MSI-X table. Otherwise,
--EINVAL will be returned as a result. A successful return of zero
-indicates the PCI subsystem completes initializing each of requested
-entries of the MSI-X table with message address and message data.
-Last but not least, the PCI subsystem will write the 1:1
-vector-to-entry mapping into the field vector of each element. A
-device driver is responsible of keeping track of allocated MSI-X
-vectors in its internal data structure.
-
+Argument vector is a pointer of integer type. The number of
+elements is indicated in argument nvec.
Argument nvec is an integer indicating the number of messages
requested.
+A return of zero indicates that the requested number of vectors was
+successfully allocated. Otherwise, it indicates that resources are not
+available.
-A return of zero indicates that the number of MSI-X vectors is
-successfully allocated. A return of greater than zero indicates
-MSI-X vector shortage. Or a return of less than zero indicates
-a failure. This failure may be a result of duplicate entries
-specified in second argument, or a result of no available vector,
-or a result of failing to initialize MSI-X table entries.
-
-5.3.5 API pci_disable_msix
-
-void pci_disable_msix(struct pci_dev *dev)
+int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
-This API should always be used to undo the effect of pci_enable_msix()
-when a device driver is unloading. Note that a device driver should
-always call free_irq() on all MSI-X vectors it has done request_irq()
-on before calling this API. Failure to do so results a BUG_ON() and
-a device will be left with MSI-X enabled and leaks its vectors.
+This API enables the software driver to inform the PCI subsystem
+that it is willing to release a number of vectors back to the
+MSI resource pool. Once invoked, the PCI subsystem disables each
+MSI-X entry associated with each vector stored in the argument 2.
+These vectors are no longer valid for the hardware device and
+its software driver to use.
-5.3.6 MSI-X mode vs. legacy mode diagram
+Argument dev points to the device (pci_dev) structure.
+Argument vector is a pointer of integer type. The number of
+elements is indicated in argument nvec.
+Argument nvec is an integer indicating the number of messages
+released.
+A return of zero indicates that the number of allocated vectors
+is successfully released. Otherwise, indicates a failure.
-The below diagram shows the events, which switches the interrupt
-mode on the MSI-X capable device function between MSI-X mode and
-PIN-IRQ assertion mode (legacy).
-
- ------------ pci_enable_msix(,,n) ------------------------
- | | <=============== | |
- | MSI-X MODE | | PIN-IRQ ASSERTION MODE |
- | | ===============> | |
- ------------ pci_disable_msix ------------------------
-
-Figure 2.0 MSI-X Mode vs. Legacy Mode
-
-In Figure 2.0, a device operates by default in legacy mode. A
-successful MSI-X request (using pci_enable_msix()) switches a
-device's interrupt mode to MSI-X mode. A pre-assigned IOAPIC vector
-stored in dev->irq will be saved by the PCI subsystem; however,
-unlike MSI mode, the PCI subsystem will not replace dev->irq with
-assigned MSI-X vector because the PCI subsystem already writes the 1:1
-vector-to-entry mapping into the field vector of each element
-specified in second argument.
-
-To return back to its default mode, a device driver should always call
-pci_disable_msix() to undo the effect of pci_enable_msix(). Note that
-a device driver should always call free_irq() on all MSI-X vectors it
-has done request_irq() on before calling pci_disable_msix(). Failure
-to do so results a BUG_ON() and a device will be left with MSI-X
-enabled and leaks its vectors. Otherwise, the PCI subsystem switches a
-device function's interrupt mode from MSI-X mode to legacy mode and
-marks all allocated MSI-X vectors as unused.
-
-Once being marked as unused, there is no guarantee that the PCI
-subsystem will reserve these MSI-X vectors for a device. Depending on
-the availability of current PCI vector resources and the number of
-MSI/MSI-X requests from other drivers, these MSI-X vectors may be
-re-assigned.
-
-For the case where the PCI subsystem re-assigned these MSI-X vectors
-to another driver, a request to switch back to MSI-X mode may result
-being assigned with another set of MSI-X vectors or a failure if no
-more vectors are available.
-
-5.4 Handling function implementing both MSI and MSI-X capabilities
-
-For the case where a function implements both MSI and MSI-X
-capabilities, the PCI subsystem enables a device to run either in MSI
-mode or MSI-X mode but not both. A device driver determines whether it
-wants MSI or MSI-X enabled on its hardware device. Once a device
-driver requests for MSI, for example, it is prohibited to request for
-MSI-X; in other words, a device driver is not permitted to ping-pong
-between MSI mode and MSI-X mode during run-time.
-
-5.5 Hardware requirements for MSI/MSI-X support
-MSI/MSI-X support requires support from both system hardware and
+5.4 Hardware requirements for MSI support
+MSI support requires support from both system hardware and
individual hardware device functions.
-5.5.1 System hardware support
+5.4.1 System hardware support
Since the target of MSI address is the local APIC CPU, enabling
-MSI/MSI-X support in Linux kernel is dependent on whether existing
+MSI support in Linux kernel is dependent on whether existing
system hardware supports local APIC. Users should verify their
system whether it runs when CONFIG_X86_LOCAL_APIC=y.
In SMP environment, CONFIG_X86_LOCAL_APIC is automatically set;
however, in UP environment, users must manually set
CONFIG_X86_LOCAL_APIC. Once CONFIG_X86_LOCAL_APIC=y, setting
-CONFIG_PCI_MSI enables the VECTOR based scheme and
+CONFIG_PCI_USE_VECTOR enables the VECTOR based scheme and
the option for MSI-capable device drivers to selectively enable
-MSI/MSI-X.
+MSI (using pci_enable_msi as described below).
-Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI/MSI-X
-vector is allocated new during runtime and MSI/MSI-X support does not
-depend on BIOS support. This key independency enables MSI/MSI-X
-support on future IOxAPIC free platform.
+Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI
+vector is allocated new during runtime and MSI support does not
+depend on BIOS support. This key independency enables MSI support
+on future IOxAPIC free platform.
-5.5.2 Device hardware support
+5.4.2 Device hardware support
The hardware device function supports MSI by indicating the
MSI/MSI-X capability structure on its PCI capability list. By
default, this capability structure will not be initialized by
MSI-capable hardware is responsible for whether calling
pci_enable_msi or not. A return of zero indicates the kernel
successfully initializes the MSI/MSI-X capability structure of the
-device function. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running on MSI mode.
-5.6 How to tell whether MSI/MSI-X is enabled on device function
+5.5 How to tell whether MSI is enabled on device function
-At the driver level, a return of zero from the function call of
-pci_enable_msi()/pci_enable_msix() indicates to a device driver that
-its device function is initialized successfully and ready to run in
-MSI/MSI-X mode.
+At the driver level, a return of zero from pci_enable_msi(...)
+indicates to the device driver that its device function is
+initialized successfully and ready to run in MSI mode.
At the user level, users can use command 'cat /proc/interrupts'
-to display the vector allocated for a device and its interrupt
-MSI/MSI-X mode ("PCI MSI"/"PCI MSIX"). Below shows MSI mode is
-enabled on a SCSI Adaptec 39320D Ultra320.
+to display the vector allocated for the device and its interrupt
+mode, as shown below.
CPU0 CPU1
0: 324639 0 IO-APIC-edge timer
option to the kernel via the tagged lists specifying the port, and
serial format options as described in
- Documentation/kernel-parameters.txt.
+ linux/Documentation/kernel-parameters.txt.
3. Detect the machine type
$Id: README.aztcd,v 2.60 1997/11/29 09:51:25 root Exp root $
- Readme-File Documentation/cdrom/aztcd
+ Readme-File /usr/src/Documentation/cdrom/aztcd
for
AZTECH CD-ROM CDA268-01A, ORCHID CD-3110,
OKANO/WEARNES CDD110, CONRAD TXC, CyCDROM CR520, CR540
A reworked and improved version called 'cdtester.c', which has yet more
features for testing CDROM-drives can be found in
-Documentation/cdrom/sbpcd, written by E.Moenkeberg.
+/usr/src/linux/Documentation/cdrom/sbpcd, written by E.Moenkeberg.
Werner Zimmermann
Fachhochschule fuer Technik Esslingen
+++ /dev/null
-CKRM I/O controller
-
-Last updated: Sep 21, 2004
-
-
-Intro
------
-
-CKRM's I/O scheduler is developed as a delta over a modified version of
-the Complete Fair Queuing scheduler (CFQ) that implements I/O priorities.
-The latter's original posting can be found at:
- http://www.ussg.iu.edu/hypermail/linux/kernel/0311.1/0019.html
-
-Please note that this is not the CFQ version currently in the linus kernel
-(2.6.8.1 at time of writing) which provides equal, not prioritized,
-bandwidth allocation amongst processes. Since the CFQ in the kernel is likely
-to eventually move towards I/O priority implementation, CKRM has not renamed
-the underlying I/O scheduler and simply replaces drivers/block/cfq-iosched.c
-with the modified version.
-
-Installation
-------------
-
-1. Configure "Disk I/O Resource Controller" under CKRM (see
-Documentation/ckrm/installation)
-
-2. After booting into the new kernel, load ckrm-io
- # modprobe ckrm-io
-
-3. Verify that reading /rcfs/taskclass/shares displays values for the
-I/O controller (res=cki).
-
-4. Mount sysfs for monitoring bandwidth received (temporary solution till
-a userlevel tool is developed)
- # mount -t sysfs none /sys
-
-
-Usage
------
-
-For brevity, we assume we are in the /rcfs/taskclass directory for all the
-code snippets below.
-
-Initially, the systemwide default class gets 100% of the I/O bandwidth.
-
- $ cat stats
-
- <display from other controllers, snipped>
- 20 total ioprio
- 20 unused/default ioprio
-
-The first value is the share of a class, as a parent. The second is the share
-of its default subclass. Initially the two are equal. As named subclasses get
-created and assigned shares, the default subclass' share (which equals the
-"unused" portion of the parent's allocation) dwindles.
-
-
-CFQ assigns one of 20 I/O priorities to all I/O requests. Each priority level
-gets a fixed proportion of the total bandwidth in increments of 5%. e.g.
- ioprio=1 gets 5%,
- ioprio=2 gets 10%.....
- all the way through ioprio=19 getting 95%
-
-ioprio=0 gets bandwidth only if no other priority level submits I/O i.e. it can
-get starved.
-ioprio=20 is considered realtime I/O and always gets priority.
-
-CKRM's I/O scheduler distributes these 20 priority levels amongst the hierarchy
-of classes according to the relative share of each class. Thus, root starts out
-with the total allocation of 20 initially. As children get created and shares
-assigned to them, root's allocation reduces. At any time, the sum of absolute
-share values of all classes equals 20.
-
-
-
-Class creation
---------------
-
- $ mkdir a
-
-Its initial share is zero. The parent's share values will be unchanged. Note
-that even classes with zero share get unused bandwidth under CFQ.
-
-Setting a new class share
--------------------------
-
- $ echo "res=cki,guarantee=20" > /rcfs/taskclass/a/shares
- Set cki shares to 20 -1 -1 -1
-
- $ echo a/shares
-
- res=cki,guarantee=20,limit=100,total_guarantee=100,max_limit=100
-
-The limit and max_limit fields can be ignored as they are not implemented.
-The absolute share of a is 20% of parent's absolute total (20) and can be seen
-through
- $ echo a/stats
-
- <snip>
- 4 total ioprio
- 4 unused/default ioprio
-
-Since a gets 4, parent's default's share diminishes accordingly. Thus
-
- $ echo stats
-
- <snip>
- 20 total ioprio
- 16 unused/default ioprio
-
-
-Monitoring
-----------
-
-Each priority level's request service rate can be viewed through sysfs (mounted
-during installation). To view the servicing of priority 4's requests,
-
- $ while : ; echo /sys/block/<device>/queue/iosched/p4 ; sleep 1 ; done
- rq (10,15) sec (20,30) q (40,50)
-
- <data above updated in a loop>
-
-where
- rq = cumulative I/O requests received (10) and serviced (15)
- sec = cumulative sectors requested (20) and served (30)
- q = cumulative number of times the queue was created(40)/destroyed (50)
-
-The rate at which requests or sectors are serviced should differ for different
-priority levels. The difference in received and serviced values indicates queue
-depth - with insufficient depth, differentiation between I/O priority levels
-will not be observed.
-
-The rate of q creation is not significant for CKRM.
-
-
-Caveats
--------
-
-CFQ's I/O differentiation is still being worked upon so its better to choose
-widely separated share values to observe differences in delivered I/O
-bandwidth.
-
-CFQ, and consequently CKRM, does not provide limits yet. So it is not possible
-to completely limit an I/O hog process by putting it in a class with a low I/O
-share. Only if the competing classes maintain sufficient queue depth (i.e a
-high I/O issue rate) will they get preferential treatment. However, they may
-still see latency degradation due to seeks caused by servicing of the low
-priority class.
-
-When limits are implemented, this behaviour will be rectified.
-
-Please post questions on the CKRM I/O scheduler on ckrm-tech@lists.sf.net.
-
-
+++ /dev/null
-CKRM Basics
--------------
-A brief review of CKRM concepts and terminology will help make installation
-and testing easier. For more details, please visit http://ckrm.sf.net.
-
-Currently there are two class types, taskclass and socketclass for grouping,
-regulating and monitoring tasks and sockets respectively.
-
-To avoid repeating instructions for each classtype, this document assumes a
-task to be the kernel object being grouped. By and large, one can replace task
-with socket and taskclass with socketclass.
-
-RCFS depicts a CKRM class as a directory. Hierarchy of classes can be
-created in which children of a class share resources allotted to
-the parent. Tasks can be classified to any class which is at any level.
-There is no correlation between parent-child relationship of tasks and
-the parent-child relationship of classes they belong to.
-
-Without a Classification Engine, class is inherited by a task. A privileged
-user can reassign a task to a class as described below, after which all
-the child tasks under that task will be assigned to that class, unless the
-user reassigns any of them.
-
-A Classification Engine, if one exists, will be used by CKRM to
-classify a task to a class. The Rule based classification engine uses some
-of the attributes of the task to classify a task. When a CE is present
-class is not inherited by a task.
-
-Characteristics of a class can be accessed/changed through the following magic
-files under the directory representing the class:
-
-shares: allows to change the shares of different resources managed by the
- class
-stats: allows to see the statistics associated with each resources managed
- by the class
-target: allows to assign a task to a class. If a CE is present, assigning
- a task to a class through this interface will prevent CE from
- reassigning the task to any class during reclassification.
-members: allows to see which tasks has been assigned to a class
-config: allow to view and modify configuration information of different
- resources in a class.
-
-Resource allocations for a class is controlled by the parameters:
-
-guarantee: specifies how much of a resource is guranteed to a class. A
- special value DONT_CARE(-2) mean that there is no specific
- guarantee of a resource is specified, this class may not get
- any resource if the system is runing short of resources
-limit: specifies the maximum amount of resource that is allowed to be
- allocated by a class. A special value DONT_CARE(-2) mean that
- there is no specific limit is specified, this class can get all
- the resources available.
-total_guarantee: total guarantee that is allowed among the children of this
- class. In other words, the sum of "guarantee"s of all children
- of this class cannot exit this number.
-max_limit: Maximum "limit" allowed for any of this class's children. In
- other words, "limit" of any children of this class cannot exceed
- this value.
-
-None of this parameters are absolute or have any units associated with
-them. These are just numbers(that are relative to its parents') that are
-used to calculate the absolute number of resource available for a specific
-class.
-
-Note: The root class has an absolute number of resource units associated with it.
-
+++ /dev/null
-Usage of CKRM without a classification engine
------------------------------------------------
-
-1. Create a class
-
- # mkdir /rcfs/taskclass/c1
- creates a taskclass named c1 , while
- # mkdir /rcfs/socket_class/s1
- creates a socketclass named s1
-
-The newly created class directory is automatically populated by magic files
-shares, stats, members, target and config.
-
-2. View default shares
-
- # cat /rcfs/taskclass/c1/shares
-
- "guarantee=-2,limit=-2,total_guarantee=100,max_limit=100" is the default
- value set for resources that have controllers registered with CKRM.
-
-3. change shares of a <class>
-
- One or more of the following fields can/must be specified
- res=<res_name> #mandatory
- guarantee=<number>
- limit=<number>
- total_guarantee=<number>
- max_limit=<number>
- e.g.
- # echo "res=numtasks,limit=20" > /rcfs/taskclass/c1
-
- If any of these parameters are not specified, the current value will be
- retained.
-
-4. Reclassify a task (listening socket)
-
- write the pid of the process to the destination class' target file
- # echo 1004 > /rcfs/taskclass/c1/target
-
- write the "<ipaddress>\<port>" string to the destination class' target file
- # echo "0.0.0.0\32770" > /rcfs/taskclass/c1/target
-
-5. Get a list of tasks (sockets) assigned to a taskclass (socketclass)
-
- # cat /rcfs/taskclass/c1/members
- lists pids of tasks belonging to c1
-
- # cat /rcfs/socket_class/s1/members
- lists the ipaddress\port of all listening sockets in s1
-
-6. Get the statictics of different resources of a class
-
- # cat /rcfs/tasksclass/c1/stats
- shows c1's statistics for each resource with a registered resource
- controller.
-
- # cat /rcfs/socket_class/s1/stats
- show's s1's stats for the listenaq controller.
-
-7. View the configuration values of the resources associated with a class
-
- # cat /rcfs/taskclass/c1/config
- shows per-controller config values for c1.
-
-8. Change the configuration values of resources associated with a class
- Configuration values are different for different resources. the comman
- field "res=<resname>" must always be specified.
-
- # echo "res=numtasks,parameter=value" > /rcfs/taskclass/c1/config
- to change (without any effect), the value associated with <parameter>.
-
-
+++ /dev/null
-CRBCE
-----------
-
-crbce is a superset of rbce. In addition to providing automatic
-classification, the crbce module
-- monitors per-process delay data that is collected by the delay
-accounting patch
-- collects data on significant kernel events where reclassification
-could occur e.g. fork/exec/setuid/setgid etc., and
-- uses relayfs to supply both these datapoints to userspace
-
-To illustrate the utility of the data gathered by crbce, we provide a
-userspace daemon called crbcedmn that prints the header info received
-from the records sent by the crbce module.
-
-0. Ensure that a CKRM-enabled kernel with following options configured
- has been compiled. At a minimum, core, rcfs, atleast one classtype,
- delay-accounting patch and relayfs. For testing, it is recommended
- all classtypes and resource controllers be compiled as modules.
-
-1. Ensure that the Makefile's BUILD_CRBCE=1 and KDIR points to the
- kernel of step 1 and call make.
- This also builds the userspace daemon, crbcedmn.
-
-2..9 Same as rbce installation and testing instructions,
- except replacing rbce.ko with crbce.ko
-
-10. Read the pseudo daemon help file
- # ./crbcedmn -h
-
-11. Run the crbcedmn to display all records being processed
- # ./crbcedmn
-
+++ /dev/null
-Kernel installation
-------------------------------
-
-<kernver> = version of mainline Linux kernel
-<ckrmver> = version of CKRM
-
-Note: It is expected that CKRM versions will change fairly rapidly. Hence once
-a CKRM version has been released for some <kernver>, it will only be made
-available for future <kernver>'s until the next CKRM version is released.
-
-1. Patch
-
- Apply ckrm/kernel/<kernver>/ckrm-<ckrmversion>.patch to a mainline kernel
- tree with version <kernver>.
-
- If CRBCE will be used, additionally apply the following patches, in order:
- delayacctg-<ckrmversion>.patch
- relayfs-<ckrmversion>.patch
-
-
-2. Configure
-
-Select appropriate configuration options:
-
-a. for taskclasses
-
- General Setup-->Class Based Kernel Resource Management
-
- [*] Class Based Kernel Resource Management
- <M> Resource Class File System (User API)
- [*] Class Manager for Task Groups
- <M> Number of Tasks Resource Manager
-
-b. To test socket_classes and multiple accept queue controller
-
- General Setup-->Class Based Kernel Resource Management
- [*] Class Based Kernel Resource Management
- <M> Resource Class File System (User API)
- [*] Class Manager for socket groups
- <M> Multiple Accept Queues Resource Manager
-
- Device Drivers-->Networking Support-->Networking options-->
- [*] Network packet filtering (replaces ipchains)
- [*] IP: TCP Multiple accept queues support
-
-c. To test CRBCE later (requires 2a.)
-
- File Systems-->Pseudo filesystems-->
- <M> Relayfs filesystem support
- (enable all sub fields)
-
- General Setup-->
- [*] Enable delay accounting
-
-
-3. Build, boot into kernel
-
-4. Enable rcfs
-
- # insmod <patchedtree>/fs/rcfs/rcfs.ko
- # mount -t rcfs rcfs /rcfs
-
- This will create the directories /rcfs/taskclass and
- /rcfs/socketclass which are the "roots" of subtrees for creating
- taskclasses and socketclasses respectively.
-
-5. Load numtasks and listenaq controllers
-
- # insmod <patchedtree>/kernel/ckrm/ckrm_tasks.ko
- # insmod <patchedtree>/kernel/ckrm/ckrm_listenaq.ko
+++ /dev/null
-0. Lifecycle of a LRU Page:
-----------------------------
-These are the events in a page's lifecycle:
- - allocation of the page
- there are multiple high level page alloc functions; __alloc_pages()
- is the lowest level function that does the real allocation.
- - get into LRU list (active list or inactive list)
- - get out of LRU list
- - freeing the page
- there are multiple high level page free functions; free_pages_bulk()
- is the lowest level function that does the real free.
-
-When the memory subsystem runs low on LRU pages, pages are reclaimed by
- - moving pages from active list to inactive list (refill_inactive_zone())
- - freeing pages from the inactive list (shrink_zone)
-depending on the recent usage of the page(approximately).
-
-1. Introduction
----------------
-Memory resource controller controls the number of lru physical pages
-(active and inactive list) a class uses. It does not restrict any
-other physical pages (slabs etc.,)
-
-For simplicity, this document will always refer lru physical pages as
-physical pages or simply pages.
-
-There are two parameters(that are set by the user) that affect the number
-of pages a class is allowed to have in active/inactive list.
-They are
- - guarantee - specifies the number of pages a class is
- guaranteed to get. In other words, if a class is using less than
- 'guarantee' number of pages, its pages will not be freed when the
- memory subsystem tries to free some pages.
- - limit - specifies the maximum number of pages a class can get;
- 'limit' in essence can be considered as the 'hard limit'
-
-Rest of this document details how these two parameters are used in the
-memory allocation logic.
-
-Note that the numbers that are specified in the shares file, doesn't
-directly correspond to the number of pages. But, the user can make
-it so by making the total_guarantee and max_limit of the default class
-(/rcfs/taskclass) to be the total number of pages(given in config file)
-available in the system.
-
- for example:
- # cd /rcfs/taskclass
- # cat config
- res=mem;tot_pages=239778,active=60473,inactive=135285,free=44555
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=100,max_limit=100
-
- "tot_pages=239778" above mean there are 239778 lru pages in
- the system.
-
- By making total_guarantee and max_limit to be same as this number at
- this level (/rcfs/taskclass), one can make guarantee and limit in all
- classes refer to the number of pages.
-
- # echo 'res=mem,total_guarantee=239778,max_limit=239778' > shares
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=239778,max_limit=239778
-
-
-The number of pages a class can use be anywhere between its guarantee and
-limit. CKRM memory controller springs into action when the system needs
-to choose a victim page to swap out. While the number of pages a class can
-have allocated may be anywhere between its guarantee and limit, victim
-pages will be choosen from classes that are above their guarantee.
-
-Pages will be freed from classes that are close to their "limit" before
-freeing pages from the classes that are close to their guarantee. Pages
-belonging to classes that are below their guarantee will not be chosen as
-a victim.
-
-2. Core Design
---------------------------
-
-CKRM memory resource controller taps at appropriate low level memory
-management functions to associate a page with a class and to charge
-a class that brings the page to the LRU list.
-
-2.1 Changes in page allocation function(__alloc_pages())
---------------------------------------------------------
-- If the class that the current task belong to is over 110% of its 'limit',
- allocation of page(s) fail.
-- After succesful allocation of a page, the page is attached with the class
- to which the current task belongs to.
-- Note that the class is _not_ charged for the page(s) here.
-
-2.2 Changes in page free(free_pages_bulk())
--------------------------------------------
-- page is freed from the class it belongs to.
-
-2.3 Adding/Deleting page to active/inactive list
--------------------------------------------------
-When a page is added to the active or inactive list, the class that the
-page belongs to is charged for the page usage.
-
-When a page is deleted from the active or inactive list, the class that the
-page belongs to is credited back.
-
-If a class uses upto its limit, attempt is made to shrink the class's usage
-to 90% of its limit, in order to help the class stay within its limit.
-But, if the class is aggressive, and keep getting over the class's limit
-often(more than 10 shrink events in 10 seconds), then the memory resource
-controller gives up on the class and doesn't try to shrink the class, which
-will eventually lead the class to reach its 110% of its limit and then the
-page allocations will start failing.
-
-2.4 Chages in the page reclaimation path (refill_inactive_zone and shrink_zone)
--------------------------------------------------------------------------------
-Pages will be moved from active to inactive list(refill_inactive_zone) and
-pages from inactive list will be freed in the following order:
-(range is calculated by subtracting 'guarantee' from 'limit')
- - Classes that are over 110% of their range
- - Classes that are over 100% of their range
- - Classes that are over 75% of their range
- - Classes that are over 50% of their range
- - Classes that are over 25% of their range
- - Classes whose parent is over 110% of its range
- - Classes that are over their guarantee
-
-2.5 Handling of Shared pages
-----------------------------
-Even if a mm is shared by tasks, the pages that belong to the mm will be
-charged against the individual tasks that bring the page into LRU.
-
-But, when any task that is using a mm moves to a different class or exits,
-then all pages that belong to the mm will be charged against the richest
-class among the tasks that are using the mm.
-
-Note: Shared page handling need to be improved with a better policy.
-
+++ /dev/null
-Installation
-------------
-
-1. Configure "Class based physical memory controller" under CKRM (see
- Documentation/ckrm/installation)
-
-2. Reboot the system with the new kernel.
-
-3. Verify that the memory controller is present by reading the file
- /rcfs/taskclass/config (should show a line with res=mem)
-
-Usage
------
-
-For brevity, unless otherwise specified all the following commands are
-executed in the default class (/rcfs/taskclass).
-
-Initially, the systemwide default class gets 100% of the LRU pages, and the
-config file displays the total number of physical pages.
-
- # cd /rcfs/taskclass
- # cat config
- res=mem;tot_pages=239778,active=60473,inactive=135285,free=44555
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=100,max_limit=100
-
- tot_pages - total number of pages
- active - number of pages in the active list ( sum of all zones)
- inactive - number of pages in the inactive list ( sum of all zones )
- free - number of free pages (sum of all pages)
-
- By making total_guarantee and max_limit to be same as tot_pages, one make
- make the numbers in shares file be same as the number of pages for a
- class.
-
- # echo 'res=mem,total_guarantee=239778,max_limit=239778' > shares
- # cat shares
- res=mem,guarantee=-2,limit=-2,total_guarantee=239778,max_limit=239778
-
-
-Class creation
---------------
-
- # mkdir c1
-
-Its initial share is don't care. The parent's share values will be unchanged.
-
-Setting a new class share
--------------------------
-
- # echo 'res=mem,guarantee=25000,limit=50000' > c1/shares
-
- # cat c1/shares
- res=mem,guarantee=25000,limit=50000,total_guarantee=100,max_limit=100
-
- 'guarantee' specifies the number of pages this class entitled to get
- 'limit' is the maximum number of pages this class can get.
-
-Monitoring
-----------
-
-stats file shows statistics of the page usage of a class
- # cat stats
- ----------- Memory Resource stats start -----------
- Number of pages used(including pages lent to children): 196654
- Number of pages guaranteed: 239778
- Maximum limit of pages: 239778
- Total number of pages available(after serving guarantees to children): 214778
- Number of pages lent to children: 0
- Number of pages borrowed from the parent: 0
- ----------- Memory Resource stats end -----------
-
+++ /dev/null
-Rule-based Classification Engine (RBCE)
--------------------------------------------
-
-The ckrm/rbce directory contains the sources for two classification engines
-called rbce and crbce. Both are optional, built as kernel modules and share much
-of their codebase. Only one classification engine (CE) can be loaded at a time
-in CKRM.
-
-
-With RBCE, user can specify rules for how tasks are classified to a
-class. Rules are specified by one or more attribute-value pairs and
-an associated class. The tasks that match all the attr-value pairs
-will get classified to the class attached with the rule.
-
-The file rbce_info under /rcfs/ce directory details the functionality
-of different files available under the directory and also details
-about attributes that can are used to define rules.
-
-order: When multiple rules are defined the rules are executed
- according to the order of a rule. Order can be specified
- while defining a rule. If order is not specified, the
- highest order will be assigned to the rule(i.e, the new
- rule will be executed after all the previously defined
- evaluate false). So, order of rules is important as that
- will decide, which class a task will get assigned to. For
- example, if we have the two following rules: r1:
- uid=1004,order=10,class=/rcfs/taskclass/c1 r2:
- uid=1004,cmd=grep,order=20,class=/rcfs/taskclass/c2 then,
- the task "grep" executed by user 1004 will always be
- assigned to class /rcfs/taskclass/c1, as rule r1 will be
- executed before r2 and the task successfully matched the
- rule's attr-value pairs. Rule r2 will never be consulted
- for the command. Note: The order in which the rules are
- displayed(by ls) has no correlation with the order of the
- rule.
-
-dependency: Rules can be defined to be depend on another rule. i.e a
- rule can be dependent on one rule and has its own
- additional attr-value pairs. the dependent rule will
- evaluate true only if all the attr-value pairs of both
- rules are satisfied. ex: r1: gid=502,class=/rcfs/taskclass
- r2: depend=r1,cmd=grep,class=rcfstaskclass/c1 r2 is a
- dependent rule that depends on r1, a task will be assigned
- to /rcfs/taskclass/c1 if its gid is 502 and the executable
- command name is "grep". If a task's gid is 502 but the
- command name is _not_ "grep" then it will be assigned to
- /rcfs/taskclass
-
- Note: The order of dependent rule must be _lesser_ than the
- rule it depends on, so that it is evaluated _before the
- base rule is evaluated. Otherwise the base rule will
- evaluate true and the task will be assigned to the class of
- that rule without the dependent rule ever getting
- evaluated. In the example above, order of r2 must be lesser
- than order of r1.
-
-app_tag: a task can be attached with a tag(ascii string), that becomes
- an attribute of that task and rules can be defined with the
- tag value.
-
-state: states are at two levels in RBCE. The entire RBCE can be
- enabled or disabled which writing 1 or 0 to the file
- rbce_state under /rcfs/ce. Disabling RBCE, would mean that
- the rules defined in RBCE will not be utilized for
- classifying a task to a class. A specific rule can be
- enabled/disabled by changing the state of that rule. Once
- it is disabled, the rule will not be evaluated.
+++ /dev/null
-Usage of CKRM with RBCE
---------------------------
-
-0. Ensure that a CKRM-enabled kernel with following options configured
- has been compiled. At a minimum, core, rcfs and atleast one
- classtype. For testing, it is recommended all classtypes and
- resource controllers be compiled as modules.
-
-1. Change ckrm/rbce/Makefile's KDIR to point to this compiled kernel's source
- tree and call make
-
-2. Load rbce module.
- # insmod ckrm/rbce/rbce.ko
- Note that /rcfs has to be mounted before this.
- Note: this command should populate the directory /rcfs/ce with files
- rbce_reclassify, rbce_tag, rbce_info, rbce_state and a directory
- rules.
-
- Note2: If these are not created automatically, just create them by
- using the commands touch and mkdir.(bug that needs to be fixed)
-
-3. Defining a rule
- Rules are defined by creating(by writing) to a file under the
- /rcfs/ce/rules directory by concatinating multiple attribute value
- pairs.
-
- Note that the classes must be defined before defining rules that
- uses the classes. eg: the command # echo
- "uid=1004,class=/rcfs/taskclass/c1" > /rcfs/ce/rules/r1 will define
- a rule r1 that classifies all tasks belong to user id 1004 to class
- /rcfs/taskclass/c1
-
-4. Viewing a rule
- read the corresponding file.
- to read rule r1, issue the command:
- # cat /rcfs/ce/rules/r1
-
-5. Changing a rule
-
- Changing a rule is done the same way as defining a rule, the new
- rule will include the old set of attr-value pairs slapped with new
- attr-value pairs. eg: if the current r2 is
- uid=1004,depend=r1,class=/rcfs/taskclass/c1
- (r1 as defined in step 3)
-
- the command:
- # echo gid=502 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,gid=502,depend=r1,class=/rcfs/taskclass/c1
-
- the command:
- # echo uid=1005 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1005,class=/rcfs/taskclass/c1
-
- the command:
- # echo class=/rcfs/taskclass/c2 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r1,class=/rcfs/taskclass/c2
-
- the command:
- # echo depend=r4 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r4,class=/rcfs/taskclass/c2
-
- the command:
- # echo +depend=r4 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,depend=r1,depend=r4,class=/rcfs/taskclass/c2
-
- the command:
- # echo -depend=r1 > /rcfs/ce/rules/r1
- will change the rule to
- r1: uid=1004,class=/rcfs/taskclass/c2
-
-6. Checking the state of RBCE
- State(enabled/disabled) of RBCE can be checked by reading the file
- /rcfs/ce/rbce_state, it will show 1(enabled) or 0(disabled).
- By default, RBCE is enabled(1).
- ex: # cat /rcfs/ce/rbce_state
-
-7. Changing the state of RBCE
- State of RBCE can be changed by writing 1(enable) or 0(disable).
- ex: # echo 1 > cat /rcfs/ce/rbce_state
-
-8. Checking the state of a rule
- State of a rule is displayed in the rule. Rule can be viewed by
- reading the rule file. ex: # cat /rcfs/ce/rules/r1
-
-9. Changing the state of a rule
-
- State of a rule can be changed by writing "state=1"(enable) or
- "state=0"(disable) to the corresponding rule file. By defeault, the
- rule is enabled when defined. ex: to disable an existing rule r1,
- issue the command
- # echo "state=0" > /rcfs/ce/rules/r1
-
-
To create the ip2mkdev shell script change to a convenient directory (/tmp
works just fine) and run the following command:
- unshar Documentation/computone.txt
+ unshar /usr/src/linux/Documentation/computone.txt
(This file)
You should now have a file ip2mkdev in your current working directory with
Herbert Valerio Riedel
Kyle McMartin
Adam J. Richter
- Fruhwirth Clemens (i586)
- Linus Torvalds (i586)
CAST5 algorithm contributors:
Kartikey Mahendra Bhatt (original developers unknown, FSF copyright).
-TEA/XTEA algorithm contributors:
- Aaron Grothe
-
-Khazad algorithm contributors:
- Aaron Grothe
-
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
Please send any credits updates or corrections to:
32 = /dev/ttyDB0 DataBooster serial port 0
...
39 = /dev/ttyDB7 DataBooster serial port 7
- 40 = /dev/ttySG0 SGI Altix console port
205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
32 = /dev/cudb0 Callout device for ttyDB0
...
39 = /dev/cudb7 Callout device for ttyDB7
- 40 = /dev/cusg0 Callout device for ttySG0
206 char OnStream SC-x0 tape devices
0 = /dev/osst0 First OnStream SCSI tape, mode 0
Supporting Tools:
-----------------
Supporting tools include digiDload, digiConfig, buildPCI, and ditty. See
-drivers/char/README.epca for more details. Note,
+/usr/src/linux/Documentation/README.epca.dir/user.doc for more details. Note,
this driver REQUIRES that digiDload be executed prior to it being used.
Failure to do this will result in an ENODEV error.
binary-only firmware.
The DVB drivers will be converted to use the request_firmware()
-hotplug interface (see Documentation/firmware_class/).
+hotplug interface (see linux/Documentation/firmware_class/).
(CONFIG_FW_LOADER)
The firmware can be loaded automatically via the hotplug manager
Hotplug Firmware Loading for 2.6 kernels
----------------------------------------
For 2.6 kernels the firmware is loaded at the point that the driver module is
-loaded. See Documentation/dvb/firmware.txt for more information.
+loaded. See linux/Documentation/dvb/firmware.txt for more information.
mv STB_PC_T.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2000t.fw
mv STB_PC_X.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2540t.fw
loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
- int (*check_flags)(int);
- int (*dir_notify)(struct file *, unsigned long);
};
locking rules:
sendfile: no
sendpage: no
get_unmapped_area: no
-check_flags: no
-dir_notify: no
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
and thrash the system to death, so large and/or important servers will want to
set this value to 0.
-nr_hugepages and hugetlb_shm_group
-----------------------------------
-
-nr_hugepages configures number of hugetlb page reserved for the system.
-
-hugetlb_shm_group contains group id that is allowed to create SysV shared
-memory segment using hugetlb page.
-
-laptop_mode
------------
-
-laptop_mode is a knob that controls "laptop mode". All the things that are
-controlled by this knob are discussed in Documentation/laptop-mode.txt.
-
-block_dump
-----------
-
-block_dump enables block I/O debugging when set to a nonzero value. More
-information on block I/O debugging is in Documentation/laptop-mode.txt.
-
2.5 /proc/sys/dev - Device specific parameters
----------------------------------------------
command to write value into these files, thereby changing the default settings
of the kernel.
------------------------------------------------------------------------------
+
+
+
+
+
+
+
*
-* Documentation/filesystems/udf.txt
+* ./Documentation/filesystems/udf.txt
*
UDF Filesystem version 0.9.8.1
though not all of them are actually meaningful to the kernel. Boot
loader authors who need additional command line options for the boot
loader itself should get them registered in
-Documentation/kernel-parameters.txt to make sure they will not
+linux/Documentation/kernel-parameters.txt to make sure they will not
conflict with actual kernel options now or in the future.
vga=<mode>
module outside the kernel is to use the kernel build system,
kbuild. Use the following command-line:
-make -C path/to/kernel/src M=$PWD modules
+make -C path/to/kernel/src SUBDIRS=$PWD modules
This requires that a makefile exits made in accordance to
Documentation/kbuild/makefiles.txt. Read that file for more details on
# Invokes the kernel build system to come back to the current
# directory and build yourmodule.ko.
default:
- make -C ${KERNEL_SOURCE} M=`pwd` modules
+ make -C ${KERNEL_SOURCE} SUBDIRS=`pwd` modules
Document Author: Bart Samwel (bart@samwel.tk)
Date created: January 2, 2004
-Last modified: July 10, 2004
+Last modified: April 3, 2004
Introduction
------------
-Laptop mode is used to minimize the time that the hard disk needs to be spun up,
+Laptopmode is used to minimize the time that the hard disk needs to be spun up,
to conserve battery power on laptops. It has been reported to cause significant
power savings.
--------
* Introduction
-* Installation
+* The short story
* Caveats
-* The Details
+* The details
* Tips & Tricks
* Control script
* ACPI integration
* Monitoring tool
-Installation
-------------
+The short story
+---------------
To use laptop mode, you don't need to set any kernel configuration options
-or anything. Simply install all the files included in this document, and
-laptop mode will automatically be started when you're on battery. For
-your convenience, a tarball containing an installer can be downloaded at:
+or anything. You simply need to run the laptop_mode control script (which
+is included in this document) as follows:
+
+# laptop_mode start
-http://www.xs4all.nl/~bsamwel/laptop_mode/tools
+Then set your harddisk spindown time to a relatively low value with hdparm:
-To configure laptop mode, you need to edit the configuration file, which is
-located in /etc/default/laptop-mode on Debian-based systems, or in
-/etc/sysconfig/laptop-mode on other systems.
+hdparm -S 4 /dev/hda
-Unfortunately, automatic enabling of laptop mode does not work for
-laptops that don't have ACPI. On those laptops, you need to start laptop
-mode manually. To start laptop mode, run "laptop_mode start", and to
-stop it, run "laptop_mode stop". (Note: The laptop mode tools package now
-has experimental support for APM, you might want to try that first.)
+The value -S 4 means 20 seconds idle time before spindown. Your harddisk will
+now only spin up when a disk cache miss occurs, or at least once every 10
+minutes to write back any pending changes.
+
+To stop laptop_mode, run "laptop_mode stop".
Caveats
-------
-* The downside of laptop mode is that you have a chance of losing up to 10
- minutes of work. If you cannot afford this, don't use it! The supplied ACPI
- scripts automatically turn off laptop mode when the battery almost runs out,
- so that you won't lose any data at the end of your battery life.
+* The downside of laptop mode is that you have a chance of losing up
+ to 10 minutes of work. If you cannot afford this, don't use it! It's
+ wise to turn OFF laptop mode when you're almost out of battery --
+ although this will make the battery run out faster, at least you'll
+ lose less work when it actually runs out. I'm still looking for someone
+ to submit instructions on how to turn off laptop mode when battery is low,
+ e.g., using ACPI events. I don't have a laptop myself, so if you do and
+ you care to contribute such instructions, please do.
* Most desktop hard drives have a very limited lifetime measured in spindown
cycles, typically about 50.000 times (it's usually listed on the spec sheet).
* If you have your filesystems listed as type "auto" in fstab, like I did, then
the control script will not recognize them as filesystems that need remounting.
- You must list the filesystems with their true type instead.
* It has been reported that some versions of the mutt mail client use file access
times to determine whether a folder contains new mail. If you use mutt and
- experience this, you must disable the noatime remounting by setting the option
- DO_REMOUNT_NOATIME to 0 in the configuration file.
+ experience this, you must disable the noatime remounting in the control script
+ by setting DO_REMOUNT_NOATIME=0.
-The Details
+The details
-----------
-Laptop mode is controlled by the knob /proc/sys/vm/laptop_mode. This knob is
+Laptop-mode is controlled by the flag /proc/sys/vm/laptop_mode. This flag is
present for all kernels that have the laptop mode patch, regardless of any
-configuration options. When the knob is set, any physical disk I/O (that might
-have caused the hard disk to spin up) causes Linux to flush all dirty blocks. The
-result of this is that after a disk has spun down, it will not be spun up
-anymore to write dirty blocks, because those blocks had already been written
-immediately after the most recent read operation. The value of the laptop_mode
-knob determines the time between the occurrence of disk I/O and when the flush
-is triggered. A sensible value for the knob is 5 seconds. Setting the knob to
-0 disables laptop mode.
+configuration options. When the flag is set, any physical disk read operation
+(that might have caused the hard disk to spin up) causes Linux to flush all dirty
+blocks. The result of this is that after a disk has spun down, it will not be spun
+up anymore to write dirty blocks, because those blocks had already been written
+immediately after the most recent read operation.
To increase the effectiveness of the laptop_mode strategy, the laptop_mode
control script increases dirty_expire_centisecs and dirty_writeback_centisecs in
all block dirtyings done to files. This makes it possible to debug why a disk
needs to spin up, and to increase battery life even more. The output of
block_dump is written to the kernel output, and it can be retrieved using
-"dmesg". When you use block_dump and your kernel logging level also includes
-kernel debugging messages, you probably want to turn off klogd, otherwise
+"dmesg". When you use block_dump, you may want to turn off klogd, otherwise
the output of block_dump will be logged, causing disk activity that is not
normally there.
+If 10 minutes is too much or too little downtime for you, you can configure
+this downtime as follows. In the control script, set the MAX_AGE value to the
+maximum number of seconds of disk downtime that you would like. You should
+then set your filesystem's commit interval to the same value. The dirty ratio
+is also configurable from the control script.
-Configuration
--------------
-
-The laptop mode configuration file is located in /etc/default/laptop-mode on
-Debian-based systems, or in /etc/sysconfig/laptop-mode on other systems. It
-contains the following options:
-
-MAX_AGE:
-
-Maximum time, in seconds, of hard drive spindown time that you are
-confortable with. Worst case, it's possible that you could lose this
-amount of work if your battery fails while you're in laptop mode.
-
-MINIMUM_BATTERY_MINUTES:
-
-Automatically disable laptop mode if the remaining number of minutes of
-battery power is less than this value. Default is 10 minutes.
-
-AC_HD/BATT_HD:
-
-The idle timeout that should be set on your hard drive when laptop mode
-is active (BATT_HD) and when it is not active (AC_HD). The defaults are
-20 seconds (value 4) for BATT_HD and 2 hours (value 244) for AC_HD. The
-possible values are those listed in the manual page for "hdparm" for the
-"-S" option.
-
-HD:
-
-The devices for which the spindown timeout should be adjusted by laptop mode.
-Default is /dev/hda. If you specify multiple devices, separate them by a space.
-
-READAHEAD:
-
-Disk readahead, in 512-byte sectors, while laptop mode is active. A large
-readahead can prevent disk accesses for things like executable pages (which are
-loaded on demand while the application executes) and sequentially accessed data
-(MP3s).
-
-DO_REMOUNTS:
-
-The control script automatically remounts any mounted journaled filesystems
-with approriate commit interval options. When this option is set to 0, this
-feature is disabled.
-
-DO_REMOUNT_NOATIME:
-
-When remounting, should the filesystems be remounted with the noatime option?
-Normally, this is set to "1" (enabled), but there may be programs that require
-access time recording.
-
-DIRTY_RATIO:
+If you don't like the idea of the control script remounting your filesystems
+for you, you can change DO_REMOUNTS to 0 in the script.
-The percentage of memory that is allowed to contain "dirty" or unsaved data
-before a writeback is forced, while laptop mode is active. Corresponds to
-the /proc/sys/vm/dirty_ratio sysctl.
-
-DIRTY_BACKGROUND_RATIO:
-
-The percentage of memory that is allowed to contain "dirty" or unsaved data
-after a forced writeback is done due to an exceeding of DIRTY_RATIO. Set
-this nice and low. This corresponds to the /proc/sys/vm/dirty_background_ratio
-sysctl.
-
-Note that the behaviour of dirty_background_ratio is quite different
-when laptop mode is active and when it isn't. When laptop mode is inactive,
-dirty_background_ratio is the threshold percentage at which background writeouts
-start taking place. When laptop mode is active, however, background writeouts
-are disabled, and the dirty_background_ratio only determines how much writeback
-is done when dirty_ratio is reached.
-
-DO_CPU:
-
-Enable CPU frequency scaling when in laptop mode. (Requires CPUFreq to be setup.
-See Documentation/cpu-freq/user-guide.txt for more info. Disabled by default.)
-
-CPU_MAXFREQ:
-
-When on battery, what is the maximum CPU speed that the system should use? Legal
-values are "slowest" for the slowest speed that your CPU is able to operate at,
-or a value listed in /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies.
+Thanks to Kiko Piris, the control script can be used to enable laptop mode on
+both the Linux 2.4 and 2.6 series.
Tips & Tricks
-------------
* Bartek Kania reports getting up to 50 minutes of extra battery life (on top
- of his regular 3 to 3.5 hours) using a spindown time of 5 seconds (BATT_HD=1).
+ of his regular 3 to 3.5 hours) using very aggressive power management (hdparm
+ -B1) and a spindown time of 5 seconds (hdparm -S1).
-* You can spin down the disk while playing MP3, by setting disk readahead
- to 8MB (READAHEAD=16384). Effectively, the disk will read a complete MP3 at
+* You can spin down the disk while playing MP3, by setting the disk readahead
+ to 8MB (hdparm -a 16384). Effectively, the disk will read a complete MP3 at
once, and will then spin down while the MP3 is playing. (Thanks to Bartek
Kania.)
this on powerbooks too. I hope that this is a piece of information that
might be useful to the Laptop Mode patch or it's users."
+* One thing which will cause disks to spin up is not-present application
+ and dynamic library text pages. The kernel will load program text off disk
+ on-demand, so each time you invoke an application feature for the first
+ time, the kernel needs to spin the disk up to go and fetch that part of the
+ application.
+
+ So it is useful to increase the disk readahead parameter greatly, so that
+ the kernel will pull all of the executable's pages into memory on the first
+ pagefault.
+
+ The supplied script does this.
+
* In syslog.conf, you can prefix entries with a dash ``-'' to omit syncing the
file after every logging. When you're using laptop-mode and your disk doesn't
spin down, this is a likely culprit.
(http://noflushd.sourceforge.net/), it seems that noflushd prevents laptop-mode
from doing its thing.
-* If you're worried about your data, you might want to consider using a USB
- memory stick or something like that as a "working area". (Be aware though
- that flash memory can only handle a limited number of writes, and overuse
- may wear out your memory stick pretty quickly. Do _not_ use journalling
- filesystems on flash memory sticks.)
-
-
-Configuration file for control and ACPI battery scripts
--------------------------------------------------------
-
-This allows the tunables to be changed for the scripts via an external
-configuration file
-
-It should be installed as /etc/default/laptop-mode on Debian, and as
-/etc/sysconfig/laptop-mode on Red Hat, SUSE, Mandrake, and other work-alikes.
-
---------------------CONFIG FILE BEGIN-------------------------------------------
-# Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
-# amount of work if your battery fails you while in laptop mode.
-#MAX_AGE=600
-
-# Automatically disable laptop mode when the number of minutes of battery
-# that you have left goes below this threshold.
-MINIMUM_BATTERY_MINUTES=10
-
-# Read-ahead, in 512-byte sectors. You can spin down the disk while playing MP3/OGG
-# by setting the disk readahead to 8MB (READAHEAD=16384). Effectively, the disk
-# will read a complete MP3 at once, and will then spin down while the MP3/OGG is
-# playing.
-#READAHEAD=4096
-
-# Shall we remount journaled fs. with appropiate commit interval? (1=yes)
-#DO_REMOUNTS=1
-
-# And shall we add the "noatime" option to that as well? (1=yes)
-#DO_REMOUNT_NOATIME=1
-
-# Dirty synchronous ratio. At this percentage of dirty pages the process
-# which
-# calls write() does its own writeback
-#DIRTY_RATIO=40
-
-#
-# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
-#
-#DIRTY_BACKGROUND_RATIO=5
-
-# kernel default dirty buffer age
-#DEF_AGE=30
-#DEF_UPDATE=5
-#DEF_DIRTY_BACKGROUND_RATIO=10
-#DEF_DIRTY_RATIO=40
-#DEF_XFS_AGE_BUFFER=15
-#DEF_XFS_SYNC_INTERVAL=30
-#DEF_XFS_BUFD_INTERVAL=1
-
-# This must be adjusted manually to the value of HZ in the running kernel
-# on 2.4, until the XFS people change their 2.4 external interfaces to work in
-# centisecs. This can be automated, but it's a work in progress that still
-# needs# some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for
-# external interfaces, and that is currently always set to 100. So you don't
-# need to change this on 2.6.
-#XFS_HZ=100
-
-# Should the maximum CPU frequency be adjusted down while on battery?
-# Requires CPUFreq to be setup.
-# See Documentation/cpu-freq/user-guide.txt for more info
-#DO_CPU=0
-
-# When on battery what is the maximum CPU speed that the system should
-# use? Legal values are "slowest" for the slowest speed that your
-# CPU is able to operate at, or a value listed in:
-# /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
-# Only applicable if DO_CPU=1.
-#CPU_MAXFREQ=slowest
-
-# Idle timeout for your hard drive (man hdparm for valid values, -S option)
-# Default is 2 hours on AC (AC_HD=244) and 20 seconds for battery (BATT_HD=4).
-#AC_HD=244
-#BATT_HD=4
-
-# The drives for which to adjust the idle timeout. Separate them by a space,
-# e.g. HD="/dev/hda /dev/hdb".
-#HD="/dev/hda"
-
-# Set the spindown timeout on a hard drive?
-#DO_HD=1
-
---------------------CONFIG FILE END---------------------------------------------
-
Control script
--------------
-Please note that this control script works for the Linux 2.4 and 2.6 series (thanks
-to Kiko Piris).
+Please note that this control script works for the Linux 2.4 and 2.6 series.
---------------------CONTROL SCRIPT BEGIN----------------------------------------
+--------------------CONTROL SCRIPT BEGIN------------------------------------------
#!/bin/bash
# start or stop laptop_mode, best run by a power management daemon when
#############################################################################
-# Source config
-if [ -f /etc/default/laptop-mode ] ; then
- # Debian
- . /etc/default/laptop-mode
-elif [ -f /etc/sysconfig/laptop-mode ] ; then
- # Others
- . /etc/sysconfig/laptop-mode
-fi
-
-# Don't raise an error if the config file is incomplete
-# set defaults instead:
-
-# Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
-# amount of work if your battery fails you while in laptop mode.
-MAX_AGE=${MAX_AGE:-'600'}
+# Age time, in seconds. Should be put into a sysconfig file
+MAX_AGE=600
# Read-ahead, in kilobytes
-READAHEAD=${READAHEAD:-'4096'}
+READAHEAD=4096
# Shall we remount journaled fs. with appropiate commit interval? (1=yes)
-DO_REMOUNTS=${DO_REMOUNTS:-'1'}
+DO_REMOUNTS=1
# And shall we add the "noatime" option to that as well? (1=yes)
-DO_REMOUNT_NOATIME=${DO_REMOUNT_NOATIME:-'1'}
-
-# Shall we adjust the idle timeout on a hard drive?
-DO_HD=${DO_HD:-'1'}
-
-# Adjust idle timeout on which hard drive?
-HD="${HD:-'/dev/hda'}"
-
-# spindown time for HD (hdparm -S values)
-AC_HD=${AC_HD:-'244'}
-BATT_HD=${BATT_HD:-'4'}
+DO_REMOUNT_NOATIME=1
# Dirty synchronous ratio. At this percentage of dirty pages the process which
# calls write() does its own writeback
-DIRTY_RATIO=${DIRTY_RATIO:-'40'}
-
-# cpu frequency scaling
-# See Documentation/cpu-freq/user-guide.txt for more info
-DO_CPU=${CPU_MANAGE:-'0'}
-CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
+DIRTY_RATIO=40
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
# of dirty memory to dirty_background_ratio. Set this nice and low, so once
# some writeout has commenced, we do a lot of it.
#
-DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
+DIRTY_BACKGROUND_RATIO=5
# kernel default dirty buffer age
-DEF_AGE=${DEF_AGE:-'30'}
-DEF_UPDATE=${DEF_UPDATE:-'5'}
-DEF_DIRTY_BACKGROUND_RATIO=${DEF_DIRTY_BACKGROUND_RATIO:-'10'}
-DEF_DIRTY_RATIO=${DEF_DIRTY_RATIO:-'40'}
-DEF_XFS_AGE_BUFFER=${DEF_XFS_AGE_BUFFER:-'15'}
-DEF_XFS_SYNC_INTERVAL=${DEF_XFS_SYNC_INTERVAL:-'30'}
-DEF_XFS_BUFD_INTERVAL=${DEF_XFS_BUFD_INTERVAL:-'1'}
+DEF_AGE=30
+DEF_UPDATE=5
+DEF_DIRTY_BACKGROUND_RATIO=10
+DEF_DIRTY_RATIO=40
+DEF_XFS_AGE_BUFFER=15
+DEF_XFS_SYNC_INTERVAL=30
+DEF_XFS_BUFD_INTERVAL=1
# This must be adjusted manually to the value of HZ in the running kernel
# on 2.4, until the XFS people change their 2.4 external interfaces to work in
# some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for external
# interfaces, and that is currently always set to 100. So you don't need to
# change this on 2.6.
-XFS_HZ=${XFS_HZ:-'100'}
+XFS_HZ=100
#############################################################################
fi
}
-deduce_fstype () {
- MP="$1"
- # My root filesystem unfortunately has
- # type "unknown" in /etc/mtab. If we encounter
- # "unknown", we try to get the type from fstab.
- cat /etc/fstab |
- grep -v '^#' |
- while read FSTAB_DEV FSTAB_MP FSTAB_FST FSTAB_OPTS FSTAB_DUMP FSTAB_DUMP ; do
- if [ "$FSTAB_MP" = "$MP" ]; then
- echo $FSTAB_FST
- exit 0
- fi
- done
-}
if [ $DO_REMOUNT_NOATIME -eq 1 ] ; then
NOATIME_OPT=",noatime"
if [ $DO_REMOUNTS -eq 1 ]; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
PARSEDOPTS="$(parse_mount_opts "$OPTS")"
- if [ "$FST" = 'unknown' ]; then
- FST=$(deduce_fstype $MP)
- fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts commit "$OPTS")"
fi
done
fi
- if [ $DO_HD -eq 1 ] ; then
- for THISHD in $HD ; do
- /sbin/hdparm -S $BATT_HD $THISHD > /dev/null 2>&1
- /sbin/hdparm -B 1 $THISHD > /dev/null 2>&1
- done
- fi
- if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
- if [ $CPU_MAXFREQ = 'slowest' ]; then
- CPU_MAXFREQ=`cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq`
- fi
- echo $CPU_MAXFREQ > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
- fi
echo "."
;;
stop)
if [ $DO_REMOUNTS -eq 1 ] ; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
# Reset commit and atime options to defaults.
- if [ "$FST" = 'unknown' ]; then
- FST=$(deduce_fstype $MP)
- fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts_wfstab $DEV commit $OPTS)"
fi
done
fi
- if [ $DO_HD -eq 1 ] ; then
- for THISHD in $HD ; do
- /sbin/hdparm -S $AC_HD $THISHD > /dev/null 2>&1
- /sbin/hdparm -B 255 $THISHD > /dev/null 2>&1
- done
- fi
- if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
- echo `cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq` > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
- fi
echo "."
;;
*)
esac
exit 0
---------------------CONTROL SCRIPT END------------------------------------------
+--------------------CONTROL SCRIPT END--------------------------------------------
ACPI integration
----------------
Dax Kelson submitted this so that the ACPI acpid daemon will
-kick off the laptop_mode script and run hdparm. The part that
-automatically disables laptop mode when the battery is low was
-writen by Jan Topinski.
+kick off the laptop_mode script and run hdparm.
------------------/etc/acpi/events/ac_adapter BEGIN------------------------------
+---------------------------/etc/acpi/events/ac_adapter BEGIN-------------------------------------------
event=ac_adapter
-action=/etc/acpi/actions/ac.sh %e
-----------------/etc/acpi/events/ac_adapter END---------------------------------
-
+action=/etc/acpi/actions/battery.sh
+---------------------------/etc/acpi/events/ac_adapter END-------------------------------------------
------------------/etc/acpi/events/battery BEGIN---------------------------------
-event=battery.*
-action=/etc/acpi/actions/battery.sh %e
-----------------/etc/acpi/events/battery END------------------------------------
+---------------------------/etc/acpi/actions/battery.sh BEGIN-------------------------------------------
+#!/bin/sh
+# cpu throttling
+# cat /proc/acpi/processor/CPU0/throttling for more info
+ACAD_THR=0
+BATT_THR=2
-----------------/etc/acpi/actions/ac.sh BEGIN-----------------------------------
-#!/bin/bash
+# spindown time for HD (man hdparm for valid values)
+# I prefer 2 hours for acad and 20 seconds for batt
+ACAD_HD=244
+BATT_HD=4
-# ac on/offline event handler
+# ac/battery event handler
-status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/$2/state`
+status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/AC/state`
case $status in
"on-line")
+ echo "Setting HD spindown for AC mode."
/sbin/laptop_mode stop
+ /sbin/hdparm -S $ACAD_HD /dev/hda > /dev/null 2>&1
+ /sbin/hdparm -B 255 /dev/hda > /dev/null 2>&1
+ #echo -n $ACAD_CPU:$ACAD_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
"off-line")
+ echo "Setting HD spindown for battery mode."
/sbin/laptop_mode start
+ /sbin/hdparm -S $BATT_HD /dev/hda > /dev/null 2>&1
+ /sbin/hdparm -B 1 /dev/hda > /dev/null 2>&1
+ #echo -n $BATT_CPU:$BATT_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
esac
----------------------------/etc/acpi/actions/ac.sh END--------------------------
-
-
----------------------------/etc/acpi/actions/battery.sh BEGIN-------------------
-#! /bin/bash
-
-# Automatically disable laptop mode when the battery almost runs out.
-
-BATT_INFO=/proc/acpi/battery/$2/state
-
-if [[ -f /proc/sys/vm/laptop_mode ]]
-then
- LM=`cat /proc/sys/vm/laptop_mode`
- if [[ $LM -gt 0 ]]
- then
- if [[ -f $BATT_INFO ]]
- then
- # Source the config file only now that we know we need
- if [ -f /etc/default/laptop-mode ] ; then
- # Debian
- . /etc/default/laptop-mode
- elif [ -f /etc/sysconfig/laptop-mode ] ; then
- # Others
- . /etc/sysconfig/laptop-mode
- fi
- MINIMUM_BATTERY_MINUTES=${MINIMUM_BATTERY_MINUTES:-'10'}
-
- ACTION="`cat $BATT_INFO | grep charging | cut -c 26-`"
- if [[ ACTION -eq "discharging" ]]
- then
- PRESENT_RATE=`cat $BATT_INFO | grep "present rate:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
- REMAINING=`cat $BATT_INFO | grep "remaining capacity:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
- fi
- if (($REMAINING * 60 / $PRESENT_RATE < $MINIMUM_BATTERY_MINUTES))
- then
- /sbin/laptop_mode stop
- fi
- else
- logger -p daemon.warning "You are using laptop mode and your battery interface $BATT_INFO is missing. This may lead to loss of data when the battery runs out. Check kernel ACPI support and /proc/acpi/battery folder, and edit /etc/acpi/battery.sh to set BATT_INFO to the correct path."
- fi
- fi
-fi
----------------------------/etc/acpi/actions/battery.sh END--------------------
-
+---------------------------/etc/acpi/actions/battery.sh END-------------------------------------------
Monitoring tool
---------------
Bartek Kania submitted this, it can be used to measure how much time your disk
spends spun up/down.
----------------------------dslm.c BEGIN-----------------------------------------
+---------------------------dslm.c BEGIN-------------------------------------------
/*
* Simple Disk Sleep Monitor
* by Bartek Kania
return 0;
}
----------------------------dslm.c END-------------------------------------------
+---------------------------dslm.c END---------------------------------------------
This should not cause problems for anybody, since everybody using a
2.1.x kernel should have updated their C library to a suitable version
-anyway (see the file "Documentation/Changes".)
+anyway (see the file "linux/Documentation/Changes".)
1.2 Allow Mixed Locks Again
---------------------------
The current list of parameters can be found in the files:
linux/net/TUNABLE
- Documentation/networking/ip-sysctl.txt
+ linux/Documentation/networking/ip-sysctl.txt
Some of these are accessible via the sysctl interface, and many more are
scheduled to be added in this way. For example, some parameters related
The SliceCOM board doesn't require firmware. You can have 4 of these cards
in one machine. The driver doesn't (yet) support shared interrupts, so
you will need a separate IRQ line for every board.
-Read Documentation/networking/slicecom.txt for help on configuring
+Read linux/Documentation/networking/slicecom.txt for help on configuring
this adapter.
THE HDLC/PPP LINE PROTOCOL DRIVER
you have to enable it with a boot time parameter. Prior to 2.4.2-ac18
the NMI-oopser is enabled unconditionally on x86 SMP boxes.
-On x86-64 the NMI oopser is on by default. On 64bit Intel CPUs
-it uses IO-APIC by default and on AMD it uses local APIC.
-
[ feel free to send bug reports, suggestions and patches to
Ingo Molnar <mingo@redhat.com> or the Linux SMP mailing
list at <linux-smp@vger.kernel.org> ]
A lot of the assembly code currently runs in real mode, which means
absolute addresses are used instead of virtual addresses as in the
rest of the kernel. To translate an absolute address to a virtual
-address you can lookup in System.map, add __PAGE_OFFSET (0x10000000
+address you can lookup in System.map, add __PAGE_OFFSET (0xc0000000
currently).
code tried to access.
Typical values for the System Responder address are addresses larger
-than __PAGE_OFFSET (0x10000000) which mean a virtual address didn't
+than __PAGE_OFFSET (0xc0000000) which mean a virtual address didn't
get translated to a physical address before real-mode code tried to
access it.
General Registers as specified by ABI
+ FPU Registers must not be used in kernel mode
+
Control Registers
CR 0 (Recovery Counter) used for ptrace
CR 8 (Protection ID) per-process value*
CR 9, 12, 13 (PIDS) unused
CR10 (CCR) lazy FPU saving*
-CR11 as specified by ABI (SAR)
+CR11 as specified by ABI
CR14 (interruption vector) initialized to fault_vector
CR15 (EIEM) initialized to all ones*
CR16 (Interval Timer) read for cycle count/write starts Interval Tmr
CR17-CR22 interruption parameters
-CR19 Interrupt Instruction Register
-CR20 Interrupt Space Register
-CR21 Interrupt Offset Register
-CR22 Interrupt PSW
CR23 (EIRR) read for pending interrupts/write clears bits
CR24 (TR 0) Kernel Space Page Directory Pointer
CR25 (TR 1) User Space Page Directory Pointer
should be sent to the mailing list available through the suspend2
website, and not to the Linux Kernel Mailing List. We are working
toward merging suspend2 into the mainline kernel.
-
-Q: Kernel thread must voluntarily freeze itself (call 'refrigerator'). But
-I found some kernel threads don't do it, and they don't freeze, and
-so the system can't sleep. Is this a known behavior?
-
-A: All such kernel threads need to be fixed, one by one. Select place
-where it is safe to be frozen (no kernel semaphores should be held at
-that point and it must be safe to sleep there), and add:
-
- if (current->flags & PF_FREEZE)
- refrigerator(PF_FREEZE);
-
-Q: What is the difference between between "platform", "shutdown" and
-"firmware" in /sys/power/disk?
-
-A:
-
-shutdown: save state in linux, then tell bios to powerdown
-
-platform: save state in linux, then tell bios to powerdown and blink
- "suspended led"
-
-firmware: tell bios to save state itself [needs BIOS-specific suspend
- partition, and has very little to do with swsusp]
-
-"platform" is actually right thing to do, but "shutdown" is most
-reliable.
+++ /dev/null
-===========================================================================
- HVCS
- IBM "Hypervisor Virtual Console Server" Installation Guide
- for Linux Kernel 2.6.4+
- Copyright (C) 2004 IBM Corporation
-
-===========================================================================
-NOTE:Eight space tabs are the optimum editor setting for reading this file.
-===========================================================================
-
- Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
- Date Created: March, 02, 2004
- Last Changed: July, 07, 2004
-
----------------------------------------------------------------------------
-Table of contents:
-
- 1. Driver Introduction:
- 2. System Requirements
- 3. Build Options:
- 3.1 Built-in:
- 3.2 Module:
- 4. Installation:
- 5. Connection:
- 6. Disconnection:
- 7. Configuration:
- 8. Questions & Answers:
- 9. Reporting Bugs:
-
----------------------------------------------------------------------------
-1. Driver Introduction:
-
-This is the device driver for the IBM Hypervisor Virtual Console Server,
-"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user
-space applications access to the system consoles of logically partitioned
-operating systems (Linux and AIX) running on the same partitioned Power5
-ppc64 system. Physical hardware consoles per partition are not practical
-on this hardware so system consoles are accessed by this driver using
-firmware interfaces to virtual terminal devices.
-
----------------------------------------------------------------------------
-2. System Requirements:
-
-This device driver was written using 2.6.4 Linux kernel APIs and will only
-build and run on kernels of this version or later.
-
-This driver was written to operate solely on IBM Power5 ppc64 hardware
-though some care was taken to abstract the architecture dependent firmware
-calls from the driver code.
-
-Sysfs must be mounted on the system so that the user can determine which
-major and minor numbers are associated with each vty-server. Directions
-for sysfs mounting are outside the scope of this document.
-
----------------------------------------------------------------------------
-3. Build Options:
-
-The hvcs driver registers itself as a tty driver. The tty layer
-dynamically allocates a block of major and minor numbers in a quantity
-requested by the registering driver. The hvcs driver asks the tty layer
-for 64 of these major/minor numbers by default to use for hvcs device node
-entries.
-
-If the default number of device entries is adequate then this driver can be
-built into the kernel. If not, the default can be over-ridden by inserting
-the driver as a module with insmod parameters.
-
----------------------------------------------------------------------------
-3.1 Built-in:
-
-The following menuconfig example demonstrates selecting to build this
-driver into the kernel.
-
- Device Drivers --->
- Character devices --->
- <*> IBM Hypervisor Virtual Console Server Support
-
-Begin the kernel make process.
-
----------------------------------------------------------------------------
-3.2 Module:
-
-The following menuconfig example demonstrates selecting to build this
-driver as a kernel module.
-
- Device Drivers --->
- Character devices --->
- <M> IBM Hypervisor Virtual Console Server Support
-
-The make process will build the following kernel modules:
-
- hvcs.ko
- hvcserver.ko
-
-To insert the module with the default allocation execute the following
-commands in the order they appear:
-
- insmod hvcserver.ko
- insmod hvcs.ko
-
-The hvcserver module contains architecture specific firmware calls and must
-be inserted first, otherwise the hvcs module will not find some of the
-symbols it expects.
-
-To override the default use an insmod parameter as follows (requesting 4
-tty devices as an example):
-
- insmod hvcs.ko hvcs_parm_num_devs=4
-
-There is a maximum number of dev entries that can be specified on insmod.
-We think that 1024 is currently a decent maximum number of server adapters
-to allow. This can always be changed by modifying the constant in the
-source file before building.
-
-NOTE: The length of time it takes to insmod the driver seems to be related
-to the number of tty interfaces the registering driver requests.
-
-In order to remove the driver module execute the following command:
-
- rmmod hvcs.ko
-
-The recommended method for installing hvcs as a module is to use depmod to
-build a current modules.dep file in /lib/modules/`uname -r` and then
-execute:
-
-modprobe hvcs hvcs_parm_num_devs=4
-
-The modules.dep file indicates that hvcserver.ko needs to be inserted
-before hvcs.ko and modprobe uses this file to smartly insert the modules in
-the proper order.
-
-The following modprobe command is used to remove hvcs and hvcserver in the
-proper order:
-
-modprobe -r hvcs
-
----------------------------------------------------------------------------
-4. Installation:
-
-The tty layer creates sysfs entries which contain the major and minor
-numbers allocated for the hvcs driver. The following snippet of "tree"
-output of the sysfs directory shows where these numbers are presented:
-
- sys/
- |-- *other sysfs base dirs*
- |
- |-- class
- | |-- *other classes of devices*
- | |
- | `-- tty
- | |-- *other tty devices*
- | |
- | |-- hvcs0
- | | `-- dev
- | |-- hvcs1
- | | `-- dev
- | |-- hvcs2
- | | `-- dev
- | |-- hvcs3
- | | `-- dev
- | |
- | |-- *other tty devices*
- |
- |-- *other sysfs base dirs*
-
-For the above examples the following output is a result of cat'ing the
-"dev" entry in the hvcs directory:
-
- Pow5:/sys/class/tty/hvcs0/ # cat dev
- 254:0
-
- Pow5:/sys/class/tty/hvcs1/ # cat dev
- 254:1
-
- Pow5:/sys/class/tty/hvcs2/ # cat dev
- 254:2
-
- Pow5:/sys/class/tty/hvcs3/ # cat dev
- 254:3
-
-The output from reading the "dev" attribute is the char device major and
-minor numbers that the tty layer has allocated for this driver's use. Most
-systems running hvcs will already have the device entries created or udev
-will do it automatically.
-
-Given the example output above, to manually create a /dev/hvcs* node entry
-mknod can be used as follows:
-
- mknod /dev/hvcs0 c 254 0
- mknod /dev/hvcs1 c 254 1
- mknod /dev/hvcs2 c 254 2
- mknod /dev/hvcs3 c 254 3
-
-Using mknod to manually create the device entries makes these device nodes
-persistent. Once created they will exist prior to the driver insmod.
-
-Attempting to connect an application to /dev/hvcs* prior to insertion of
-the hvcs module will result in an error message similar to the following:
-
- "/dev/hvcs*: No such device".
-
-NOTE: Just because there is a device node present doesn't mean that there
-is a vty-server device configured for that node.
-
----------------------------------------------------------------------------
-5. Connection
-
-Since this driver controls devices that provide a tty interface a user can
-interact with the device node entries using any standard tty-interactive
-method (e.g. "cat", "dd", "echo"). The intent of this driver however, is
-to provide real time console interaction with a Linux partition's console,
-which requires the use of applications that provide bi-directional,
-interactive I/O with a tty device.
-
-Applications (e.g. "minicom" and "screen") that act as terminal emulators
-or perform terminal type control sequence conversion on the data being
-passed through them are NOT acceptable for providing interactive console
-I/O. These programs often emulate antiquated terminal types (vt100 and
-ANSI) and expect inbound data to take the form of one of these supported
-terminal types but they either do not convert, or do not _adequately_
-convert, outbound data into the terminal type of the terminal which invoked
-them (though screen makes an attempt and can apparently be configured with
-much termcap wrestling.)
-
-For this reason kermit and cu are two of the recommended applications for
-interacting with a Linux console via an hvcs device. These programs simply
-act as a conduit for data transfer to and from the tty device. They do not
-require inbound data to take the form of a particular terminal type, nor do
-they cook outbound data to a particular terminal type.
-
-In order to ensure proper functioning of console applications one must make
-sure that once connected to a /dev/hvcs console that the console's $TERM
-env variable is set to the exact terminal type of the terminal emulator
-used to launch the interactive I/O application. If one is using xterm and
-kermit to connect to /dev/hvcs0 when the console prompt becomes available
-one should "export TERM=xterm" on the console. This tells ncurses
-applications that are invoked from the console that they should output
-control sequences that xterm can understand.
-
-As a precautionary measure an hvcs user should always "exit" from their
-session before disconnecting an application such as kermit from the device
-node. If this is not done, the next user to connect to the console will
-continue using the previous user's logged in session which includes
-using the $TERM variable that the previous user supplied.
-
----------------------------------------------------------------------------
-6. Disconnection
-
-As a security feature to prevent the delivery of stale data to an
-unintended target the Power5 system firmware disables the fetching of data
-and discards that data when a connection between a vty-server and a vty has
-been severed. As an example, when a vty-server is immediately disconnected
-from a vty following output of data to the vty the vty adapter may not have
-enough time between when it received the data interrupt and when the
-connection was severed to fetch the data from firmware before the fetch is
-disabled by firmware.
-
-When hvcs is being used to serve consoles this behavior is not a huge issue
-because the adapter stays connected for large amounts of time following
-almost all data writes. When hvcs is being used as a tty conduit to tunnel
-data between two partitions [see Q & A below] this is a huge problem
-because the standard Linux behavior when cat'ing or dd'ing data to a device
-is to open the tty, send the data, and then close the tty. If this driver
-manually terminated vty-server connections on tty close this would close
-the vty-server and vty connection before the target vty has had a chance to
-fetch the data.
-
-Additionally, disconnecting a vty-server and vty only on module removal or
-adapter removal is impractical because other vty-servers in other
-partitions may require the usage of the target vty at any time.
-
-Due to this behavioral restriction disconnection of vty-servers from the
-connected vty is a manual procedure using a write to a sysfs attribute
-outlined below, on the other hand the initial vty-server connection to a
-vty is established automatically by this driver. Manual vty-server
-connection is never required.
-
-In order to terminate the connection between a vty-server and vty the
-"vterm_state" sysfs attribute within each vty-server's sysfs entry is used.
-Reading this attribute reveals the current connection state of the
-vty-server adapter. A zero means that the vty-server is not connected to a
-vty. A one indicates that a connection is active.
-
-Writing a '0' (zero) to the vterm_state attribute will disconnect the VTERM
-connection between the vty-server and target vty ONLY if the vterm_state
-previously read '1'. The write directive is ignored if the vterm_state
-read '0' or if any value other than '0' was written to the vterm_state
-attribute. The following example will show the method used for verifying
-the vty-server connection status and disconnecting a vty-server connection.
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
- 1
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo 0 > vterm_state
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
- 0
-
-All vty-server connections are automatically terminated when the device is
-hotplug removed and when the module is removed.
-
----------------------------------------------------------------------------
-7. Configuration
-
-Each vty-server has a sysfs entry in the /sys/devices/vio directory, which
-is symlinked in several other sysfs tree directories, notably under the
-hvcs driver entry, which looks like the following example:
-
- Pow5:/sys/bus/vio/drivers/hvcs # ls
- . .. 30000003 30000004 rescan
-
-By design, firmware notifies the hvcs driver of vty-server lifetimes and
-partner vty removals but not the addition of partner vtys. Since an HMC
-Super Admin can add partner info dynamically we have provided the hvcs
-driver sysfs directory with the "rescan" update attribute which will query
-firmware and update the partner info for all the vty-servers that this
-driver manages. Writing a '1' to the attribute triggers the update. An
-explicit example follows:
-
- Pow5:/sys/bus/vio/drivers/hvcs # echo 1 > rescan
-
-Reading the attribute will indicate a state of '1' or '0'. A one indicates
-that an update is in process. A zero indicates that an update has
-completed or was never executed.
-
-Vty-server entries in this directory are a 32 bit partition unique unit
-address that is created by firmware. An example vty-server sysfs entry
-looks like the following:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls
- . current_vty devspec partner_clcs vterm_state
- .. detach_state name partner_vtys
-
-Each entry is provided, by default with a "name" attribute. Reading the
-"name" attribute will reveal the device type as shown in the following
-example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name
- vty-server
-
-Each entry is also provided, by default, with a "devspec" attribute which
-reveals the full device specification when read, as shown in the following
-example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec
- /vdevice/vty-server@30000004
-
-Each vty-server sysfs dir is provided with two read-only attributes that
-provide lists of easily parsed partner vty data: "partner_vtys" and
-"partner_clcs".
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys
- 30000000
- 30000001
- 30000002
- 30000000
- 30000000
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_clcs
- U5112.428.103048A-V3-C0
- U5112.428.103048A-V3-C2
- U5112.428.103048A-V3-C3
- U5112.428.103048A-V4-C0
- U5112.428.103048A-V5-C0
-
-Reading partner_vtys returns a list of partner vtys. Vty unit address
-numbering is only per-partition-unique so entries will frequently repeat.
-
-Reading partner_clcs returns a list of "converged location codes" which are
-composed of a system serial number followed by "-V*", where the '*' is the
-target partition number, and "-C*", where the '*' is the slot of the
-adapter. The first vty partner corresponds to the first clc item, the
-second vty partner to the second clc item, etc.
-
-A vty-server can only be connected to a single vty at a time. The entry,
-"current_vty" prints the clc of the currently selected partner vty when
-read.
-
-The current_vty can be changed by writing a valid partner clc to the entry
-as in the following example:
-
- Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304
- 8A-V4-C0 > current_vty
-
-Changing the current_vty when a vty-server is already connected to a vty
-does not affect the current connection. The change takes effect when the
-currently open connection is freed.
-
-Information on the "vterm_state" attribute was covered earlier on the
-chapter entitled "disconnection".
-
----------------------------------------------------------------------------
-8. Questions & Answers:
-===========================================================================
-Q: What are the security concerns involving hvcs?
-
-A: There are three main security concerns:
-
- 1. The creator of the /dev/hvcs* nodes has the ability to restrict
- the access of the device entries to certain users or groups. It
- may be best to create a special hvcs group privilege for providing
- access to system consoles.
-
- 2. To provide network security when grabbing the console it is
- suggested that the user connect to the console hosting partition
- using a secure method, such as SSH or sit at a hardware console.
-
- 3. Make sure to exit the user session when done with a console or
- the next vty-server connection (which may be from another
- partition) will experience the previously logged in session.
-
----------------------------------------------------------------------------
-Q: How do I multiplex a console that I grab through hvcs so that other
-people can see it:
-
-A: You can use "screen" to directly connect to the /dev/hvcs* device and
-setup a session on your machine with the console group privileges. As
-pointed out earlier by default screen doesn't provide the termcap settings
-for most terminal emulators to provide adequate character conversion from
-term type "screen" to others. This means that curses based programs may
-not display properly in screen sessions.
-
----------------------------------------------------------------------------
-Q: Why are the colors all messed up?
-Q: Why are the control characters acting strange or not working?
-Q: Why is the console output all strange and unintelligible?
-
-A: Please see the preceding section on "Connection" for a discussion of how
-applications can affect the display of character control sequences.
-Additionally, just because you logged into the console using an xterm
-doesn't mean someone else didn't log into the console with the HMC console
-(vt320) before you and leave the session logged in. The best thing to do
-is to export TERM to the terminal type of your terminal emulator when you
-get the console. Additionally make sure to "exit" the console before you
-disconnect from the console. This will ensure that the next user gets
-their own TERM type set when they login.
-
----------------------------------------------------------------------------
-Q: When I try to CONNECT kermit to an hvcs device I get:
-"Sorry, can't open connection: /dev/hvcs*" What is happening?
-
-A: Some other Power5 console mechanism has a connection to the vty and
-isn't giving it up. You can try to force disconnect the consoles from the
-HMC by right clicking on the partition and then selecting "close terminal".
-Otherwise you have to hunt down the people who have console authority. It
-is possible that you already have the console open using another kermit
-session and just forgot about it. Please review the console options for
-Power5 systems to determine the many ways a system console can be held.
-
-OR
-
-A: Another user may not have a connectivity method currently attached to a
-/dev/hvcs device but the vterm_state may reveal that they still have the
-vty-server connection established. They need to free this using the method
-outlined in the section on "Disconnection" in order for others to connect
-to the target vty.
-
-OR
-
-A: The user profile you are using to execute kermit probably doesn't have
-permissions to use the /dev/hvcs* device.
-
-OR
-
-A: You probably haven't inserted the hvcs.ko module yet but the /dev/hvcs*
-entry still exists (on systems without udev).
-
-OR
-
-A: There is not a corresponding vty-server device that maps to an existing
-/dev/hvcs* entry.
-
----------------------------------------------------------------------------
-Q: When I try to CONNECT kermit to an hvcs device I get:
-"Sorry, write access to UUCP lockfile directory denied."
-
-A: The /dev/hvcs* entry you have specified doesn't exist where you said it
-does? Maybe you haven't inserted the module (on systems with udev).
-
----------------------------------------------------------------------------
-Q: If I already have one Linux partition installed can I use hvcs on said
-partition to provide the console for the install of a second Linux
-partition?
-
-A: Yes, granted that you are connected to the /dev/hvcs* device using
-kermit or cu or some other program that doesn't provide terminal emulation.
-
----------------------------------------------------------------------------
-Q: Can I connect to more than one partition's console at a time using this
-driver?
-
-A: Yes. Of course this means that there must be more than one vty-server
-configured for this partition and each must point to a disconnected vty.
-
----------------------------------------------------------------------------
-Q: Does the hvcs driver support dynamic (hotplug) addition of devices?
-
-A: Yes, if you have dlpar and hotplug enabled for your system and it has
-been built into the kernel the hvcs driver is configured to dynamically
-handle additions of new devices and removals of unused devices.
-
----------------------------------------------------------------------------
-Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty
-device on that partition as the other end of the pipe?
-
-A: Yes, on Power5 platforms the hvc_console driver provides a tty interface
-for extra /dev/hvc* devices (where /dev/hvc0 is most likely the console).
-In order to get a tty conduit working between the two partitions the HMC
-Super Admin must create an additional "serial server" for the target
-partition with the HMC gui which will show up as /dev/hvc* when the target
-partition is rebooted.
-
-The HMC Super Admin then creates an additional "serial client" for the
-current partition and points this at the target partition's newly created
-"serial server" adapter (remember the slot). This shows up as an
-additional /dev/hvcs* device.
-
-Now a program on the target system can be configured to read or write to
-/dev/hvc* and another program on the current partition can be configured to
-read or write to /dev/hvcs*. Now you have a tty conduit between two
-partitions.
-
----------------------------------------------------------------------------
-9. Reporting Bugs:
-
-The proper channel for reporting bugs is either through the Linux OS
-distribution company that provided your OS or by posting issues to the
-ppc64 development mailing list at:
-
-linuxppc64-dev@lists.linuxppc.org
-
-This request is to provide a documented and searchable public exchange
-of the problems and solutions surrounding this driver for the benefit of
-all users.
+++ /dev/null
-Linux 2.6.x on MPC52xx family
------------------------------
-
-For the latest info, go to http://www.246tNt.com/mpc52xx/state.txt
-
-To compile/use :
-
- - U-Boot:
- # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
- if you wish to ).
- # make lite5200_defconfig
- # make uImage
-
- then, on U-boot:
- => tftpboot 200000 uImage
- => tftpboot 400000 pRamdisk
- => bootm 200000 400000
-
- - DBug:
- # <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
- if you wish to ).
- # make lite5200_defconfig
- # cp your_initrd.gz arch/ppc/boot/images/ramdisk.image.gz
- # make zImage.initrd
- # make
-
- then in DBug:
- DBug> dn -i zImage.initrd.lite5200
-
-
-Some remarks :
- - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100
-  is not supported, and I'm not sure anyone is interested in working on it
-  so. I didn't take 5xxx because there's apparently a lot of 5xxx that have
- nothing to do with the MPC5200. I also included the 'MPC' for the same
- reason.
- - Of course, I inspired myself from the 2.4 port. If you think I forgot to
- mention you/your company in the copyright of some code, I'll correct it
- ASAP.
- - The codes wants the MBAR to be set at 0xf0000000 by the bootloader. It's
- mapped 1:1 with the MMU. If for whatever reason, you want to change this,
- beware that some code depends on the 0xf0000000 address and other depends
- on the 1:1 mapping.
- - Most of the code assumes that port multiplexing, frequency selection, ...
- has already been done. IMHO this should be done as early as possible, in
- the bootloader. If for whatever reason you can't do it there, do it in the
- platform setup code (if U-Boot) or in the arch/ppc/boot/simple/... (if
- DBug)
Then notify /sbin/init that /etc/inittab has changed, by issuing
the telinit command with the q operand:
- cd Documentation/s390
+ cd /usr/src/linux/Documentation/s390
sh config3270.sh
sh /tmp/mkdev3270
telinit q
Documentation
=============
There is a SCSI documentation directory within the kernel source tree,
-typically Documentation/scsi . Most documents are in plain
+typically /usr/src/linux/Documentation/scsi . Most documents are in plain
(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
found in that directory. A more recent copy of this document may be found
at http://www.torque.net/scsi/scsi_mid_low_api.txt.gz .
<para>
More precise information can be found in
- <filename>Documentation/sound/alsa/ControlNames.txt</filename>.
+ <filename>alsa-kernel/Documentation/sound/alsa/ControlNames.txt</filename>.
</para>
</section>
</section>
The callback is much more complicated than the text-file
version. You need to use a low-level i/o functions such as
<function>copy_from/to_user()</function> to transfer the
- data.
+ data. Also, you have to keep tracking the file position, too.
<informalexample>
<programlisting>
static long my_file_io_read(snd_info_entry_t *entry,
void *file_private_data,
struct file *file,
- char *buf,
- unsigned long count,
- unsigned long pos)
+ char *buf, long count)
{
long size = count;
- if (pos + size > local_max_size)
- size = local_max_size - pos;
- if (copy_to_user(buf, local_data + pos, size))
+ if (file->f_pos + size > local_max_size)
+ size = local_max_size - file->f_pos;
+ if (copy_to_user(buf, local_data + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
return size;
}
]]>
# insmod awe_wave
(Be sure to load awe_wave after sb!)
- See Documentation/sound/oss/AWE32 for
+ See /usr/src/linux/Documentation/sound/oss/AWE32 for
more details.
9. (only for obsolete systems) If you don't have /dev/sequencer
========
0.1.0 11/20/1998 First version, draft
1.0.0 11/1998 Alan Cox changes, incorporation in 2.2.0
- as Documentation/sound/oss/Introduction
+ as /usr/src/linux/Documentation/sound/oss/Introduction
1.1.0 6/30/1999 Second version, added notes on making the drivers,
                        added info on multiple sound cards of similar types,
added more diagnostics info, added info about esd.
4) OSS's WWW site at http://www.opensound.com.
-5) All the files in Documentation/sound.
+5) All the files in linux/Documentation/sound.
6) The comments and code in linux/drivers/sound.
This documentation is relevant for the PAS16 driver (pas2_card.c and
friends) under kernel version 2.3.99 and later. If you are
unfamiliar with configuring sound under Linux, please read the
-Sound-HOWTO, Documentation/sound/oss/Introduction and other
+Sound-HOWTO, linux/Documentation/sound/oss/Introduction and other
relevant docs first.
The following information is relevant information from README.OSS
The new stuff for 2.3.99 and later
============================================================================
-The following configuration options from Documentation/Configure.help
+The following configuration options from linux/Documentation/Configure.help
are relevant to configuring the PAS16:
Sound card support
dev/ device specific information (eg dev/cdrom/info)
fs/ specific filesystems
filehandle, inode, dentry and quota tuning
- binfmt_misc <Documentation/binfmt_misc.txt>
+ binfmt_misc <linux/Documentation/binfmt_misc.txt>
kernel/ global kernel info / tuning
miscellaneous stuff
net/ networking stuff, for documentation look in:
- <Documentation/networking/>
+ <linux/Documentation/networking/>
proc/ <empty>
sunrpc/ SUN Remote Procedure Call (NFS)
vm/ memory management tuning
- dirty_writeback_centisecs
- max_map_count
- min_free_kbytes
-- laptop_mode
-- block_dump
==============================================================
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
-dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump:
+dirty_writeback_centisecs, vfs_cache_pressure:
See Documentation/filesystems/proc.txt
NOTE:
The USB subsystem now has a substantial section in "The Linux Kernel API"
- guide (in Documentation/DocBook), generated from the current source
+ guide (in linux/Documentation/DocBook), generated from the current source
code. This particular documentation file isn't particularly current or
complete; don't rely on it except for a quick overview.
2000-July-12
For USB help other than the readme files that are located in
-Documentation/usb/*, see the following:
+linux/Documentation/usb/*, see the following:
Linux-USB project: http://www.linux-usb.org
mirrors at http://www.suse.cz/development/linux-usb/
Information - video4linux:
http://roadrunner.swansea.linux.org.uk/v4lapi.shtml
-Documentation/video4linux/API.html
+/usr/src/linux/Documentation/video4linux/API.html
/usr/include/linux/videodev.h
Information - video4linux/mjpeg extensions:
+++ /dev/null
-
-debug_switch:
-
- 0 1
-
- 1 2
-
- 2 4
-
- 3 8
-
- 4 16
-
- 5 32
-
- 6 64
-
- 7 128
-
-
-debug_xid:
-
- 0 1 "alloc_vx_info(%d) = %p\n"
- "dealloc_vx_info(%p)"
- "loc_vx_info(%d) = %p (not available)"
- "loc_vx_info(%d) = %p (found)"
- "loc_vx_info(%d) = %p (new)"
-
- 1 2 "alloc_vx_info(%d)*"
- "loc_vx_info(%d)*"
- "locate_vx_info(%d)"
-
- 2 4 "get_vx_info(%p[#%d.%d])"
- "put_vx_info(%p[#%d.%d])"
-
- 3 8 "set_vx_info(%p[#%d.%d.%d])"
- "clr_vx_info(%p[#%d.%d.%d])"
- "rcu_free_vx_info(%p): uc=%d"
-
- 4 16 "__hash_vx_info: %p[#%d]"
- "__unhash_vx_info: %p[#%d]"
- "__vx_dynamic_id: [#%d]"
-
- 5 32 "vx_migrate_task(%p,%p[#%d.%d])"
- "task_get_vx_info(%p)"
-
- 6 64
-
- 7 128
-
-
-debug_nid:
-
- 0 1 "alloc_nx_info() = %p"
- "dealloc_nx_info(%p)"
- "loc_nx_info(%d) = %p (not available)"
- "loc_nx_info(%d) = %p (found)"
- "loc_nx_info(%d) = %p (new)"
-
- 1 2 "alloc_nx_info(%d)*"
- "loc_nx_info(%d)*"
-
- 2 4 "get_nx_info(%p[#%d.%d])"
- "put_nx_info(%p[#%d.%d])"
-
- 3 8 "set_nx_info(%p[#%d.%d.%d])"
- "clr_nx_info(%p[#%d.%d.%d])"
- "rcu_free_nx_info(%p): uc=%d"
-
- 4 16 "__hash_nx_info: %p[#%d]"
- "__unhash_nx_info: %p[#%d]"
- "__nx_dynamic_id: [#%d]"
-
- 5 32 "nx_migrate_task(%p,%p[#%d.%d])"
- "task_get_nx_info(%p)"
- "create_nx_info()"
-
- 6 64
-
- 7 128
-
-
-debug_dlim:
-
- 0 1 "alloc_dl_info(%p,%d) = %p"
- "dealloc_dl_info(%p)"
- "locate_dl_info(%p,#%d) = %p"
-
- 1 2 "alloc_dl_info(%p,%d)*"
-
- 2 4 "get_dl_info(%p[#%d.%d])"
- "put_dl_info(%p[#%d.%d])"
-
- 3 8 "rcu_free_dl_info(%p)"
- "__hash_dl_info: %p[#%d]"
- "__unhash_dl_info: %p[#%d]"
-
-
- 4 16 "ALLOC (%p,#%d)%c inode (%d)"
- "FREE (%p,#%d)%c inode"
-
- 5 32 "ALLOC (%p,#%d)%c %lld bytes (%d)"
- "FREE (%p,#%d)%c %lld bytes"
-
- 6 64 "ADJUST: %lld,%lld on %d,%d [mult=%d]"
-
- 7 128 "ext3_has_free_blocks(%p): free=%u, root=%u"
- "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d"
-
-
-
-debug_cvirt:
-
-
- 0 1
-
- 1 2
-
- 2 4 "vx_map_tgid: %p/%llx: %d -> %d"
- "vx_rmap_tgid: %p/%llx: %d -> %d"
-
- 3 8
-
- 4 16
-
- 5 32
-
- 6 64
-
- 7 128
-
-
-
-debug_net:
-
-
- 0 1
-
- 1 2
-
- 2 4 "tcp_in_list(%p) %p,%p;%lx"
-
- 3 8 "inet_bind(%p) %p,%p;%lx"
-
- 4 16 "ip_route_connect(%p) %p,%p;%lx"
-
- 5 32 "tcp_ipv4_addr_conflict(%p,%p) %p,%p;%lx %p,%p;%lx"
-
- 6 64 "sk: %p [#%d] (from %d)"
- "sk,req: %p [#%d] (from %d)"
- "sk,egf: %p [#%d] (from %d)"
- "sk,egn: %p [#%d] (from %d)"
- "tw: %p [#%d] (from %d)"
-
- 7 128 "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
- "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
-
-
-
-
-debug_limit:
-
- n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s"
- "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
-
- m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s"
- "vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
- "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
-
-
DEFXX FDDI NETWORK DRIVER
P: Maciej W. Rozycki
-M: macro@linux-mips.org
+M: macro@ds2.pg.gda.pl
S: Maintained
DELL LAPTOP SMM DRIVER
L: linuxppc-embedded@lists.linuxppc.org
S: Maintained
-LINUX FOR POWERPC EMBEDDED PPC8XX AND BOOT CODE
-P: Tom Rini
-M: trini@kernel.crashing.org
-W: http://www.penguinppc.org/
-L: linuxppc-embedded@lists.linuxppc.org
-S: Maintained
-
LINUX FOR POWERPC EMBEDDED PPC85XX
P: Kumar Gala
M: kumar.gala@freescale.com
L: linux-scsi@vger.kernel.org
S: Maintained
-M68K ARCHITECTURE
-P: Geert Uytterhoeven
-M: geert@linux-m68k.org
-P: Roman Zippel
-M: zippel@linux-m68k.org
+M68K
+P: Jes Sorensen
+M: jes@trained-monkey.org
+W: http://www.clark.net/pub/lawrencc/linux/index.html
L: linux-m68k@lists.linux-m68k.org
-W: http://www.linux-m68k.org/
-W: http://linux-m68k-cvs.ubb.ca/
S: Maintained
M68K ON APPLE MACINTOSH
P: David Woodhouse
M: dwmw2@redhat.com
W: http://www.linux-mtd.infradead.org/
-L: linux-mtd@lists.infradead.org
+L: mtd@infradead.org
S: Maintained
MICROTEK X6 SCANNER
M: jmorris@redhat.com
P: Hideaki YOSHIFUJI
M: yoshfuji@linux-ipv6.org
-P: Patrick McHardy
-M: kaber@coreworks.de
L: netdev@oss.sgi.com
S: Maintained
ONSTREAM SCSI TAPE DRIVER
P: Willem Riede
M: osst@riede.org
-L: osst-users@lists.sourceforge.net
+L: osst@linux1.onstream.nl
L: linux-scsi@vger.kernel.org
S: Maintained
S: Maintained
SPARC (sparc32):
-P: William L. Irwin
-M: wli@holomorphy.com
+P: Keith M. Wesolowski
+M: wesolows@foobazco.org
L: sparclinux@vger.kernel.org
S: Maintained
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 8
-EXTRAVERSION = -1.521.2.5.planetlab
+SUBLEVEL = 7
+EXTRAVERSION = -1.planetlab
NAME=Zonked Quokka
# *DOCUMENTATION*
KBUILD_CHECKSRC = 0
endif
-# Use make M=dir to specify directory of external module to build
+# Use make M=dir to specify direcotry of external module to build
# Old syntax make ... SUBDIRS=$PWD is still supported
# Setting the environment variable KBUILD_EXTMOD take precedence
ifdef SUBDIRS
_all: modules
endif
+# Make sure we're not wasting cpu-cycles doing locale handling, yet do make
+# sure error messages appear in the user-desired language
+ifdef LC_ALL
+LANG := $(LC_ALL)
+LC_ALL :=
+endif
+LC_COLLATE := C
+LC_CTYPE := C
+export LANG LC_ALL LC_COLLATE LC_CTYPE
+
srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
TOPDIR := $(srctree)
# FIXME - TOPDIR is obsolete, use srctree/objtree
$(sort $(vmlinux-objs)) arch/$(ARCH)/kernel/vmlinux.lds.s: $(vmlinux-dirs) ;
-# Handle descending into subdirectories listed in $(vmlinux-dirs)
-# Preset locale variables to speed up the build process. Limit locale
-# tweaks to this spot to avoid wrong language settings when running
-# make menuconfig etc.
-# Error messages still appears in the original language
+# Handle descending into subdirectories listed in $(vmlinux-dirs)
.PHONY: $(vmlinux-dirs)
$(vmlinux-dirs): prepare-all scripts
# A multi level approach is used. prepare1 is updated first, then prepare0.
# prepare-all is the collection point for the prepare targets.
-.PHONY: prepare-all prepare prepare0 prepare1 prepare2
-
-# prepare 2 generate Makefile to be placed in output directory, if
-# using a seperate output directory. This allows convinient use
-# of make in output directory
-prepare2:
- $(Q)if [ ! $(srctree) -ef $(objtree) ]; then \
- $(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
- $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) \
- > $(objtree)/Makefile; \
- fi
+.PHONY: prepare-all prepare prepare0 prepare1
# prepare1 is used to check if we are building in a separate output directory,
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
# 2) Create the include2 directory, used for the second asm symlink
-prepare1: prepare2
+prepare1:
ifneq ($(KBUILD_SRC),)
@echo ' Using $(srctree) as source for kernel'
$(Q)if [ -h $(srctree)/include/asm -o -f $(srctree)/.config ]; then \
sleep 1; \
fi
@rm -rf $(MODLIB)/kernel
- @rm -f $(MODLIB)/source
+ @rm -f $(MODLIB)/build
@mkdir -p $(MODLIB)/kernel
- @ln -s $(srctree) $(MODLIB)/source
- @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
- rm -f $(MODLIB)/build ; \
- ln -s $(objtree) $(MODLIB)/build ; \
- fi
+ @ln -s $(TOPDIR) $(MODLIB)/build
$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
# If System.map exists, run depmod. This deliberately does not have a
# ---------------------------------------------------------------------------
define all-sources
- ( find $(srctree) $(RCS_FIND_IGNORE) \
+ ( find . $(RCS_FIND_IGNORE) \
\( -name include -o -name arch \) -prune -o \
-name '*.[chS]' -print; \
- find $(srctree)/arch/$(ARCH) $(RCS_FIND_IGNORE) \
+ find arch/$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/security/selinux/include $(RCS_FIND_IGNORE) \
+ find security/selinux/include $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/include $(RCS_FIND_IGNORE) \
+ find include $(RCS_FIND_IGNORE) \
\( -name config -o -name 'asm-*' \) -prune \
-o -name '*.[chS]' -print; \
- find $(srctree)/include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
+ find include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find $(srctree)/include/asm-generic $(RCS_FIND_IGNORE) \
+ find include/asm-generic $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print )
endef
- There are various README files in the Documentation/ subdirectory:
these typically contain kernel-specific installation notes for some
- drivers for example. See Documentation/00-INDEX for a list of what
+ drivers for example. See ./Documentation/00-INDEX for a list of what
is contained in each file. Please read the Changes file, as it
contains information about the problems, which may result by upgrading
your kernel.
Compiling and running the 2.6.xx kernels requires up-to-date
versions of various software packages. Consult
- Documentation/Changes for the minimum version numbers required
+ ./Documentation/Changes for the minimum version numbers required
and how to get updates for these packages. Beware that using
excessively old versions of these packages can cause indirect
errors that are very difficult to track down, so don't assume that
gcc 2.91.66 (egcs-1.1.2), and gcc 2.7.2.3 are known to miscompile
some parts of the kernel, and are *no longer supported*.
Also remember to upgrade your binutils package (for as/ld/nm and company)
- if necessary. For more information, refer to Documentation/Changes.
+ if necessary. For more information, refer to ./Documentation/Changes.
Please note that you can still run a.out user programs with this kernel.
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
* Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de>
*
* This driver is at all a modified version of Erik Mouw's
- * Documentation/DocBook/procfs_example.c, so: thank
+ * ./linux/Documentation/DocBook/procfs_example.c, so: thank
* you, Erik! He can be reached via email at
* <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
* provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 220 */
.quad alpha_ni_syscall
-#ifdef CONFIG_TUX
- .quad __sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .quad sys_tux
-# else
.quad alpha_ni_syscall
-# endif
-#endif
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 225 */
# Select various configuration options depending on the machine type
config DISCONTIGMEM
bool
- depends on ARCH_EDB7211 || ARCH_SA1100 || (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
+ depends on ARCH_EDB7211 || ARCH_SA1100 || (ARCH_LH7A40X && !LH7A40X_SROMLL)
default y
help
Say Y to support efficient handling of discontiguous physical memory,
help
This enables the CPUfreq driver for ARM Integrator CPUs.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say Y.
tune-$(CONFIG_CPU_V6) :=-mtune=strongarm
# Need -Uarm for gcc < 3.x
-CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) $(call check_gcc,-malignment-traps,-mshort-load-bytes) -msoft-float -Wa,-mno-fpu -Uarm
+CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
-CHECK := $(CHECK) -D__arm__=1
-
#Default value
DATAADDR := .
--defsym params_phys=$(PARAMS_PHYS) -T
AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\"
-targets := bootp init.o kernel.o initrd.o
+targets := bootp bootp.lds init.o kernel.o initrd.o
# Note that bootp.lds picks up kernel.o and initrd.o
-$(obj)/bootp: $(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE
+$(obj)/bootp: $(addprefix $(obj)/,bootp.lds init.o kernel.o initrd.o) FORCE
$(call if_changed,ld)
@:
.type _start, #function
.globl _start
-_start: add lr, pc, #-0x8 @ lr = current load addr
- adr r13, data
+_start: adr r13, data
ldmia r13!, {r4-r6} @ r5 = dest, r6 = length
- add r4, r4, lr @ r4 = initrd_start + load addr
bl move @ move the initrd
/*
CFLAGS_font.o := -Dstatic=
$(obj)/font.o: $(FONTC)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in Makefile arch/arm/boot/Makefile .config
@sed "$(SEDFLAGS)" < $< > $@
$(obj)/misc.o: $(obj)/misc.c include/asm/arch/uncompress.h lib/inflate.c
mov r0, #0x30
mcr p15, 0, r0, c1, c0, 0
mov r0, #0x13
- msr cpsr_cxsf, r0
+ msr cpsr, r0
mov r12, #0x03000000 @ point to LEDs
orr r12, r12, #0x00020000
orr r12, r12, #0xba00
/* Ensure all interrupts are off and MMU disabled */
mrs r0, cpsr
orr r0, r0, #0xc0
- msr cpsr_cxsf, r0
+ msr cpsr, r0
adr lr, 1b
orr lr, lr, #0x10000000
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_HOTPLUG is not set
+CONFIG_LOG_BUF_SHIFT=16
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE_PB is not set
+CONFIG_ARCH_S3C2410=y
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
-# CONFIG_ARCH_H1940 is not set
-# CONFIG_ARCH_SMDK2410 is not set
-CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+# CONFIG_HOTPLUG is not set
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
-# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
-CONFIG_S3C2410_DMA=y
-# CONFIG_S3C2410_DMA_DEBUG is not set
CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
CONFIG_ALIGNMENT_TRAP=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
+# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
+# CONFIG_NET_RADIO is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
-# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDE_BAST=y
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN is not set
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PARKBD is not set
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
-# CONFIG_SERIAL_BAST_SIO is not set
+# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_ISA is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PHILIPSPAR is not set
# CONFIG_SCx200_ACB is not set
#
-# Hardware Sensors Chip support
+# I2C Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_GL518SM is not set
+CONFIG_SENSORS_EEPROM=m
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM78=m
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
#
-# Other I2C Chip support
+# L3 serial bus support
#
-CONFIG_SENSORS_EEPROM=m
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_L3 is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
#
# File systems
#
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
+CONFIG_NLS=y
#
# Native Language Support
#
-CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Sound
+# Misc devices
#
-# CONFIG_SOUND is not set
#
-# Misc devices
+# Multimedia Capabilities Port drivers
#
+# CONFIG_MCP is not set
#
-# USB support
+# Console Switches
#
+# CONFIG_SWITCHES is not set
#
-# USB Gadget Support
+# USB support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
-# CONFIG_DEBUG_ICEDCC is not set
+CONFIG_DEBUG_LL_PRINTK=y
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
# CONFIG_CPU_IS_SLOW is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_ECONET_AUNUDP is not set
# CONFIG_ECONET_NATIVE is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_ECONET_AUNUDP=y
CONFIG_ECONET_NATIVE=y
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#
CONFIG_EXPERIMENTAL=y
# CONFIG_CLEAN_COMPILE is not set
+CONFIG_STANDALONE=y
CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE_PB is not set
+CONFIG_ARCH_S3C2410=y
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
CONFIG_ARCH_H1940=y
-CONFIG_ARCH_SMDK2410=y
-CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+# CONFIG_HOTPLUG is not set
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
-# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
+# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
-CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
# Self-contained MTD device drivers
#
# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
# CONFIG_MTD_BLKMTD is not set
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
+# CONFIG_NET_RADIO is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
-# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDE_BAST=y
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN is not set
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
# CONFIG_DIGI is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_RISCOM8 is not set
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
CONFIG_SERIAL_BAST_SIO=y
+# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_ISA is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PHILIPSPAR is not set
# CONFIG_SCx200_ACB is not set
#
-# Hardware Sensors Chip support
+# I2C Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_GL518SM is not set
+CONFIG_SENSORS_EEPROM=m
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
-# CONFIG_SENSORS_LM77 is not set
CONFIG_SENSORS_LM78=m
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
#
-# Other I2C Chip support
+# L3 serial bus support
#
-CONFIG_SENSORS_EEPROM=m
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_L3 is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
#
# File systems
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_JFFS_FS=y
CONFIG_JFFS_FS_VERBOSE=0
-# CONFIG_JFFS_PROC_FS is not set
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
# CONFIG_JFFS2_FS_NAND is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
+CONFIG_NLS=y
#
# Native Language Support
#
-CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Sound
+# Misc devices
#
-# CONFIG_SOUND is not set
#
-# Misc devices
+# Multimedia Capabilities Port drivers
#
+# CONFIG_MCP is not set
#
-# USB support
+# Console Switches
#
+# CONFIG_SWITCHES is not set
#
-# USB Gadget Support
+# USB support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_PRINTK=y
# CONFIG_DEBUG_ICEDCC is not set
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
return err;
}
-static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t apm_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
{
struct apm_user *as = fp->private_data;
apm_event_t event;
bl do_DataAbort
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
strne r0, [r0, -r0] @ bug()
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.ltorg
1: disable_irq r0
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr_cxsf, lr
+ msr spsr, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
.align 5
bl do_PrefetchAbort @ call abort handler
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
ands lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ msr spsr, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
ldr r0, [sp, #S_PSR] @ Get calling cpsr
sub lr, lr, #4
str lr, [r8]
- msr spsr_cxsf, r0
+ msr spsr, r0
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
ldr lr, [sp, #S_PC] @ Get PC
ldr r1, [sp, #S_PSR] @ Get calling cpsr
disable_irq ip @ disable IRQs
ldr lr, [sp, #S_PC]! @ Get PC
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro fast_restore_user_regs
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro slow_restore_user_regs
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
+ msr spsr, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
-void _memcpy_fromio(void *to, unsigned long from, size_t count)
+void _memcpy_fromio(void * to, unsigned long from, size_t count)
{
- unsigned char *t = to;
while (count) {
count--;
- *t = readb(from);
- t++;
+ *(char *) to = readb(from);
+ ((char *) to)++;
from++;
}
}
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
-void _memcpy_toio(unsigned long to, const void *from, size_t count)
+void _memcpy_toio(unsigned long to, const void * from, size_t count)
{
- const unsigned char *f = from;
while (count) {
count--;
- writeb(*f, to);
- f++;
+ writeb(*(char *) from, to);
+ ((char *) from)++;
to++;
}
}
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
+ info.si_addr = (void *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
__put_user_error(NULL, &frame->uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
- stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_sp = (void *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ if (get_user(fourth.__pad, (void __user **) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
unsigned int instr;
struct undef_hook *hook;
siginfo_t info;
- void __user *pc;
+ void *pc;
/*
* According to the ARM ARM, PC is 2 or 4 bytes ahead,
*/
regs->ARM_pc -= correction;
- pc = (void __user *)instruction_pointer(regs);
+ pc = (void *)instruction_pointer(regs);
if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ get_user(instr, (u16 *)pc);
} else {
- get_user(instr, (u32 __user *)pc);
+ get_user(instr, (u32 *)pc);
}
spin_lock_irq(&undef_lock);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
+ info.si_addr = (void *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
+ info.si_addr = (void *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)addr;
+ info.si_addr = (void *)addr;
force_sig_info(SIGILL, &info, current);
die_if_kernel("unknown data abort code", regs, instr);
#define CPSR2SPSR(rt) \
mrs rt, cpsr; \
- msr spsr_cxsf, rt
+ msr spsr, rt
@ Purpose: call an expansion card loader to read bytes.
@ Proto : char read_loader(int offset, char *card_base, char *loader);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
-EXPORT_SYMBOL(ixp4xx_pci_read);
-EXPORT_SYMBOL(ixp4xx_pci_write);
.flags = IORESOURCE_MEM,
};
-static struct platform_device coyote_flash = {
+static struct platform_device coyote_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &coyote_flash_resource,
};
-static struct platform_device *coyote_devices[] __initdata = {
- &coyote_flash
-};
-
static void __init coyote_init(void)
{
- platform_add_devices(&coyote_devices, ARRAY_SIZE(coyote_devices));
+ platform_add_device(&coyote_flash_device);
}
MACHINE_START(ADI_COYOTE, "ADI Engineering IXP4XX Coyote Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device ixdp425_flash = {
+static struct platform_device ixdp425_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.num_resources = 0
};
-static struct platform_device *ixdp425_devices[] __initdata = {
- &ixdp425_i2c_controller,
- &ixdp425_flash
-};
-
static void __init ixdp425_init(void)
{
- platform_add_devices(&ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
+ platform_add_device(&ixdp425_flash_device);
+ platform_add_device(&ixdp425_i2c_controller);
}
MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device prpmc1100_flash = {
+static struct platform_device prpmc1100_flash_device = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &prpmc1100_flash_resource,
};
-static struct platform_device *prpmc1100_devices[] __initdata = {
- &prpmc1100_flash
-};
-
static void __init prpmc1100_init(void)
{
- platform_add_devices(&prpmc1100_devices, ARRAY_SIZE(prpmc1100_devices));
+ platform_add_device(&prpmc1100_flash_device);
}
MACHINE_START(PRPMC1100, "Motorola PrPMC1100")
# Object file lists.
-obj-y := time.o
+obj-y := fiq.o time.o
+# generic.o
obj-$(CONFIG_MACH_KEV7A400) += arch-kev7a400.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A400) += arch-lpd7a40x.o irq-lh7a400.o
-obj-$(CONFIG_MACH_LPD7A404) += arch-lpd7a40x.o irq-lh7a404.o
+obj-$(CONFIG_MACH_LPD7A400) += arch-lpd7a40x.o ide-lpd7a40x.o irq-lh7a400.o
+obj-$(CONFIG_MACH_LPD7A404) += arch-lpd7a40x.o ide-lpd7a40x.o irq-lh7a404.o
obj-m :=
obj-n :=
#ifdef CONFIG_MACH_LPD7A400
extern void lh7a400_init_irq (void);
-extern void lh7a40x_init_time (void);
MACHINE_START (LPD7A400, "Logic Product Development LPD7A400-10")
MAINTAINER ("Marc Singer")
BOOT_PARAMS (0xc0000100)
MAPIO (lpd7a400_map_io)
INITIRQ (lh7a400_init_irq)
- INITTIME (lh7a40x_init_time)
INIT_MACHINE (lpd7a40x_init)
MACHINE_END
--- /dev/null
+/*
+ * linux/arch/arm/lib/lh7a400-fiqhandler.S
+ * Copyright (C) 2002, Lineo, Inc.
+ * based on linux/arch/arm/lib/floppydma.S, which is
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+ .global fiqhandler_end
+
+ @ register usage:
+ @ r8 &interrupt controller registers
+ @ r9 &gpio registers
+ @ r11 work
+ @ r12 work
+
+ENTRY(fiqhandler)
+
+ @ read the status register to find out which FIQ this is
+
+ ldr r12, [r8] @ intc->status
+ and r12, r12, #0xf @ only interested in low-order 4 bits
+
+ @ translate FIQ 0:3 to IRQ 23:26
+ @ disable this FIQ and enable the corresponding IRQ
+
+ str r12, [r8, #0xc] @ disable this FIQ
+ mov r12, r12, lsl #23 @ get the corresponding IRQ bit
+ str r12, [r8, #0x8] @ enable that IRQ
+
+ subs pc, lr, #4
+fiqhandler_end:
+
--- /dev/null
+/* arch/arm/mach-lh7a40x/ide-lpd7a40x.c
+ *
+ * Copyright (C) 2004 Logic Product Development
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/ide.h>
+
+#include <asm/io.h>
+
+#define IOBARRIER_READ readl (IOBARRIER_VIRT)
+
+static u8 lpd7a40x_ide_inb (unsigned long port)
+{
+ u16 v = (u16) readw (port & ~0x1);
+ IOBARRIER_READ;
+ if (port & 0x1)
+ v >>= 8;
+ return v & 0xff;
+}
+
+static u16 lpd7a40x_ide_inw (unsigned long port)
+{
+ u16 v = (u16) readw (port);
+ IOBARRIER_READ;
+ return v;
+}
+
+static void lpd7a40x_ide_insw (unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *((u16*) addr)++ = (u16) readw (port);
+ IOBARRIER_READ;
+ }
+}
+
+static u32 lpd7a40x_ide_inl (unsigned long port)
+{
+ u32 v = (u16) readw (port);
+ IOBARRIER_READ;
+ v |= (u16) readw (port + 2);
+ IOBARRIER_READ;
+
+ return v;
+}
+
+static void lpd7a40x_ide_insl (unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *((u16*) addr)++ = (u16) readw (port);
+ IOBARRIER_READ;
+ *((u16*) addr)++ = (u16) readw (port + 2);
+ IOBARRIER_READ;
+ }
+}
+
+/* lpd7a40x_ide_outb -- this function is complicated by the fact that
+ * the user wants to be able to do byte IO and the hardware cannot.
+ * In order to write the high byte, we need to write a short. So, we
+ * read before writing in order to maintain the register values that
+ * shouldn't change. This isn't a good idea for the data IO registers
+ * since reading from them will not return the current value. We
+ * expect that this function handles the control register adequately.
+*/
+
+static void lpd7a40x_ide_outb (u8 valueUser, unsigned long port)
+{
+ /* Block writes to SELECT register. Draconian, but the only
+ * way to cope with this hardware configuration without
+ * modifying the SELECT_DRIVE call in the ide driver. */
+ if ((port & 0xf) == 0x6)
+ return;
+
+ if (port & 0x1) { /* Perform read before write. Only
+ * the COMMAND register needs
+ * this. */
+ u16 value = (u16) readw (port & ~0x1);
+ IOBARRIER_READ;
+ value = (value & 0x00ff) | (valueUser << 8);
+ writew (value, port & ~0x1);
+ IOBARRIER_READ;
+ }
+ else { /* Allow low-byte writes which seem to
+ * be OK. */
+ writeb (valueUser, port);
+ IOBARRIER_READ;
+ }
+}
+
+static void lpd7a40x_ide_outbsync (ide_drive_t *drive, u8 value,
+ unsigned long port)
+{
+ lpd7a40x_ide_outb (value, port);
+}
+
+static void lpd7a40x_ide_outw (u16 value, unsigned long port)
+{
+ writew (value, port);
+ IOBARRIER_READ;
+}
+
+static void lpd7a40x_ide_outsw (unsigned long port, void *addr, u32 count)
+{
+ while (count-- > 0) {
+ writew (*((u16*) addr)++, port);
+ IOBARRIER_READ;
+ }
+}
+
+static void lpd7a40x_ide_outl (u32 value, unsigned long port)
+{
+ writel (value, port);
+ IOBARRIER_READ;
+}
+
+static void lpd7a40x_ide_outsl (unsigned long port, void *addr, u32 count)
+{
+ while (count-- > 0) {
+ writel (*((u32*) addr)++, port);
+ IOBARRIER_READ;
+ }
+}
+
+void lpd7a40x_SELECT_DRIVE (ide_drive_t *drive)
+{
+ unsigned jifStart = jiffies;
+#define WAIT_TIME (30*HZ/1000)
+
+ /* Check for readiness. */
+ while ((HWIF(drive)->INB(IDE_STATUS_REG) & 0x40) == 0)
+ if (jifStart <= jiffies + WAIT_TIME)
+ return;
+
+ /* Only allow one drive.
+ For more information, see Documentation/arm/Sharp-LH/ */
+ if (drive->select.all & (1<<4))
+ return;
+
+ /* OUTW so that the IDLE_IMMEDIATE (and not NOP) command is sent. */
+ HWIF(drive)->OUTW(drive->select.all | 0xe100, IDE_SELECT_REG);
+}
+
+void lpd7a40x_hwif_ioops (ide_hwif_t *hwif)
+{
+ hwif->mmio = 2; /* Just for show */
+ hwif->irq = IDE_NO_IRQ; /* Stop this probing */
+
+ hwif->OUTB = lpd7a40x_ide_outb;
+ hwif->OUTBSYNC = lpd7a40x_ide_outbsync;
+ hwif->OUTW = lpd7a40x_ide_outw;
+ hwif->OUTL = lpd7a40x_ide_outl;
+ hwif->OUTSW = lpd7a40x_ide_outsw;
+ hwif->OUTSL = lpd7a40x_ide_outsl;
+ hwif->INB = lpd7a40x_ide_inb;
+ hwif->INW = lpd7a40x_ide_inw;
+ hwif->INL = lpd7a40x_ide_inl;
+ hwif->INSW = lpd7a40x_ide_insw;
+ hwif->INSL = lpd7a40x_ide_insl;
+ hwif->selectproc = lpd7a40x_SELECT_DRIVE;
+}
<http://www.fsforth.de>
config MACH_VR1000
- bool "Thorcom VR1000"
+ bool "Simtec VR1000"
help
- Say Y here if you are using the Thorcom VR1000 board.
-
- This linux port is currently being maintained by Simtec, on behalf
- of Thorcom. Any queries, please contact Thorcom first.
+ Say Y here if you are using the Simtec VR1000 board.
endmenu
# Object file lists.
-obj-y := s3c2410.o irq.o time.o gpio.o
+obj-y := s3c2410.o irq.o time.o
obj-m :=
obj-n :=
obj- :=
+++ /dev/null
-/* linux/arch/arm/mach-s3c2410/gpio.c
- *
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 GPIO support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-
-#include <asm/hardware.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-
-#include <asm/arch/regs-gpio.h>
-
-void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long shift = 1;
- unsigned long mask = 3;
- unsigned long con;
- unsigned long flags;
-
- if (pin < S3C2410_GPIO_BANKB) {
- shift = 0;
- mask = 1;
- }
-
- mask <<= S3C2410_GPIO_OFFSET(pin);
-
- local_irq_save(flags);
-
- con = __raw_readl(base + 0x00);
-
- con &= mask << shift;
- con |= function;
-
- __raw_writel(con, base + 0x00);
-
- local_irq_restore(flags);
-}
-
-void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long up;
-
- if (pin < S3C2410_GPIO_BANKB)
- return;
-
- local_irq_save(flags);
-
- up = __raw_readl(base + 0x08);
- up &= 1 << offs;
- up |= to << offs;
- __raw_writel(up, base + 0x08);
-
- local_irq_restore(flags);
-}
-
-void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
-{
- unsigned long base = S3C2410_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long dat;
-
- local_irq_save(flags);
-
- dat = __raw_readl(base + 0x04);
- dat &= 1 << offs;
- dat |= to << offs;
- __raw_writel(dat, base + 0x04);
-
- local_irq_restore(flags);
-}
[0] = {
.hwport = 0,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
[1] = {
.hwport = 1,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x245,
.ulcon = 0x03,
.ufcon = 0x00,
[2] = {
.hwport = 2,
.flags = 0,
- .clock = &s3c2410_pclk,
+ .clock = &s3c2410_hclk,
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0x51,
void __init smdk2410_init_time(void)
{
- s3c2410_init_time();
+ s3c2401_init_time();
}
MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
* published by the Free Software Foundation.
*
* Modifications:
- * 06-Aug-2004 BJD Fixed call to time initialisation
* 12-Jul-2004 BJD Renamed machine
* 16-May-2003 BJD Created initial version
* 16-Aug-2003 BJD Fixed header files and copyright, added URL
void __init vr1000_init_time(void)
{
- s3c2410_init_time();
+ s3c2401_init_time();
}
MACHINE_START(VR1000, "Thorcom-VR1000")
extern void s3c2410_init_irq(void);
-extern void s3c2410_init_time(void);
+extern s3c2410_init_time(void);
MAPIO(collie_map_io)
INITIRQ(sa1100_init_irq)
INIT_MACHINE(collie_init)
- INITTIME(sa1100_init_time)
MACHINE_END
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
- unsigned int err_pos = 1;
unsigned int cache_type;
int i;
unregister_undef_hook(&blockops_hook);
- for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
- printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
+ for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
+ printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
/* We must not map this if we have highmem enabled */
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08lx", pte_val(*pte));
+#ifdef CONFIG_CPU_32
printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
+#endif
pte_unmap(pte);
#endif
} while(0);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = code;
- si.si_addr = (void __user *)addr;
+ si.si_addr = (void *)addr;
force_sig_info(SIGSEGV, &si, tsk);
}
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#define TABLE_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
+#ifdef CONFIG_CPU_32
+#define TABLE_OFFSET (PTRS_PER_PTE)
+#else
+#define TABLE_OFFSET 0
+#endif
+
+#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
*/
reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#ifdef CONFIG_CPU_32
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
PTRS_PER_PGD * sizeof(pgd_t));
-
+#endif
/*
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
arch_adjust_zones(node, zone_size, zhole_size);
- free_area_init_node(node, pgdat, NULL, zone_size,
+ free_area_init_node(node, pgdat, 0, zone_size,
bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
}
* stack+task struct. Use the same method as 'current' uses to
* reach them.
*/
-register unsigned long *user_registers asm("sl");
+register unsigned int *user_registers asm("sl");
#define GET_USERREG() (user_registers)
#include <asm/uaccess.h>
-static inline void loadSingle(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadSingle(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
fpa11->fType[Fn] = typeSingle;
get_user(fpa11->fpreg[Fn].fSingle, pMem);
}
-static inline void loadDouble(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadDouble(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void loadExtended(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadExtended(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#endif
-static inline void loadMultiple(const unsigned int Fn, const unsigned int __user *pMem)
+static inline void loadMultiple(const unsigned int Fn, const unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int *p;
}
}
-static inline void storeSingle(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeSingle(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
put_user(val.i[0], pMem);
}
-static inline void storeDouble(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeDouble(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeExtended(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#endif
-static inline void storeMultiple(const unsigned int Fn, unsigned int __user *pMem)
+static inline void storeMultiple(const unsigned int Fn, unsigned int *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int nType, *p;
unsigned int PerformLDF(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
+ unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return nRc;
}
unsigned int PerformSTF(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
+ unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
+ write_back = WRITE_BACK(opcode);
SetRoundingMode(ROUND_TO_NEAREST);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return nRc;
}
unsigned int PerformLFM(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int i, Fd, write_back = WRITE_BACK(opcode);
+ unsigned int i, Fd, *pBase, *pAddress, *pFinal,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return 1;
}
unsigned int PerformSFM(const unsigned int opcode)
{
- unsigned int __user *pBase, *pAddress, *pFinal;
- unsigned int i, Fd, write_back = WRITE_BACK(opcode);
+ unsigned int i, Fd, *pBase, *pAddress, *pFinal,
+ write_back = WRITE_BACK(opcode);
- pBase = (unsigned int __user *) readRegister(getRn(opcode));
+ pBase = (unsigned int *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned long) pFinal);
+ writeRegister(getRn(opcode), (unsigned int) pFinal);
return 1;
}
#ifdef CONFIG_DEBUG_USER
printk(KERN_DEBUG
- "NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
+ "NWFPE: %s[%d] takes exception %08x at %p from %08x\n",
current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()[15]);
#endif
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-static inline unsigned long readRegister(const unsigned int nReg)
+extern __inline__
+unsigned int readRegister(const unsigned int nReg)
{
/* Note: The CPU thinks it has dealt with the current instruction.
As a result the program counter has been advanced to the next
for this in this routine. LDF/STF instructions with Rn = PC
depend on the PC being correct, as they use PC+8 in their
address calculations. */
- unsigned long *userRegisters = GET_USERREG();
+ unsigned int *userRegisters = GET_USERREG();
unsigned int val = userRegisters[nReg];
if (REG_PC == nReg)
val -= 4;
return val;
}
-static inline void
-writeRegister(const unsigned int nReg, const unsigned long val)
+extern __inline__
+void writeRegister(const unsigned int nReg, const unsigned int val)
{
- unsigned long *userRegisters = GET_USERREG();
+ unsigned int *userRegisters = GET_USERREG();
userRegisters[nReg] = val;
}
-static inline unsigned long readCPSR(void)
+extern __inline__
+unsigned int readCPSR(void)
{
return (readRegister(REG_CPSR));
}
-static inline void writeCPSR(const unsigned long val)
+extern __inline__
+void writeCPSR(const unsigned int val)
{
writeRegister(REG_CPSR, val);
}
-static inline unsigned long readConditionCodes(void)
+extern __inline__
+unsigned int readConditionCodes(void)
{
#ifdef __FPEM_TEST__
return (0);
#endif
}
-static inline void writeConditionCodes(const unsigned long val)
+extern __inline__
+void writeConditionCodes(const unsigned int val)
{
- unsigned long *userRegisters = GET_USERREG();
- unsigned long rval;
+ unsigned int *userRegisters = GET_USERREG();
+ unsigned int rval;
/*
* Operate directly on userRegisters since
* the CPSR may be the PC register itself.
rval = userRegisters[REG_CPSR] & ~CC_MASK;
userRegisters[REG_CPSR] = rval | (val & CC_MASK);
}
+
+extern __inline__
+unsigned int readMemoryInt(unsigned int *pMem)
+{
+ return *pMem;
+}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
libs-y += arch/i386/lib/
core-y += arch/i386/kernel/ \
arch/i386/mm/ \
- arch/i386/$(mcore-y)/ \
- arch/i386/crypto/
+ arch/i386/$(mcore-y)/
drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
drivers-$(CONFIG_PCI) += arch/i386/pci/
# must be linked after kernel/
--- /dev/null
+#
+# linux/arch/i386/boot/compressed/Makefile
+#
+# create a compressed vmlinux image from the original vmlinux
+#
+
+targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+EXTRA_AFLAGS := -traditional
+
+LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32
+
+$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+ $(call if_changed,ld)
+ @:
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
+
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,ld)
--- /dev/null
+/*
+ * linux/boot/head.S
+ *
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ */
+
+/*
+ * head.S contains the 32-bit startup code.
+ *
+ * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
+ * the page directory will exist. The startup code will be overwritten by
+ * the page directory. [According to comments etc elsewhere on a compressed
+ * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
+ *
+ * Page 0 is deliberately kept safe, since System Management Mode code in
+ * laptops may need to access the BIOS data stored there. This is also
+ * useful for future device drivers that access the BIOS via VM86
+ * mode.
+ */
+
+/*
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+.text
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+ .globl startup_32
+
+startup_32:
+ cld
+ cli
+ movl $(__BOOT_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+ movl %eax,%fs
+ movl %eax,%gs
+
+ lss stack_start,%esp
+ xorl %eax,%eax
+1: incl %eax # check that A20 really IS enabled
+ movl %eax,0x000000 # loop forever if it isn't
+ cmpl %eax,0x100000
+ je 1b
+
+/*
+ * Initialize eflags. Some BIOS's leave bits like NT set. This would
+ * confuse the debugger if this code is traced.
+ * XXX - best to initialize before switching to protected mode.
+ */
+ pushl $0
+ popfl
+/*
+ * Clear BSS
+ */
+ xorl %eax,%eax
+ movl $_edata,%edi
+ movl $_end,%ecx
+ subl %edi,%ecx
+ cld
+ rep
+ stosb
+/*
+ * Do the decompression, and jump to the new kernel..
+ */
+ subl $16,%esp # place for structure on the stack
+ movl %esp,%eax
+ pushl %esi # real mode pointer as second arg
+ pushl %eax # address of structure as first arg
+ call decompress_kernel
+ orl %eax,%eax
+ jnz 3f
+ popl %esi # discard address
+ popl %esi # real mode pointer
+ xorl %ebx,%ebx
+ ljmp $(__BOOT_CS), $0x100000
+
+/*
+ * We come here, if we were loaded high.
+ * We need to move the move-in-place routine down to 0x1000
+ * and then start it with the buffer addresses in registers,
+ * which we got from the stack.
+ */
+3:
+ movl $move_routine_start,%esi
+ movl $0x1000,%edi
+ movl $move_routine_end,%ecx
+ subl %esi,%ecx
+ addl $3,%ecx
+ shrl $2,%ecx
+ cld
+ rep
+ movsl
+
+ popl %esi # discard the address
+ popl %ebx # real mode pointer
+ popl %esi # low_buffer_start
+ popl %ecx # lcount
+ popl %edx # high_buffer_start
+ popl %eax # hcount
+ movl $0x100000,%edi
+ cli # make sure we don't get interrupted
+ ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
+
+/*
+ * Routine (template) for moving the decompressed kernel in place,
+ * if we were high loaded. This _must_ be PIC code!
+ */
+move_routine_start:
+ movl %ecx,%ebp
+ shrl $2,%ecx
+ rep
+ movsl
+ movl %ebp,%ecx
+ andl $3,%ecx
+ rep
+ movsb
+ movl %edx,%esi
+ movl %eax,%ecx # NOTE: rep movsb won't move if %ecx == 0
+ addl $3,%ecx
+ shrl $2,%ecx
+ rep
+ movsl
+ movl %ebx,%esi # Restore setup pointer
+ xorl %ebx,%ebx
+ ljmp $(__BOOT_CS), $0x100000
+move_routine_end:
--- /dev/null
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ * puts by Nick Holloway 1993, better puts by Martin Mares 1995
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+
+#include <linux/linkage.h>
+#include <linux/vmalloc.h>
+#include <linux/tty.h>
+#include <asm/io.h>
+#ifdef STANDARD_MEMORY_BIOS_CALL
+#undef STANDARD_MEMORY_BIOS_CALL
+#endif
+
+/*
+ * gzip declarations
+ */
+
+#define OF(args) args
+#define STATIC static
+
+#undef memset
+#undef memcpy
+
+/*
+ * Why do we do this? Don't ask me..
+ *
+ * Incomprehensible are the ways of bootloaders.
+ */
+static void* memset(void *, int, size_t);
+static void* memcpy(void *, __const void *, size_t);
+#define memzero(s, n) memset ((s), 0, (n))
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+#define WSIZE 0x8000 /* Window size must be at least 32k, */
+ /* and a power of two */
+
+static uch *inbuf; /* input buffer */
+static uch window[WSIZE]; /* Sliding window buffer */
+
+static unsigned insize = 0; /* valid bytes in inbuf */
+static unsigned inptr = 0; /* index of next byte to be processed in inbuf */
+static unsigned outcnt = 0; /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+static unsigned char *real_mode; /* Pointer to real-mode data */
+
+#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
+#ifndef STANDARD_MEMORY_BIOS_CALL
+#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
+#endif
+#define SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+
+extern char input_data[];
+extern int input_len;
+
+static long bytes_out = 0;
+static uch *output_data;
+static unsigned long output_ptr = 0;
+
+static void *malloc(int size);
+static void free(void *where);
+
+static void puts(const char *);
+
+extern int end;
+static long free_mem_ptr = (long)&end;
+static long free_mem_end_ptr;
+
+#define INPLACE_MOVE_ROUTINE 0x1000
+#define LOW_BUFFER_START 0x2000
+#define LOW_BUFFER_MAX 0x90000
+#define HEAP_SIZE 0x3000
+static unsigned int low_buffer_end, low_buffer_size;
+static int high_loaded =0;
+static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
+
+static char *vidmem = (char *)0xa0000;
+static int lines, cols;
+
+#ifdef CONFIG_X86_NUMAQ
+static void * xquad_portio = NULL;
+#endif
+
+#include "../../../../lib/inflate.c"
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size <0) error("Malloc error");
+ if (free_mem_ptr <= 0) error("Memory error");
+
+ free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
+
+ p = (void *)free_mem_ptr;
+ free_mem_ptr += size;
+
+ if (free_mem_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ return p;
+}
+
+static void free(void *where)
+{ /* Don't care */
+}
+
+static void gzip_mark(void **ptr)
+{
+ *ptr = (void *) free_mem_ptr;
+}
+
+static void gzip_release(void **ptr)
+{
+ free_mem_ptr = (long) *ptr;
+}
+
+static void scroll(void)
+{
+ int i;
+
+ memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
+ for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+ vidmem[i] = ' ';
+}
+
+static void puts(const char *s)
+{
+ int x,y,pos;
+ char c;
+
+ x = SCREEN_INFO.orig_x;
+ y = SCREEN_INFO.orig_y;
+
+ while ( ( c = *s++ ) != '\0' ) {
+ if ( c == '\n' ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ } else {
+ vidmem [ ( x + cols * y ) * 2 ] = c;
+ if ( ++x >= cols ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ }
+ }
+ }
+
+ SCREEN_INFO.orig_x = x;
+ SCREEN_INFO.orig_y = y;
+
+ pos = x + cols * y; /* Update cursor position */
+ while (!(inb_p(0x60) & 4));
+ outb_p(0x49, 0x62);
+ outb_p(pos & 0xff, 0x60);
+ outb_p((pos >> 8) & 0xff, 0x60);
+}
+
+static void* memset(void* s, int c, size_t n)
+{
+ int i;
+ char *ss = (char*)s;
+
+ for (i=0;i<n;i++) ss[i] = c;
+ return s;
+}
+
+static void* memcpy(void* __dest, __const void* __src,
+ size_t __n)
+{
+ int i;
+ char *d = (char *)__dest, *s = (char *)__src;
+
+ for (i=0;i<__n;i++) d[i] = s[i];
+ return __dest;
+}
+
+/* ===========================================================================
+ * Fill the input buffer. This is called only when the buffer is empty
+ * and at least one byte is really needed.
+ */
+static int fill_inbuf(void)
+{
+ if (insize != 0) {
+ error("ran out of input data");
+ }
+
+ inbuf = input_data;
+ insize = input_len;
+ inptr = 1;
+ return inbuf[0];
+}
+
+/* ===========================================================================
+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+ * (Used for the decompressed data only.)
+ */
+static void flush_window_low(void)
+{
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, *out, ch;
+
+ in = window;
+ out = &output_data[output_ptr];
+ for (n = 0; n < outcnt; n++) {
+ ch = *out++ = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ output_ptr += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void flush_window_high(void)
+{
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, ch;
+ in = window;
+ for (n = 0; n < outcnt; n++) {
+ ch = *output_data++ = *in++;
+ if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void flush_window(void)
+{
+ if (high_loaded) flush_window_high();
+ else flush_window_low();
+}
+
+static void error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+#define STACK_SIZE (4096)
+
+long user_stack [STACK_SIZE];
+
+struct {
+ long * a;
+ short b;
+ } stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
+
+static void setup_normal_output_buffer(void)
+{
+#ifdef STANDARD_MEMORY_BIOS_CALL
+ if (EXT_MEM_K < 1024) error("Less than 2MB of memory");
+#else
+ if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
+#endif
+ output_data = (char *)0x100000; /* Points to 1M */
+ free_mem_end_ptr = (long)real_mode;
+}
+
+struct moveparams {
+ uch *low_buffer_start; int lcount;
+ uch *high_buffer_start; int hcount;
+};
+
+static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
+{
+ high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
+#ifdef STANDARD_MEMORY_BIOS_CALL
+ if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
+#else
+ if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
+#endif
+ mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START;
+ low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
+ ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
+ low_buffer_size = low_buffer_end - LOW_BUFFER_START;
+ high_loaded = 1;
+ free_mem_end_ptr = (long)high_buffer_start;
+ if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) {
+ high_buffer_start = (uch *)(0x100000 + low_buffer_size);
+ mv->hcount = 0; /* say: we don't need to move high_buffer */
+ }
+ else mv->hcount = -1;
+ mv->high_buffer_start = high_buffer_start;
+}
+
+static void close_output_buffer_if_we_run_high(struct moveparams *mv)
+{
+ if (bytes_out > low_buffer_size) {
+ mv->lcount = low_buffer_size;
+ if (mv->hcount)
+ mv->hcount = bytes_out - low_buffer_size;
+ } else {
+ mv->lcount = bytes_out;
+ mv->hcount = 0;
+ }
+}
+
+
+asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
+{
+ real_mode = rmode;
+
+ vidmem = (char *)(((unsigned int)SCREEN_INFO.orig_video_page) << 4);
+
+ lines = SCREEN_INFO.orig_video_lines;
+ cols = SCREEN_INFO.orig_video_cols;
+
+ if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
+ else setup_output_buffer_if_we_run_high(mv);
+
+ makecrc();
+ puts("Uncompressing Linux... ");
+ gunzip();
+ puts("Ok, booting the kernel.\n");
+ if (high_loaded) close_output_buffer_if_we_run_high(mv);
+ return high_loaded;
+}
+
+/* We don't actually check for stack overflows this early. */
+__asm__(".globl mcount ; mcount: ret\n");
+
--- /dev/null
+SECTIONS
+{
+ .data : {
+ input_len = .;
+ LONG(input_data_end - input_data) input_data = .;
+ *(.data)
+ input_data_end = .;
+ }
+}
--- /dev/null
+/*
+ * $Id: build.c,v 1.5 1997/05/19 12:29:58 mj Exp $
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares
+ */
+
+/*
+ * This file builds a disk-image from three different files:
+ *
+ * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest
+ * - setup: 8086 machine code, sets up system parm
+ * - system: 80386 code for actual system
+ *
+ * It does some checking that all files are of the correct type, and
+ * just writes the result to stdout, removing headers and padding to
+ * the right amount. It also writes some system data to stderr.
+ */
+
+/*
+ * Changes by tytso to allow root device specification
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ * Cross compiling fixes by Gertjan van Wingerde, July 1996
+ * Rewritten by Martin Mares, April 1997
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <asm/boot.h>
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long u32;
+
+#define DEFAULT_MAJOR_ROOT 0
+#define DEFAULT_MINOR_ROOT 0
+
+/* Minimal number of setup sectors (see also bootsect.S) */
+#define SETUP_SECTS 4
+
+byte buf[1024];
+int fd;
+int is_big_kernel;
+
+void die(const char * str, ...)
+{
+ va_list args;
+ va_start(args, str);
+ vfprintf(stderr, str, args);
+ fputc('\n', stderr);
+ exit(1);
+}
+
+void file_open(const char *name)
+{
+ if ((fd = open(name, O_RDONLY, 0)) < 0)
+ die("Unable to open `%s': %m", name);
+}
+
+void usage(void)
+{
+ die("Usage: build [-b] bootsect setup system [rootdev] [> image]");
+}
+
+int main(int argc, char ** argv)
+{
+ unsigned int i, c, sz, setup_sectors;
+ u32 sys_size;
+ byte major_root, minor_root;
+ struct stat sb;
+
+ if (argc > 2 && !strcmp(argv[1], "-b"))
+ {
+ is_big_kernel = 1;
+ argc--, argv++;
+ }
+ if ((argc < 4) || (argc > 5))
+ usage();
+ if (argc > 4) {
+ if (!strcmp(argv[4], "CURRENT")) {
+ if (stat("/", &sb)) {
+ perror("/");
+ die("Couldn't stat /");
+ }
+ major_root = major(sb.st_dev);
+ minor_root = minor(sb.st_dev);
+ } else if (strcmp(argv[4], "FLOPPY")) {
+ if (stat(argv[4], &sb)) {
+ perror(argv[4]);
+ die("Couldn't stat root device.");
+ }
+ major_root = major(sb.st_rdev);
+ minor_root = minor(sb.st_rdev);
+ } else {
+ major_root = 0;
+ minor_root = 0;
+ }
+ } else {
+ major_root = DEFAULT_MAJOR_ROOT;
+ minor_root = DEFAULT_MINOR_ROOT;
+ }
+ fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
+
+ file_open(argv[1]);
+ i = read(fd, buf, sizeof(buf));
+ fprintf(stderr,"Boot sector %d bytes.\n",i);
+ if (i != 512)
+ die("Boot block must be exactly 512 bytes");
+ if (buf[510] != 0x55 || buf[511] != 0xaa)
+ die("Boot block hasn't got boot flag (0xAA55)");
+ buf[508] = minor_root;
+ buf[509] = major_root;
+ if (write(1, buf, 512) != 512)
+ die("Write call failed");
+ close (fd);
+
+ file_open(argv[2]); /* Copy the setup code */
+ for (i=0 ; (c=read(fd, buf, sizeof(buf)))>0 ; i+=c )
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ if (c != 0)
+ die("read-error on `setup'");
+ close (fd);
+
+ setup_sectors = (i + 511) / 512; /* Pad unused space with zeros */
+ if (!(setup_sectors & 1))
+ setup_sectors++; /* setup_sectors must be odd on NEC PC-9800 */
+ fprintf(stderr, "Setup is %d bytes.\n", i);
+ memset(buf, 0, sizeof(buf));
+ while (i < setup_sectors * 512) {
+ c = setup_sectors * 512 - i;
+ if (c > sizeof(buf))
+ c = sizeof(buf);
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ i += c;
+ }
+
+ file_open(argv[3]);
+ if (fstat (fd, &sb))
+ die("Unable to stat `%s': %m", argv[3]);
+ sz = sb.st_size;
+ fprintf (stderr, "System is %d kB\n", sz/1024);
+ sys_size = (sz + 15) / 16;
+ /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */
+ if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE))
+ die("System is too big. Try using %smodules.",
+ is_big_kernel ? "" : "bzImage or ");
+ while (sz > 0) {
+ int l, n;
+
+ l = (sz > sizeof(buf)) ? sizeof(buf) : sz;
+ if ((n=read(fd, buf, l)) != l) {
+ if (n < 0)
+ die("Error reading %s: %m", argv[3]);
+ else
+ die("%s: Unexpected EOF", argv[3]);
+ }
+ if (write(1, buf, l) != l)
+ die("Write failed");
+ sz -= l;
+ }
+ close(fd);
+
+ if (lseek(1, 497, SEEK_SET) != 497) /* Write sizes to the bootsector */
+ die("Output: seek failed");
+ buf[0] = setup_sectors;
+ if (write(1, buf, 1) != 1)
+ die("Write of setup sector count failed");
+ if (lseek(1, 500, SEEK_SET) != 500)
+ die("Output: seek failed");
+ buf[0] = (sys_size & 0xff);
+ buf[1] = ((sys_size >> 8) & 0xff);
+ if (write(1, buf, 2) != 2)
+ die("Write of image length failed");
+
+ return 0; /* Everything is OK */
+}
+++ /dev/null
-#
-# i386/crypto/Makefile
-#
-# Arch-specific CryptoAPI modules.
-#
-
-obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
-
-aes-i586-y := aes-i586-asm.o aes.o
+++ /dev/null
-// -------------------------------------------------------------------------
-// Copyright (c) 2001, Dr Brian Gladman < >, Worcester, UK.
-// All rights reserved.
-//
-// LICENSE TERMS
-//
-// The free distribution and use of this software in both source and binary
-// form is allowed (with or without changes) provided that:
-//
-// 1. distributions of this source code include the above copyright
-// notice, this list of conditions and the following disclaimer//
-//
-// 2. distributions in binary form include the above copyright
-// notice, this list of conditions and the following disclaimer
-// in the documentation and/or other associated materials//
-//
-// 3. the copyright holder's name is not used to endorse products
-// built using this software without specific written permission.
-//
-//
-// ALTERNATIVELY, provided that this notice is retained in full, this product
-// may be distributed under the terms of the GNU General Public License (GPL),
-// in which case the provisions of the GPL apply INSTEAD OF those given above.
-//
-// Copyright (c) 2004 Linus Torvalds <torvalds@osdl.org>
-// Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
-
-// DISCLAIMER
-//
-// This software is provided 'as is' with no explicit or implied warranties
-// in respect of its properties including, but not limited to, correctness
-// and fitness for purpose.
-// -------------------------------------------------------------------------
-// Issue Date: 29/07/2002
-
-.file "aes-i586-asm.S"
-.text
-
-// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])//
-
-#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
-
-// offsets to parameters with one register pushed onto stack
-
-#define in_blk 8 // input byte array address parameter
-#define out_blk 12 // output byte array address parameter
-#define ctx 16 // AES context structure
-
-// offsets in context structure
-
-#define ekey 0 // encryption key schedule base address
-#define nrnd 256 // number of rounds
-#define dkey 260 // decryption key schedule base address
-
-// register mapping for encrypt and decrypt subroutines
-
-#define r0 eax
-#define r1 ebx
-#define r2 ecx
-#define r3 edx
-#define r4 esi
-#define r5 edi
-#define r6 ebp
-
-#define eaxl al
-#define eaxh ah
-#define ebxl bl
-#define ebxh bh
-#define ecxl cl
-#define ecxh ch
-#define edxl dl
-#define edxh dh
-
-#define _h(reg) reg##h
-#define h(reg) _h(reg)
-
-#define _l(reg) reg##l
-#define l(reg) _l(reg)
-
-// This macro takes a 32-bit word representing a column and uses
-// each of its four bytes to index into four tables of 256 32-bit
-// words to obtain values that are then xored into the appropriate
-// output registers r0, r1, r4 or r5.
-
-// Parameters:
-// %1 out_state[0]
-// %2 out_state[1]
-// %3 out_state[2]
-// %4 out_state[3]
-// %5 table base address
-// %6 input register for the round (destroyed)
-// %7 scratch register for the round
-
-#define do_col(a1, a2, a3, a4, a5, a6, a7) \
- movzx %l(a6),%a7; \
- xor a5(,%a7,4),%a1; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+2*tlen(,%a7,4),%a3; \
- xor a5+3*tlen(,%a6,4),%a4;
-
-// initialise output registers from the key schedule
-
-#define do_fcol(a1, a2, a3, a4, a5, a6, a7, a8) \
- mov 0 a8,%a1; \
- movzx %l(a6),%a7; \
- mov 12 a8,%a2; \
- xor a5(,%a7,4),%a1; \
- mov 4 a8,%a4; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+3*tlen(,%a6,4),%a4; \
- mov %a3,%a6; \
- mov 8 a8,%a3; \
- xor a5+2*tlen(,%a7,4),%a3;
-
-// initialise output registers from the key schedule
-
-#define do_icol(a1, a2, a3, a4, a5, a6, a7, a8) \
- mov 0 a8,%a1; \
- movzx %l(a6),%a7; \
- mov 4 a8,%a2; \
- xor a5(,%a7,4),%a1; \
- mov 12 a8,%a4; \
- movzx %h(a6),%a7; \
- shr $16,%a6; \
- xor a5+tlen(,%a7,4),%a2; \
- movzx %l(a6),%a7; \
- movzx %h(a6),%a6; \
- xor a5+3*tlen(,%a6,4),%a4; \
- mov %a3,%a6; \
- mov 8 a8,%a3; \
- xor a5+2*tlen(,%a7,4),%a3;
-
-
-// original Gladman had conditional saves to MMX regs.
-#define save(a1, a2) \
- mov %a2,4*a1(%esp)
-
-#define restore(a1, a2) \
- mov 4*a2(%esp),%a1
-
-// This macro performs a forward encryption cycle. It is entered with
-// the first previous round column values in r0, r1, r4 and r5 and
-// exits with the final values in the same registers, using the MMX
-// registers mm0-mm1 or the stack for temporary storage
-
-// mov current column values into the MMX registers
-#define fwd_rnd(arg, table) \
- /* mov current column values into the MMX registers */ \
- mov %r0,%r2; \
- save (0,r1); \
- save (1,r5); \
- \
- /* compute new column values */ \
- do_fcol(r0,r5,r4,r1,table, r2,r3, arg); \
- do_col (r4,r1,r0,r5,table, r2,r3); \
- restore(r2,0); \
- do_col (r1,r0,r5,r4,table, r2,r3); \
- restore(r2,1); \
- do_col (r5,r4,r1,r0,table, r2,r3);
-
-// This macro performs an inverse encryption cycle. It is entered with
-// the first previous round column values in r0, r1, r4 and r5 and
-// exits with the final values in the same registers, using the MMX
-// registers mm0-mm1 or the stack for temporary storage
-
-#define inv_rnd(arg, table) \
- /* mov current column values into the MMX registers */ \
- mov %r0,%r2; \
- save (0,r1); \
- save (1,r5); \
- \
- /* compute new column values */ \
- do_icol(r0,r1,r4,r5, table, r2,r3, arg); \
- do_col (r4,r5,r0,r1, table, r2,r3); \
- restore(r2,0); \
- do_col (r1,r4,r5,r0, table, r2,r3); \
- restore(r2,1); \
- do_col (r5,r0,r1,r4, table, r2,r3);
-
-// AES (Rijndael) Encryption Subroutine
-
-.global aes_enc_blk
-
-.extern ft_tab
-.extern fl_tab
-
-.align 4
-
-aes_enc_blk:
- push %ebp
- mov ctx(%esp),%ebp // pointer to context
- xor %eax,%eax
-
-// CAUTION: the order and the values used in these assigns
-// rely on the register mappings
-
-1: push %ebx
- mov in_blk+4(%esp),%r2
- push %esi
- mov nrnd(%ebp),%r3 // number of rounds
- push %edi
- lea ekey(%ebp),%r6 // key pointer
-
-// input four columns and xor in first round key
-
- mov (%r2),%r0
- mov 4(%r2),%r1
- mov 8(%r2),%r4
- mov 12(%r2),%r5
- xor (%r6),%r0
- xor 4(%r6),%r1
- xor 8(%r6),%r4
- xor 12(%r6),%r5
-
- sub $8,%esp // space for register saves on stack
- add $16,%r6 // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- add $32,%r6
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- add $32,%r6
-
-2: fwd_rnd( -64(%r6) ,ft_tab) // 14 rounds for 128-bit key
- fwd_rnd( -48(%r6) ,ft_tab)
-3: fwd_rnd( -32(%r6) ,ft_tab) // 12 rounds for 128-bit key
- fwd_rnd( -16(%r6) ,ft_tab)
-4: fwd_rnd( (%r6) ,ft_tab) // 10 rounds for 128-bit key
- fwd_rnd( +16(%r6) ,ft_tab)
- fwd_rnd( +32(%r6) ,ft_tab)
- fwd_rnd( +48(%r6) ,ft_tab)
- fwd_rnd( +64(%r6) ,ft_tab)
- fwd_rnd( +80(%r6) ,ft_tab)
- fwd_rnd( +96(%r6) ,ft_tab)
- fwd_rnd(+112(%r6) ,ft_tab)
- fwd_rnd(+128(%r6) ,ft_tab)
- fwd_rnd(+144(%r6) ,fl_tab) // last round uses a different table
-
-// move final values to the output array. CAUTION: the
-// order of these assigns rely on the register mappings
-
- add $8,%esp
- mov out_blk+12(%esp),%r6
- mov %r5,12(%r6)
- pop %edi
- mov %r4,8(%r6)
- pop %esi
- mov %r1,4(%r6)
- pop %ebx
- mov %r0,(%r6)
- pop %ebp
- mov $1,%eax
- ret
-
-// AES (Rijndael) Decryption Subroutine
-
-.global aes_dec_blk
-
-.extern it_tab
-.extern il_tab
-
-.align 4
-
-aes_dec_blk:
- push %ebp
- mov ctx(%esp),%ebp // pointer to context
- xor %eax,%eax
-
-// CAUTION: the order and the values used in these assigns
-// rely on the register mappings
-
-1: push %ebx
- mov in_blk+4(%esp),%r2
- push %esi
- mov nrnd(%ebp),%r3 // number of rounds
- push %edi
- lea dkey(%ebp),%r6 // key pointer
- mov %r3,%r0
- shl $4,%r0
- add %r0,%r6
-
-// input four columns and xor in first round key
-
- mov (%r2),%r0
- mov 4(%r2),%r1
- mov 8(%r2),%r4
- mov 12(%r2),%r5
- xor (%r6),%r0
- xor 4(%r6),%r1
- xor 8(%r6),%r4
- xor 12(%r6),%r5
-
- sub $8,%esp // space for register saves on stack
- sub $16,%r6 // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- sub $32,%r6
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- sub $32,%r6
-
-2: inv_rnd( +64(%r6), it_tab) // 14 rounds for 128-bit key
- inv_rnd( +48(%r6), it_tab)
-3: inv_rnd( +32(%r6), it_tab) // 12 rounds for 128-bit key
- inv_rnd( +16(%r6), it_tab)
-4: inv_rnd( (%r6), it_tab) // 10 rounds for 128-bit key
- inv_rnd( -16(%r6), it_tab)
- inv_rnd( -32(%r6), it_tab)
- inv_rnd( -48(%r6), it_tab)
- inv_rnd( -64(%r6), it_tab)
- inv_rnd( -80(%r6), it_tab)
- inv_rnd( -96(%r6), it_tab)
- inv_rnd(-112(%r6), it_tab)
- inv_rnd(-128(%r6), it_tab)
- inv_rnd(-144(%r6), il_tab) // last round uses a different table
-
-// move final values to the output array. CAUTION: the
-// order of these assigns rely on the register mappings
-
- add $8,%esp
- mov out_blk+12(%esp),%r6
- mov %r5,12(%r6)
- pop %edi
- mov %r4,8(%r6)
- pop %esi
- mov %r1,4(%r6)
- pop %ebx
- mov %r0,(%r6)
- pop %ebp
- mov $1,%eax
- ret
-
+++ /dev/null
-/*
- *
- * Glue Code for optimized 586 assembler version of AES
- *
- * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK.
- * All rights reserved.
- *
- * LICENSE TERMS
- *
- * The free distribution and use of this software in both source and binary
- * form is allowed (with or without changes) provided that:
- *
- * 1. distributions of this source code include the above copyright
- * notice, this list of conditions and the following disclaimer;
- *
- * 2. distributions in binary form include the above copyright
- * notice, this list of conditions and the following disclaimer
- * in the documentation and/or other associated materials;
- *
- * 3. the copyright holder's name is not used to endorse products
- * built using this software without specific written permission.
- *
- * ALTERNATIVELY, provided that this notice is retained in full, this product
- * may be distributed under the terms of the GNU General Public License (GPL),
- * in which case the provisions of the GPL apply INSTEAD OF those given above.
- *
- * DISCLAIMER
- *
- * This software is provided 'as is' with no explicit or implied warranties
- * in respect of its properties, including, but not limited to, correctness
- * and/or fitness for purpose.
- *
- * Copyright (c) 2003, Adam J. Richter <adam@yggdrasil.com> (conversion to
- * 2.5 API).
- * Copyright (c) 2003, 2004 Fruhwirth Clemens <clemens@endorphin.org>
- * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/linkage.h>
-
-asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx);
-asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx);
-
-#define AES_MIN_KEY_SIZE 16
-#define AES_MAX_KEY_SIZE 32
-#define AES_BLOCK_SIZE 16
-#define AES_KS_LENGTH 4 * AES_BLOCK_SIZE
-#define RC_LENGTH 29
-
-struct aes_ctx {
- u32 ekey[AES_KS_LENGTH];
- u32 rounds;
- u32 dkey[AES_KS_LENGTH];
-};
-
-#define WPOLY 0x011b
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define bytes2word(b0, b1, b2, b3) \
- (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
-
-/* define the finite field multiplies required for Rijndael */
-#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
-#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
-#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
-#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
-#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
-#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
-#define fi(x) ((x) ? pow[255 - log[x]]: 0)
-
-static inline u32 upr(u32 x, int n)
-{
- return (x << 8 * n) | (x >> (32 - 8 * n));
-}
-
-static inline u8 bval(u32 x, int n)
-{
- return x >> 8 * n;
-}
-
-/* The forward and inverse affine transformations used in the S-box */
-#define fwd_affine(x) \
- (w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8)))
-
-#define inv_affine(x) \
- (w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8)))
-
-static u32 rcon_tab[RC_LENGTH];
-
-u32 ft_tab[4][256];
-u32 fl_tab[4][256];
-u32 ls_tab[4][256];
-u32 im_tab[4][256];
-u32 il_tab[4][256];
-u32 it_tab[4][256];
-
-void gen_tabs(void)
-{
- u32 i, w;
- u8 pow[512], log[256];
-
- /*
- * log and power tables for GF(2^8) finite field with
- * WPOLY as modular polynomial - the simplest primitive
- * root is 0x03, used here to generate the tables.
- */
- i = 0; w = 1;
-
- do {
- pow[i] = (u8)w;
- pow[i + 255] = (u8)w;
- log[w] = (u8)i++;
- w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0);
- } while (w != 1);
-
- for(i = 0, w = 1; i < RC_LENGTH; ++i) {
- rcon_tab[i] = bytes2word(w, 0, 0, 0);
- w = f2(w);
- }
-
- for(i = 0; i < 256; ++i) {
- u8 b;
-
- b = fwd_affine(fi((u8)i));
- w = bytes2word(f2(b), b, b, f3(b));
-
- /* tables for a normal encryption round */
- ft_tab[0][i] = w;
- ft_tab[1][i] = upr(w, 1);
- ft_tab[2][i] = upr(w, 2);
- ft_tab[3][i] = upr(w, 3);
- w = bytes2word(b, 0, 0, 0);
-
- /*
- * tables for last encryption round
- * (may also be used in the key schedule)
- */
- fl_tab[0][i] = w;
- fl_tab[1][i] = upr(w, 1);
- fl_tab[2][i] = upr(w, 2);
- fl_tab[3][i] = upr(w, 3);
-
- /*
- * table for key schedule if fl_tab above is
- * not of the required form
- */
- ls_tab[0][i] = w;
- ls_tab[1][i] = upr(w, 1);
- ls_tab[2][i] = upr(w, 2);
- ls_tab[3][i] = upr(w, 3);
-
- b = fi(inv_affine((u8)i));
- w = bytes2word(fe(b), f9(b), fd(b), fb(b));
-
- /* tables for the inverse mix column operation */
- im_tab[0][b] = w;
- im_tab[1][b] = upr(w, 1);
- im_tab[2][b] = upr(w, 2);
- im_tab[3][b] = upr(w, 3);
-
- /* tables for a normal decryption round */
- it_tab[0][i] = w;
- it_tab[1][i] = upr(w,1);
- it_tab[2][i] = upr(w,2);
- it_tab[3][i] = upr(w,3);
-
- w = bytes2word(b, 0, 0, 0);
-
- /* tables for last decryption round */
- il_tab[0][i] = w;
- il_tab[1][i] = upr(w,1);
- il_tab[2][i] = upr(w,2);
- il_tab[3][i] = upr(w,3);
- }
-}
-
-#define four_tables(x,tab,vf,rf,c) \
-( tab[0][bval(vf(x,0,c),rf(0,c))] ^ \
- tab[1][bval(vf(x,1,c),rf(1,c))] ^ \
- tab[2][bval(vf(x,2,c),rf(2,c))] ^ \
- tab[3][bval(vf(x,3,c),rf(3,c))] \
-)
-
-#define vf1(x,r,c) (x)
-#define rf1(r,c) (r)
-#define rf2(r,c) ((r-c)&3)
-
-#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0)
-#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c)
-
-#define ff(x) inv_mcol(x)
-
-#define ke4(k,i) \
-{ \
- k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
- k[4*(i)+5] = ss[1] ^= ss[0]; \
- k[4*(i)+6] = ss[2] ^= ss[1]; \
- k[4*(i)+7] = ss[3] ^= ss[2]; \
-}
-
-#define kel4(k,i) \
-{ \
- k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
- k[4*(i)+5] = ss[1] ^= ss[0]; \
- k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \
-}
-
-#define ke6(k,i) \
-{ \
- k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 7] = ss[1] ^= ss[0]; \
- k[6*(i)+ 8] = ss[2] ^= ss[1]; \
- k[6*(i)+ 9] = ss[3] ^= ss[2]; \
- k[6*(i)+10] = ss[4] ^= ss[3]; \
- k[6*(i)+11] = ss[5] ^= ss[4]; \
-}
-
-#define kel6(k,i) \
-{ \
- k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 7] = ss[1] ^= ss[0]; \
- k[6*(i)+ 8] = ss[2] ^= ss[1]; \
- k[6*(i)+ 9] = ss[3] ^= ss[2]; \
-}
-
-#define ke8(k,i) \
-{ \
- k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 9] = ss[1] ^= ss[0]; \
- k[8*(i)+10] = ss[2] ^= ss[1]; \
- k[8*(i)+11] = ss[3] ^= ss[2]; \
- k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \
- k[8*(i)+13] = ss[5] ^= ss[4]; \
- k[8*(i)+14] = ss[6] ^= ss[5]; \
- k[8*(i)+15] = ss[7] ^= ss[6]; \
-}
-
-#define kel8(k,i) \
-{ \
- k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 9] = ss[1] ^= ss[0]; \
- k[8*(i)+10] = ss[2] ^= ss[1]; \
- k[8*(i)+11] = ss[3] ^= ss[2]; \
-}
-
-#define kdf4(k,i) \
-{ \
- ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
- ss[1] = ss[1] ^ ss[3]; \
- ss[2] = ss[2] ^ ss[3]; \
- ss[3] = ss[3]; \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- ss[4] ^= k[4*(i)]; \
- k[4*(i)+4] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+1]; \
- k[4*(i)+5] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+2]; \
- k[4*(i)+6] = ff(ss[4]); \
- ss[4] ^= k[4*(i)+3]; \
- k[4*(i)+7] = ff(ss[4]); \
-}
-
-#define kd4(k,i) \
-{ \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- ss[4] = ff(ss[4]); \
- k[4*(i)+4] = ss[4] ^= k[4*(i)]; \
- k[4*(i)+5] = ss[4] ^= k[4*(i)+1]; \
- k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; \
- k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \
-}
-
-#define kdl4(k,i) \
-{ \
- ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
- ss[i % 4] ^= ss[4]; \
- k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
- k[4*(i)+5] = ss[1] ^ ss[3]; \
- k[4*(i)+6] = ss[0]; \
- k[4*(i)+7] = ss[1]; \
-}
-
-#define kdf6(k,i) \
-{ \
- ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 6] = ff(ss[0]); \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ff(ss[1]); \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ff(ss[2]); \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ff(ss[3]); \
- ss[4] ^= ss[3]; \
- k[6*(i)+10] = ff(ss[4]); \
- ss[5] ^= ss[4]; \
- k[6*(i)+11] = ff(ss[5]); \
-}
-
-#define kd6(k,i) \
-{ \
- ss[6] = ls_box(ss[5],3) ^ rcon_tab[i]; \
- ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \
- k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \
- ss[4] ^= ss[3]; \
- k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \
- ss[5] ^= ss[4]; \
- k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \
-}
-
-#define kdl6(k,i) \
-{ \
- ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
- k[6*(i)+ 6] = ss[0]; \
- ss[1] ^= ss[0]; \
- k[6*(i)+ 7] = ss[1]; \
- ss[2] ^= ss[1]; \
- k[6*(i)+ 8] = ss[2]; \
- ss[3] ^= ss[2]; \
- k[6*(i)+ 9] = ss[3]; \
-}
-
-#define kdf8(k,i) \
-{ \
- ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 8] = ff(ss[0]); \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = ff(ss[1]); \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = ff(ss[2]); \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = ff(ss[3]); \
- ss[4] ^= ls_box(ss[3],0); \
- k[8*(i)+12] = ff(ss[4]); \
- ss[5] ^= ss[4]; \
- k[8*(i)+13] = ff(ss[5]); \
- ss[6] ^= ss[5]; \
- k[8*(i)+14] = ff(ss[6]); \
- ss[7] ^= ss[6]; \
- k[8*(i)+15] = ff(ss[7]); \
-}
-
-#define kd8(k,i) \
-{ \
- u32 __g = ls_box(ss[7],3) ^ rcon_tab[i]; \
- ss[0] ^= __g; \
- __g = ff(__g); \
- k[8*(i)+ 8] = __g ^= k[8*(i)]; \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = __g ^= k[8*(i)+ 1]; \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = __g ^= k[8*(i)+ 2]; \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = __g ^= k[8*(i)+ 3]; \
- __g = ls_box(ss[3],0); \
- ss[4] ^= __g; \
- __g = ff(__g); \
- k[8*(i)+12] = __g ^= k[8*(i)+ 4]; \
- ss[5] ^= ss[4]; \
- k[8*(i)+13] = __g ^= k[8*(i)+ 5]; \
- ss[6] ^= ss[5]; \
- k[8*(i)+14] = __g ^= k[8*(i)+ 6]; \
- ss[7] ^= ss[6]; \
- k[8*(i)+15] = __g ^= k[8*(i)+ 7]; \
-}
-
-#define kdl8(k,i) \
-{ \
- ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
- k[8*(i)+ 8] = ss[0]; \
- ss[1] ^= ss[0]; \
- k[8*(i)+ 9] = ss[1]; \
- ss[2] ^= ss[1]; \
- k[8*(i)+10] = ss[2]; \
- ss[3] ^= ss[2]; \
- k[8*(i)+11] = ss[3]; \
-}
-
-static int
-aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
-{
- int i;
- u32 ss[8];
- struct aes_ctx *ctx = ctx_arg;
-
- /* encryption schedule */
-
- ctx->ekey[0] = ss[0] = u32_in(in_key);
- ctx->ekey[1] = ss[1] = u32_in(in_key + 4);
- ctx->ekey[2] = ss[2] = u32_in(in_key + 8);
- ctx->ekey[3] = ss[3] = u32_in(in_key + 12);
-
- switch(key_len) {
- case 16:
- for (i = 0; i < 9; i++)
- ke4(ctx->ekey, i);
- kel4(ctx->ekey, 9);
- ctx->rounds = 10;
- break;
-
- case 24:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
- for (i = 0; i < 7; i++)
- ke6(ctx->ekey, i);
- kel6(ctx->ekey, 7);
- ctx->rounds = 12;
- break;
-
- case 32:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
- ctx->ekey[6] = ss[6] = u32_in(in_key + 24);
- ctx->ekey[7] = ss[7] = u32_in(in_key + 28);
- for (i = 0; i < 6; i++)
- ke8(ctx->ekey, i);
- kel8(ctx->ekey, 6);
- ctx->rounds = 14;
- break;
-
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- /* decryption schedule */
-
- ctx->dkey[0] = ss[0] = u32_in(in_key);
- ctx->dkey[1] = ss[1] = u32_in(in_key + 4);
- ctx->dkey[2] = ss[2] = u32_in(in_key + 8);
- ctx->dkey[3] = ss[3] = u32_in(in_key + 12);
-
- switch (key_len) {
- case 16:
- kdf4(ctx->dkey, 0);
- for (i = 1; i < 9; i++)
- kd4(ctx->dkey, i);
- kdl4(ctx->dkey, 9);
- break;
-
- case 24:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
- kdf6(ctx->dkey, 0);
- for (i = 1; i < 7; i++)
- kd6(ctx->dkey, i);
- kdl6(ctx->dkey, 7);
- break;
-
- case 32:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
- ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24));
- ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28));
- kdf8(ctx->dkey, 0);
- for (i = 1; i < 6; i++)
- kd8(ctx->dkey, i);
- kdl8(ctx->dkey, 6);
- break;
- }
- return 0;
-}
-
-static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src)
-{
- aes_enc_blk(src, dst, ctx);
-}
-static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
-{
- aes_dec_blk(src, dst, ctx);
-}
-
-
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt
- }
- }
-};
-
-static int __init aes_init(void)
-{
- gen_tabs();
- return crypto_register_alg(&aes_alg);
-}
-
-static void __exit aes_fini(void)
-{
- crypto_unregister_alg(&aes_alg);
-}
-
-module_init(aes_init);
-module_exit(aes_fini);
-
-MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter");
-MODULE_ALIAS("aes");
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
memset(line, 0, LINE_SIZE);
if (len > LINE_SIZE)
len = LINE_SIZE;
switch (cmd) {
default:
- return -ENOTTY;
+ return -ENOIOCTLCMD;
case MTRRIOC_ADD_ENTRY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
-#ifdef CONFIG_TUX
- .long __sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .long sys_tux
-# else
- .long sys_ni_syscall
-# endif
-#endif
+ .long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall
.long sys_gettid
.long sys_readahead /* 225 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
- .long sys_ioprio_set
- .long sys_ioprio_get /* 285 */
syscall_table_size=(.-sys_call_table)
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
- printk("could not set ID!\n");
+ panic("could not set ID!\n");
else
printk(" ok.\n");
}
}
}
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+ panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
}
/*
}
-/*
- * These should really be __section__(".bss.page_aligned") as well, but
- * gcc's 3.0 and earlier don't handle that correctly.
- */
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
/*
* allocate per-cpu stacks for hardirq and for softirq processing
return 0;
}
+/*
+ * Get a random word:
+ */
+static inline unsigned int get_random_int(void)
+{
+ unsigned int val = 0;
+
+ if (!exec_shield_randomize)
+ return 0;
+
+#ifdef CONFIG_X86_HAS_TSC
+ rdtscl(val);
+#endif
+ val += current->pid + jiffies + (int)&val;
+
+ /*
+ * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
+ * every second, from the entropy pool (and thus creates a limited
+ * drain on it), and uses halfMD4Transform within the second. We
+ * also spice it with the TSC (if available), jiffies, PID and the
+ * stack address:
+ */
+ return secure_ip_id(val);
+}
unsigned long arch_align_stack(unsigned long sp)
{
return sp & ~0xf;
}
+#if SHLIB_BASE >= 0x01000000
+# error SHLIB_BASE must be under 16MB!
+#endif
+
+static unsigned long
+arch_get_unmapped_nonexecutable_area(struct mm_struct *mm, unsigned long addr, unsigned long len)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ unsigned long stack_limit;
+ int first_time = 1;
+
+ if (!mm->mmap_top) {
+ printk("hm, %s:%d, !mmap_top.\n", current->comm, current->pid);
+ mm->mmap_top = mmap_top();
+ }
+ stack_limit = mm->mmap_top;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* dont allow allocations above current stack limit */
+ if (mm->non_executable_cache > stack_limit)
+ mm->non_executable_cache = stack_limit;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ /* make sure it can fit in the remaining address space */
+ if (mm->non_executable_cache < len)
+ return -ENOMEM;
+
+ /* either no address requested or cant fit in requested address hole */
+try_again:
+ addr = (mm->non_executable_cache - len)&PAGE_MASK;
+ do {
+ if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ return -ENOMEM;
+
+ /* new region fits between prev_vma->vm_end and vma->vm_start, use it */
+ if (addr+len <= vma->vm_start && (!prev_vma || (addr >= prev_vma->vm_end))) {
+ /* remember the address as a hint for next time */
+ mm->non_executable_cache = addr;
+ return addr;
+
+ /* pull non_executable_cache down to the first hole */
+ } else if (mm->non_executable_cache == vma->vm_end)
+ mm->non_executable_cache = vma->vm_start;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ } while (len <= vma->vm_start);
+ /* if hint left us with no space for the requested mapping try again */
+ if (first_time) {
+ first_time = 0;
+ mm->non_executable_cache = stack_limit;
+ goto try_again;
+ }
+ return -ENOMEM;
+}
+
+static unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+ unsigned long range = end - len - start;
+ if (end <= start + len)
+ return 0;
+ return PAGE_ALIGN(get_random_int() % range + start);
+}
+
+static inline unsigned long
+stock_arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ start_addr = addr = mm->free_area_cache;
+
+full_search:
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len0, unsigned long pgoff, unsigned long flags,
+ unsigned long prot)
+{
+ unsigned long addr = addr0, len = len0;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ascii_shield = 0;
+ unsigned long tmp;
+
+ /*
+ * Fall back to the old layout:
+ */
+ if (!(current->flags & PF_RELOCEXEC))
+ return stock_arch_get_unmapped_area(filp, addr0, len0, pgoff, flags);
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (!addr && (prot & PROT_EXEC) && !(flags & MAP_FIXED))
+ addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start)) {
+ return addr;
+ }
+ }
+
+ if (prot & PROT_EXEC) {
+ ascii_shield = 1;
+ addr = SHLIB_BASE;
+ } else {
+ /* this can fail if the stack was unlimited */
+ if ((tmp = arch_get_unmapped_nonexecutable_area(mm, addr, len)) != -ENOMEM)
+ return tmp;
+search_upper:
+ addr = PAGE_ALIGN(arch_align_stack(TASK_UNMAPPED_BASE));
+ }
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Must not let a PROT_EXEC mapping get into the
+ * brk area:
+ */
+ if (ascii_shield && (addr + len > mm->brk)) {
+ ascii_shield = 0;
+ goto search_upper;
+ }
+ /*
+ * Up until the brk area we randomize addresses
+ * as much as possible:
+ */
+ if (ascii_shield && (addr >= 0x01000000)) {
+ tmp = randomize_range(0x01000000, mm->brk, len);
+ vma = find_vma(mm, tmp);
+ if (TASK_SIZE - len >= tmp &&
+ (!vma || tmp + len <= vma->vm_start))
+ return tmp;
+ }
+ /*
+ * Ok, randomization didnt work out - return
+ * the result of the linear search:
+ */
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+}
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
current->mm->brk = new_brk;
}
+/*
+ * Top of mmap area (just below the process stack).
+ * leave an at least ~128 MB hole. Randomize it.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+unsigned long mmap_top(void)
+{
+ unsigned long gap = 0;
+
+ gap = current->rlim[RLIMIT_STACK].rlim_cur;
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ gap = arch_align_stack(gap) & PAGE_MASK;
+
+ return TASK_SIZE - gap;
+}
+
See vsyscall-sigreturn.S. */
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
+extern SYSENTER_RETURN;
static void setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ if (get_user(fourth.__pad, (void * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
extern void SYSENTER_RETURN_OFFSET;
-unsigned int vdso_enabled = 0;
+unsigned int vdso_enabled = 1;
void map_vsyscall(void)
{
{
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
- panic("Halting\n");
+
/* Clear and disable the memory parity error line. */
clear_mem_error(reason);
}
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/init.h>
-#include <linux/dmi.h>
#include <asm/mach-bigsmp/mach_apic.h>
#include <asm/mach-bigsmp/mach_apicdef.h>
#include <asm/mach-bigsmp/mach_ipi.h>
#include <asm/mach-default/mach_mpparse.h>
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static __init int hp_ht_bigsmp(struct dmi_system_id *d)
-{
-#ifdef CONFIG_X86_GENERICARCH
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
- dmi_bigsmp = 1;
-#endif
- return 0;
-}
-
-
-static struct dmi_system_id __initdata bigsmp_dmi_table[] = {
- { hp_ht_bigsmp, "HP ProLiant DL760 G2", {
- DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
- }},
-
- { hp_ht_bigsmp, "HP ProLiant DL740", {
- DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
- }},
- { }
-};
-
+int dmi_bigsmp; /* can be set by dmi scanners */
static __init int probe_bigsmp(void)
{
- dmi_check_system(bigsmp_dmi_table);
return dmi_bigsmp;
}
# Makefile for the linux i386-specific parts of the memory manager.
#
-obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
+obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
- struct zone *high0 = &NODE_DATA(0)->node_zones[ZONE_HIGHMEM];
- if (high0->spanned_pages > 0)
- highmem_start_page = high0->zone_mem_map;
- else
- highmem_start_page = pfn_to_page(max_low_pfn+1);
+ highmem_start_page = NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_mem_map;
num_physpages = highend_pfn;
#else
num_physpages = max_low_pfn;
extern int is_available_memory(efi_memory_desc_t *);
-static inline int page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
{
int i;
unsigned long addr, end;
}
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
- * valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
- if (pagenr <= 256)
- return 1;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
-}
-
-
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
+++ /dev/null
-/*
- * linux/arch/i386/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(struct mm_struct *mm)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (sysctl_legacy_va_layout || (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base(mm);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
if ( dev2->irq && dev2->irq != irq && \
(!(pci_probe & PCI_USE_PIRQ_MASK) || \
((1 << dev2->irq) & mask)) ) {
-#ifndef CONFIG_PCI_MSI
+#ifndef CONFIG_PCI_USE_VECTOR
printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
pci_name(dev2), dev2->irq, irq);
#endif
}
dev = temp_dev;
if (irq >= 0) {
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
if (!platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
#endif
config DISCONTIGMEM
bool "Discontiguous memory support"
- depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1) && NUMA && VIRTUAL_MEM_MAP
+ depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
help
Say Y to support efficient handling of discontiguous physical memory,
See <file:Documentation/vm/numa> for more.
config IA64_CYCLONE
- bool "Cyclone (EXA) Time Source support"
+ bool "Support Cyclone(EXA) Time Source"
help
- Say Y here to enable support for IBM EXA Cyclone time source.
- If you're unsure, answer N.
+ Say Y here to enable support for IBM EXA Cyclone time source.
+ If you're unsure, answer N.
config IOSAPIC
bool
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
-core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
-
+ifeq ($(CONFIG_DISCONTIGMEM),y)
+ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
+endif
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
+ifeq ($(CONFIG_DISCONTIGMEM),y)
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/sn/
+endif
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=20
-CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
#
# Processor type and features
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_TIME_INTERPOLATION=y
-CONFIG_EFI=y
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
CONFIG_IA64_GENERIC=y
# CONFIG_IA64_DIG is not set
+# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_SGI_SN2 is not set
-# CONFIG_IA64_HP_SIM is not set
-# CONFIG_ITANIUM is not set
-CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
+CONFIG_ACPI=y
+CONFIG_ACPI_INTERPRETER=y
+CONFIG_ACPI_KERNEL_CONFIG=y
CONFIG_IA64_L1_CACHE_SHIFT=7
+# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set
CONFIG_NUMA=y
-CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_DISCONTIGMEM=y
-CONFIG_IA64_CYCLONE=y
+CONFIG_VIRTUAL_MEM_MAP=y
+CONFIG_IA64_MCA=y
+CONFIG_PM=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=18
+# CONFIG_HUGETLB_PAGE_SIZE_4GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_16MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256KB is not set
+# CONFIG_IA64_PAL_IDLE is not set
CONFIG_SMP=y
-CONFIG_NR_CPUS=512
-CONFIG_HOTPLUG_CPU=y
# CONFIG_PREEMPT is not set
-CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
+CONFIG_HAVE_DEC_LOCK=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
-
-#
-# Firmware Drivers
-#
+CONFIG_EFI=y
CONFIG_EFI_VARS=y
-CONFIG_EFI_PCDP=y
+CONFIG_NR_CPUS=512
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
-#
-# Power management and ACPI
-#
-CONFIG_PM=y
-CONFIG_ACPI=y
-
#
# ACPI (Advanced Configuration and Power Interface) Support
#
CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_POWER=y
CONFIG_ACPI_PCI=y
CONFIG_ACPI_SYSTEM=y
-
-#
-# Bus options (PCI, PCMCIA)
-#
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
-# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
+CONFIG_HOTPLUG=y
#
# PCI Hotplug Support
# CONFIG_HOTPLUG_PCI_FAKE is not set
CONFIG_HOTPLUG_PCI_ACPI=m
# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_PCIE is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
#
# PCMCIA/CardBus support
# CONFIG_PCMCIA is not set
#
-# Device Drivers
+# Parallel port support
#
+# CONFIG_PARPORT is not set
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
#
+# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_INITRD is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_PDC202XX_NEW is not set
# CONFIG_BLK_DEV_SVWKS is not set
-CONFIG_BLK_DEV_SGIIOC4=y
# CONFIG_BLK_DEV_SIIMAGE is not set
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
CONFIG_IDEDMA_AUTO=y
# CONFIG_BLK_DEV_HD is not set
+#
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_BOOT=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
+
#
# SCSI device support
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_REPORT_LUNS=y
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=y
-
#
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_EATA_PIO is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_SCSI_QLA2XXX=y
-CONFIG_SCSI_QLA21XX=m
-CONFIG_SCSI_QLA22XX=m
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA6322 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_RAID6=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_ISENSE is not set
-# CONFIG_FUSION_CTL is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
#
# Networking support
#
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
CONFIG_ARPD=y
+# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
# CONFIG_DE4X5 is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_DM9102 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=m
# CONFIG_EEPRO100_PIO is not set
CONFIG_E100=m
-# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=y
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
-# CONFIG_TR is not set
+# CONFIG_NET_RADIO is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-CONFIG_NETCONSOLE=y
#
-# ISDN subsystem
+# Amateur Radio support
#
-# CONFIG_ISDN is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
#
-# Telephony Support
+# Bluetooth support
#
-# CONFIG_PHONE is not set
+# CONFIG_BT is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN_BOOL is not set
#
# Input device support
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_ROCKETPORT is not set
-# CONFIG_CYCLADES is not set
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
+CONFIG_SGI_L1_SERIAL=y
+CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_ACPI=y
+CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# I2C Algorithms
+#
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C Hardware Sensors Chip support
+#
+# CONFIG_I2C_SENSOR is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
# CONFIG_QIC02_TAPE is not set
#
#
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
CONFIG_EFI_RTC=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_RAW_DRIVER=m
-CONFIG_HPET=y
-# CONFIG_HPET_RTC_IRQ is not set
-CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256
#
-# I2C support
+# Multimedia devices
#
-# CONFIG_I2C is not set
+# CONFIG_VIDEO_DEV is not set
#
-# Dallas's 1-wire bus
+# Digital Video Broadcasting Devices
#
-# CONFIG_W1 is not set
+# CONFIG_DVB is not set
#
-# Misc devices
+# File systems
#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
#
-# Multimedia devices
+# CD-ROM/DVD Filesystems
#
-# CONFIG_VIDEO_DEV is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+CONFIG_UDF_FS=m
#
-# Digital Video Broadcasting Devices
+# DOS/FAT/NT Filesystems
#
-# CONFIG_DVB is not set
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_GSS is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+CONFIG_CIFS=m
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
#
# Graphics support
# Advanced Linux Sound Architecture
#
CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
#
# Generic devices
#
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
#
# PCI devices
#
-CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_ALI5451 is not set
-# CONFIG_SND_ATIIXP is not set
-# CONFIG_SND_AU8810 is not set
-# CONFIG_SND_AU8820 is not set
-# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
-# CONFIG_SND_BT87X is not set
CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_NEW_DSP=y
CONFIG_SND_CS4281=m
CONFIG_SND_EMU10K1=m
# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_ES1968 is not set
# CONFIG_SND_MAESTRO3 is not set
CONFIG_SND_FM801=m
-# CONFIG_SND_FM801_TEA575X is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
-# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_SONICVIBES is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VX222 is not set
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_SPLIT_ISO is not set
-# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_PRINTER is not set
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_WACOM is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
#
# USB Imaging devices
#
# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
#
# USB Miscellaneous drivers
#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
# CONFIG_USB_TIGL is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_TEST is not set
-
-#
-# USB Gadget Support
-#
# CONFIG_USB_GADGET is not set
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_REISERFS_FS_XATTR is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=y
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_SECURITY is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-# CONFIG_MSDOS_FS is not set
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-# CONFIG_NTFS_RW is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_XATTR is not set
-# CONFIG_CIFS_POSIX is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-CONFIG_SGI_PARTITION=y
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
#
# HP Simulator drivers
# CONFIG_IA64_DEBUG_CMPXCHG is not set
# CONFIG_IA64_DEBUG_IRQ is not set
# CONFIG_DEBUG_INFO is not set
-CONFIG_SYSVIPC_COMPAT=y
#
# Security options
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_DES is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_GENERIC is not set
+# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_TEST is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
-# CONFIG_EFI_PCDP is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_SVW is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
+CONFIG_SGI_L1_SERIAL=y
+CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_I2C is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
-CONFIG_EFI_PCDP=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_CARMEL is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
-# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
CONFIG_IDEDISK_MULTI_MODE=y
CONFIG_BLK_DEV_IDECD=y
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
# CONFIG_DM_CRYPT is not set
-# CONFIG_DM_SNAPSHOT is not set
-# CONFIG_DM_MIRROR is not set
-# CONFIG_DM_ZERO is not set
#
# Fusion MPT device support
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_ACPI=y
CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_RAW_DRIVER is not set
-# CONFIG_HPET is not set
#
# I2C support
#
# CONFIG_I2C_SENSOR is not set
# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_FSCHER is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
# CONFIG_SENSORS_LM80 is not set
# CONFIG_SENSORS_LM83 is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON_OLD is not set
CONFIG_FB_RADEON=m
# CONFIG_MDA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=y
-CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V4 is not set
-# CONFIG_NFSD_TCP is not set
+CONFIG_NFSD_TCP=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=y
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_1251=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
- /*
- * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
- * if a TLB entry is purged while in use. sba_mark_invalid()
- * purges IOTLB entries in power-of-two sizes, so we also
- * allocate IOVA space in power-of-two sizes.
- */
- bits_wanted = 1UL << get_iovp_order(bits_wanted << PAGE_SHIFT);
-
if (likely(bits_wanted == 1)) {
unsigned int bitshiftcnt;
for(; res_ptr < res_end ; res_ptr++) {
int bits_not_wanted = size >> iovp_shift;
unsigned long m;
- /* Round up to power-of-two size: see AR2305 note above */
- bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << PAGE_SHIFT);
for (; bits_not_wanted > 0 ; res_ptr++) {
if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
{
}
-static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
-{
-}
-
static struct hw_interrupt_type irq_type_hp_sim = {
.typename = "hpsim",
.startup = hpsim_irq_startup,
.disable = hpsim_irq_noop,
.ack = hpsim_irq_noop,
.end = hpsim_irq_noop,
- .set_affinity = hpsim_set_affinity_noop,
+ .set_affinity = (void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
};
void __init
printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name);
}
+#ifdef CONFIG_NET_FASTROUTE
+static int
+simeth_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+ printk(KERN_WARNING "%s: simeth_accept_fastpath called\n", dev->name);
+ return -1;
+}
+#endif
+
__initcall(simeth_probe);
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>
-#include <linux/vs_memory.h>
#include <asm/param.h>
#include <asm/signal.h>
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
-#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
-
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
set_personality(PER_LINUX32);
current->thread.map_base = IA32_PAGE_OFFSET/3;
current->thread.task_size = IA32_PAGE_OFFSET; /* use what Linux/x86 uses... */
+ current->thread.flags |= IA64_THREAD_XSTACK; /* data must be executable */
set_fs(USER_DS); /* set addr limit for new TASK_SIZE */
}
data8 compat_clock_gettime /* 265 */
data8 compat_clock_getres
data8 compat_clock_nanosleep
- data8 compat_statfs64
- data8 compat_fstatfs64
+ data8 sys_statfs64
+ data8 sys_fstatfs64
data8 sys_tgkill /* 270 */
data8 compat_sys_utimes
data8 sys32_fadvise64_64
if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
if (lapic->address) {
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
if (BAD_MADT_ENTRY(lsapic, end))
return -EINVAL;
- if (lsapic->flags.enabled) {
+ acpi_table_print_madt_entry(header);
+
+ printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
+
+ if (!lsapic->flags.enabled)
+ printk(" disabled");
+ else {
+ printk(" enabled");
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+ if (hard_smp_processor_id()
+ == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
+ printk(" (BSP)");
#endif
++available_cpus;
}
+ printk("\n");
+
total_cpus++;
return 0;
}
if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/* TBD: Support lapic_nmi entries */
return 0;
}
if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
iosapic_init(iosapic->address, iosapic->global_irq_base);
return 0;
if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/*
* Get vector assignment for this interrupt, set attributes,
* and program the IOSAPIC routing table.
if (BAD_MADT_ENTRY(p, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
+ acpi_table_print_madt_entry(header);
+
/* TBD: Support nimsrc entries */
return 0;
}
-static void __init
-acpi_madt_oem_check (char *oem_id, char *oem_table_id)
+/* Hook from generic ACPI tables.c */
+void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strncmp(oem_id, "IBM", 3) &&
- (!strncmp(oem_table_id, "SERMOW", 6))) {
+ (!strncmp(oem_table_id, "SERMOW", 6))){
- /*
- * Unfortunately ITC_DRIFT is not yet part of the
+ /* Unfortunatly ITC_DRIFT is not yet part of the
* official SAL spec, so the ITC_DRIFT bit is not
* set by the BIOS on this hardware.
*/
sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
- cyclone_setup();
+ /*Start cyclone clock*/
+ cyclone_setup(0);
}
}
#define CYCLONE_TIMER_FREQ 100000000
int use_cyclone;
-void __init cyclone_setup(void)
+int __init cyclone_setup(char *str)
{
use_cyclone = 1;
+ return 1;
}
static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
data8 sys_syslog
data8 sys_setitimer
data8 sys_getitimer
-#ifdef CONFIG_TUX
- data8 __sys_tux // 1120 /* was: ia64_oldstat */
-#else
-# ifdef CONFIG_TUX_MODULE
- data8 sys_tux // 1120 /* was: ia64_oldstat */
-# else
data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
-# endif
-#endif
data8 sys_ni_syscall /* was: ia64_oldlstat */
data8 sys_ni_syscall /* was: ia64_oldfstat */
data8 sys_vhangup
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>
EXPORT_SYMBOL(__ia64_syscall);
-EXPORT_SYMBOL(execve);
-EXPORT_SYMBOL(clone);
/* from arch/ia64/lib */
extern void __divsi3(void);
/*
* This is updated when the user sets irq affinity via /proc
*/
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
+cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
#ifdef CONFIG_IA64_GENERIC
irq_desc_t * __ia64_irq_desc (unsigned int irq)
int prelen;
irq_desc_t *desc = irq_descp(irq);
unsigned long flags;
- int redir = 0;
if (!desc->handler->set_affinity)
return -EIO;
prelen = 0;
if (tolower(*rbuf) == 'r') {
prelen = strspn(rbuf, "Rr ");
- redir++;
+ irq |= IA64_IRQ_REDIRECTED;
}
err = cpumask_parse(buffer+prelen, count-prelen, new_value);
spin_lock_irqsave(&desc->lock, flags);
pending_irq_cpumask[irq] = new_value;
- if (redir)
- set_bit(irq, pending_irq_redir);
- else
- clear_bit(irq, pending_irq_redir);
spin_unlock_irqrestore(&desc->lock, flags);
return full_count;
/* note - we hold desc->lock */
cpumask_t tmp;
irq_desc_t *desc = irq_descp(irq);
- int redir = test_bit(irq, pending_irq_redir);
if (!cpus_empty(pending_irq_cpumask[irq])) {
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
if (unlikely(!cpus_empty(tmp))) {
- desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
- pending_irq_cpumask[irq]);
+ desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
}
cpus_clear(pending_irq_cpumask[irq]);
}
*/
static int cpe_poll_enabled = 1;
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
+static int cpe_vector = -1;
-static int mca_init;
+extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
/*
* IA64_MCA log support
#ifdef CONFIG_ACPI
-static int cpe_vector = -1;
-
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
}
IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x registered\n", __FUNCTION__, cpev);
+ "vector %#x setup and enabled\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */
/*
* ia64_mca_cmc_vector_setup
*
- * Setup the corrected machine check vector register in the processor.
- * (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
- * This function is invoked on a per-processor basis.
+ * Setup the corrected machine check vector register in the processor and
+ * unmask interrupt. This function is invoked on a per-processor basis.
*
* Inputs
* None
cmcv_reg_t cmcv;
cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
+ cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x registered.\n",
+ "machine check vector %#x setup and enabled.\n",
__FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
*/
register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
- ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on BSP & enable */
/* Setup the MCA rendezvous interrupt vector */
register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
- cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
- register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ {
+ irq_desc_t *desc;
+ unsigned int irq;
+
+ cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+
+ if (cpe_vector >= 0) {
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ if (irq_to_vector(irq) == cpe_vector) {
+ desc = irq_descp(irq);
+ desc->status |= IRQ_PER_CPU;
+ setup_irq(irq, &mca_cpe_irqaction);
+ }
+ ia64_mca_register_cpev(cpe_vector);
+ }
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ }
#endif
/* Initialize the areas set aside by the OS to buffer the
ia64_log_init(SAL_INFO_TYPE_CMC);
ia64_log_init(SAL_INFO_TYPE_CPE);
- mca_init = 1;
printk(KERN_INFO "MCA related initialization done\n");
}
static int __init
ia64_mca_late_init(void)
{
- if (!mca_init)
- return 0;
-
- /* Setup the CMCI/P vector and handler */
init_timer(&cmc_poll_timer);
cmc_poll_timer.function = ia64_mca_cmc_poll;
- /* Unmask/enable the vector */
+ /* Reset to the correct state */
cmc_polling_enabled = 0;
- schedule_work(&cmc_enable_work);
-
- IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
-#ifdef CONFIG_ACPI
- /* Setup the CPEI/P vector and handler */
init_timer(&cpe_poll_timer);
cpe_poll_timer.function = ia64_mca_cpe_poll;
- {
- irq_desc_t *desc;
- unsigned int irq;
-
- if (cpe_vector >= 0) {
- /* If platform supports CPEI, enable the irq. */
- cpe_poll_enabled = 0;
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- setup_irq(irq, &mca_cpe_irqaction);
- }
- ia64_mca_register_cpev(cpe_vector);
- IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
- } else {
- /* If platform doesn't support CPEI, get the timer going. */
- if (cpe_poll_enabled) {
- ia64_mca_cpe_poll(0UL);
- IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
- }
- }
+#ifdef CONFIG_ACPI
+ /* If platform doesn't support CPEI, get the timer going. */
+ if (cpe_vector < 0 && cpe_poll_enabled) {
+ ia64_mca_cpe_poll(0UL);
+ } else {
+ cpe_poll_enabled = 0;
}
#endif
static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
- return get_unmapped_area(file, addr, len, pgoff, flags);
+ return get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
mntput(pfmfs_mnt);
}
+static loff_t
+pfm_lseek(struct file *file, loff_t offset, int whence)
+{
+ DPRINT(("pfm_lseek called\n"));
+ return -ESPIPE;
+}
+
static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
return -EINVAL;
}
+ /*
+ * seeks are not allowed on message queues
+ */
+ if (ppos != &filp->f_pos) return -ESPIPE;
PROTECT_CTX(ctx, flags);
static struct file_operations pfm_file_ops = {
- .llseek = no_llseek,
+ .llseek = pfm_lseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
*/
if (task == current || ctx->ctx_fl_system) return 0;
- /*
- * if context is UNLOADED we are safe to go
- */
- if (state == PFM_CTX_UNLOADED) return 0;
-
/*
* no command can operate on a zombie context
*/
}
/*
- * context is LOADED or MASKED. Some commands may need to have
- * the task stopped.
- *
+ * if context is UNLOADED, MASKED we are safe to go
+ */
+ if (state != PFM_CTX_LOADED) return 0;
+
+ /*
+ * context is LOADED, we must make sure the task is stopped
* We could lift this restriction for UP but it would mean that
* the user has no guarantee the task would not run between
* two successive calls to perfmonctl(). That's probably OK.
return error;
}
+void
+ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
+{
+ set_personality(PER_LINUX);
+ if (elf_ex->e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK)
+ current->thread.flags |= IA64_THREAD_XSTACK;
+ else
+ current->thread.flags &= ~IA64_THREAD_XSTACK;
+}
+
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
+ void *saldata;
+ size_t size;
u8 *buf;
u64 bufsize;
buf = NULL;
bufsize = 0;
}
- return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
+ if (*ppos >= bufsize)
+ return 0;
+
+ saldata = buf + file->f_pos;
+ size = bufsize - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, saldata, size))
+ return -EFAULT;
+
+ *ppos += size;
+ return size;
}
static void
}
#endif
- /* enable IA-64 Machine Check Abort Handling unless disabled */
- if (!strstr(saved_command_line, "nomca"))
- ia64_mca_init();
-
+ /* enable IA-64 Machine Check Abort Handling */
+ ia64_mca_init();
+
platform_setup(cmdline_p);
paging_init();
}
smp_setup_percpu_timer();
- ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */
#ifdef CONFIG_PERFMON
pfm_init_percpu();
low = pgt_cache_water[0];
high = pgt_cache_water[1];
- preempt_disable();
if (pgtable_cache_size > (u64) high) {
do {
if (pgd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > (u64) low);
}
- preempt_enable();
}
void
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+ vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
insert_vm_struct(current->mm, vma);
}
{
struct page *page;
/*
- * EFI uses 4KB pages while the kernel can use 4KB or bigger.
+ * EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
* therefore possible to have the initrd share the same page as
* the end of the kernel (given current setup).
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
- setup_gate();
+ setup_gate(); /* setup gate pages before we free up boot memory... */
#ifdef CONFIG_IA32_SUPPORT
ia32_boot_gdt_init();
*
* This code is executed once for each Hub chip.
*/
-static void __init
+static void
per_hub_init(cnodeid_t cnode)
{
nasid_t nasid;
klhwg_add_all_modules(hwgraph_root);
klhwg_add_all_nodes(hwgraph_root);
- for (cnode = 0; cnode < numionodes; cnode++)
+ for (cnode = 0; cnode < numionodes; cnode++) {
+ extern void per_hub_init(cnodeid_t);
per_hub_init(cnode);
+ }
/*
*
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
- ii_ieclr_u_t ieclr;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
- /* Clear IBLS0/1 error bits */
- ieclr.ii_ieclr_regval = 0;
- if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
- ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
- if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
- ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
- REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
-
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/nodedata.h>
+#include <asm/delay.h>
#include <linux/bootmem.h>
#include <linux/string.h>
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
- u64 transfer_stat;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
if (!(mode & BTE_WACQUIRE)) {
return BTEFAIL_NOTAVAIL;
}
+
+ /* Wait until a bte is available. */
+ udelay(1);
} while (1);
return BTE_SUCCESS;
}
- while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
+ while (*bte->most_rcnt_na == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
- if (transfer_stat & IBLS_ERROR) {
- bte_status = transfer_stat & ~IBLS_ERROR;
+ if (*bte->most_rcnt_na & IBLS_ERROR) {
+ bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
*bte->most_rcnt_na = 0L;
} else {
bte_status = BTE_SUCCESS;
}
u8
-sn_irq_to_vector(unsigned int irq)
+sn_irq_to_vector(u8 irq)
{
return(irq);
}
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
- if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0 ||
- efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0)
+ if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0)
return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
return 0;
}
return 1;
}
-void bvme6000_reset(void)
+void bvme6000_reset()
{
volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
# CONFIG_LLC is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
_060_real_lock_page:
move.l %d2,-(%sp)
| load sfc/dfc
+ moveq #5,%d0
tst.b %d0
jne 1f
moveq #1,%d0
- jra 2f
-1: moveq #5,%d0
-2: movec.l %dfc,%d2
+1: movec.l %dfc,%d2
movec.l %d0,%dfc
movec.l %d0,%sfc
}
#endif
- if (CPU_IS_060) {
- u32 pcr;
-
- asm (".chip 68060; movec %%pcr,%0; .chip 68k"
- : "=d" (pcr));
- if (((pcr >> 8) & 0xff) <= 5) {
- printk("Enabling workaround for errata I14\n");
- asm (".chip 68060; movec %0,%%pcr; .chip 68k"
- : : "d" (pcr | 0x20));
- }
- }
-
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
/*
* user process trying to return with weird frame format
*/
-#ifdef DEBUG
+#if DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
/*
* user process trying to return with weird frame format
*/
-#ifdef DEBUG
+#if DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
+#if DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
+#if DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
-#ifdef DEBUG
+#if DEBUG
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
-#ifdef DEBUG
+#if DEBUG
unsigned long desc;
printk ("pid = %x ", current->pid);
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
-#ifdef DEBUG
+#if DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
#endif
mmusr = temp;
-#ifdef DEBUG
+#if DEBUG
printk("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk("descriptor address is %#lx, contents %#lx\n",
: "a" (&tlong));
printk("tt1 is %#lx\n", tlong);
#endif
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 1\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
should still create the ATC entry. */
goto create_atc_entry;
-#ifdef DEBUG
+#if DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk ("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 2\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
-#ifdef DEBUG
+#if DEBUG
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
-#ifdef DEBUG
+#if DEBUG
printk("Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
printk ("\n");
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+extern void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *endstack;
int i;
* csum_partial_copy_from_user.
*/
-#include <linux/module.h>
#include <net/checksum.h>
/*
#endif
if (irq < VIA1_SOURCE_BASE) {
- cpu_free_irq(irq, dev_id);
- return;
+ return cpu_free_irq(irq, dev_id);
}
if (irq >= NUM_MAC_SOURCES) {
static inline void free_io_area(void *addr)
{
- vfree((void *)(PAGE_MASK & (unsigned long)addr));
+ return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
return 0;
}
-#ifdef DEBUG_INVALID_PTOV
+#if DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif
voff += m68k_memory[i].size;
} while (++i < m68k_num_memory);
-#ifdef DEBUG_INVALID_PTOV
+#if DEBUG_INVALID_PTOV
if (mm_inv_cnt > 0) {
mm_inv_cnt--;
printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
return 1;
}
-void mvme147_reset(void)
+void mvme147_reset()
{
printk ("\r\n\nCalled mvme147_reset\r\n");
m147_pcc->watchdog = 0x0a; /* Clear timer */
return 1;
}
-void mvme16x_reset(void)
+void mvme16x_reset()
{
printk ("\r\n\nCalled mvme16x_reset\r\n"
"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
}
#endif
-void q40_reset(void)
+void q40_reset()
{
halted=1;
printk ("\n\n*******************************************\n"
Q40_LED_ON();
while(1) ;
}
-void q40_halt(void)
+void q40_halt()
{
halted=1;
printk ("\n\n*******************\n"
return 0;
}
-unsigned int q40_get_ss(void)
+unsigned int q40_get_ss()
{
return bcd2bin(Q40_RTC_SECS);
}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
goto out;
if (pos < 0)
goto out;
- ret = -ESPIPE;
- if (!(file->f_mode & FMODE_PREAD))
- goto out;
ret = read(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_ACCESS);
if (pos < 0)
goto out;
- ret = -ESPIPE;
- if (!(file->f_mode & FMODE_PWRITE))
- goto out;
-
ret = write(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
/* And the same for proc */
int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ r = proc_dostring(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
/* proc function to write EEPROM after changing int entry */
int proc_dolasatint(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
#ifdef CONFIG_DS1603
/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
if (rtctmp < 0)
rtctmp = 0;
}
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
static char proc_lasat_ipbuf[32];
/* Parsing of IP address */
int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int len;
unsigned int ip;
char *p, c;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
proc_lasat_ipbuf[len] = 0;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
/* Now see if we can convert it to a valid IP */
ip = in_aton(proc_lasat_ipbuf);
*(unsigned int *)(table->data) = ip;
len++;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
}
update_bcastaddr();
up(&lasat_info_sem);
}
int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, filp, buffer, lenp);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
If you don't know what to do here, say N.
-config HOTPLUG_CPU
- bool
- default y if SMP
- select HOTPLUG
-
-config DISCONTIGMEM
- bool "Discontiguous memory support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa> for more.
-
config PREEMPT
bool
# bool "Preemptible Kernel"
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
-config DEBUG_SPINLOCK
- bool "Spinlock debugging"
- depends on DEBUG_KERNEL
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_RWLOCK
- bool "Read-write spinlock debugging"
- depends on DEBUG_KERNEL && SMP
- help
- If you say Y here then read-write lock processing will count how many
- times it has tried to get the lock and issue an error message after
- too many attempts. If you suspect a rwlock problem or a kernel
- hacker asks for this option then say Y. Otherwise say N.
-
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
help
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# CONFIG_PA7000 is not set
# CONFIG_PA7100LC is not set
# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
CONFIG_PA8X00=y
CONFIG_PA20=y
-CONFIG_PREFETCH=y
CONFIG_PARISC64=y
CONFIG_64BIT=y
+# CONFIG_PDC_NARROW is not set
# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
CONFIG_COMPAT=y
CONFIG_IOSAPIC=y
CONFIG_IOMMU_SBA=y
# CONFIG_SUPERIO is not set
-# CONFIG_CHASSIS_LCD_LED is not set
-CONFIG_PDC_CHASSIS=y
+CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_PDC_CHASSIS is not set
#
# PCMCIA/CardBus support
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
+# CONFIG_SCSI_FC_ATTRS is not set
#
# SCSI low-level drivers
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
CONFIG_SCSI_SYM53C8XX_IOMAPPED=y
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
#
# CONFIG_PCMCIA_FDOMAIN is not set
# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
#
# Multi-device support (RAID and LVM)
#
# I2O device support
#
-# CONFIG_I2O is not set
#
# Networking support
#
# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_IP_NF_ARP_MANGLE=m
# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
#
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
CONFIG_LLC=m
CONFIG_LLC2=m
# CONFIG_IPX is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Network testing
#
CONFIG_NET_PKTGEN=m
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_8139_RXBUF_IDX=1
# CONFIG_SIS900 is not set
CONFIG_EPIC100=m
# CONFIG_SUNDANCE is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=m
#
CONFIG_IXGB=m
CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
#
# Wireless LAN (non-hamradio)
# CONFIG_PRISM54 is not set
CONFIG_NET_WIRELESS=y
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
#
# PCMCIA network device support
#
# CONFIG_PCMCIA_AXNET is not set
#
-# Wan interfaces
+# Amateur Radio support
#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
CONFIG_CIFS=m
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_TEST=m
#
# Library routines
#
CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=15
# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# Processor type and features
#
# CONFIG_PA7000 is not set
-CONFIG_PA7100LC=y
-# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
+# CONFIG_PA7100LC is not set
+CONFIG_PA7200=y
# CONFIG_PA8X00 is not set
CONFIG_PA11=y
# CONFIG_64BIT is not set
CONFIG_PCI_NAMES=y
CONFIG_GSC_DINO=y
# CONFIG_PCI_LBA is not set
-CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_CHASSIS_LCD_LED is not set
# CONFIG_PDC_CHASSIS is not set
#
#
# Generic Driver Options
#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_DEBUG_DRIVER=y
#
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_CARMEL=y
# CONFIG_BLK_DEV_RAM is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_REPORT_LUNS is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_7000FASST is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AHA152X is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_ZALON is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_PSI240I is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
-# CONFIG_NETFILTER is not set
+# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NETFILTER is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
#
# Ethernet (10 or 100Mbit)
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
+# CONFIG_FDDI is not set
+# CONFIG_PLIP is not set
+CONFIG_PPP=y
+# CONFIG_PPP_FILTER is not set
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_SLIP is not set
#
# Wireless LAN (non-hamradio)
#
CONFIG_NET_WIRELESS=y
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_PLIP is not set
-CONFIG_PPP=y
-# CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
-# CONFIG_PPP_SYNC_TTY is not set
-# CONFIG_PPP_DEFLATE is not set
-# CONFIG_PPP_BSDCOMP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
# CONFIG_INPUT_UINPUT is not set
# CONFIG_HP_SDC_RTC is not set
#
# CONFIG_I2C is not set
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
#
# Misc devices
#
# Graphics support
#
CONFIG_FB=y
-# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
# CONFIG_FB_RIVA is not set
CONFIG_DUMMY_CONSOLE_ROWS=64
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+# CONFIG_FAT_FS is not set
# CONFIG_NTFS_FS is not set
#
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
# Miscellaneous filesystems
#
# CONFIG_HFSPLUS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_NFS_V3=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_NFSD_TCP=y
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_TEST is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# CONFIG_PA7000 is not set
# CONFIG_PA7100LC is not set
# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
CONFIG_PA8X00=y
CONFIG_PA20=y
-CONFIG_PREFETCH=y
# CONFIG_PARISC64 is not set
# CONFIG_64BIT is not set
# CONFIG_SMP is not set
CONFIG_IOSAPIC=y
CONFIG_IOMMU_SBA=y
CONFIG_SUPERIO=y
-CONFIG_CHASSIS_LCD_LED=y
+# CONFIG_CHASSIS_LCD_LED is not set
# CONFIG_PDC_CHASSIS is not set
#
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
# CONFIG_IDEDMA_AUTO is not set
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
+# CONFIG_SCSI_FC_ATTRS is not set
#
# SCSI low-level drivers
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=m
CONFIG_SCSI_SATA_PROMISE=m
-# CONFIG_SCSI_SATA_SX4 is not set
CONFIG_SCSI_SATA_SIL=m
-# CONFIG_SCSI_SATA_SIS is not set
CONFIG_SCSI_SATA_VIA=m
# CONFIG_SCSI_SATA_VITESSE is not set
# CONFIG_SCSI_BUSLOGIC is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_PCMCIA_FDOMAIN is not set
# CONFIG_PCMCIA_NINJA_SCSI is not set
CONFIG_PCMCIA_QLOGIC=m
-# CONFIG_PCMCIA_SYM53C500 is not set
#
# Multi-device support (RAID and LVM)
#
# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
CONFIG_NETFILTER=y
CONFIG_NETFILTER_DEBUG=y
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_IP_NF_COMPAT_IPCHAINS=m
CONFIG_IP_NF_COMPAT_IPFWADM=m
-# CONFIG_IP_NF_RAW is not set
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
#
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
CONFIG_LLC=m
CONFIG_LLC2=m
# CONFIG_IPX is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_8139_RXBUF_IDX=1
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=m
#
CONFIG_IXGB=y
CONFIG_IXGB_NAPI=y
-# CONFIG_S2IO is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+# CONFIG_SLIP is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
#
# Token Ring devices
#
# CONFIG_TR is not set
+# CONFIG_NET_FC is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
#
-# Wireless LAN (non-hamradio)
+# Wan interfaces
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_WAN is not set
#
# PCMCIA network device support
CONFIG_PCMCIA_AXNET=m
#
-# Wan interfaces
+# Amateur Radio support
#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
# CONFIG_FB_RIVA is not set
CONFIG_USB_KBTAB=m
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
CONFIG_USB_LEGOTOWER=m
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_TEST is not set
#
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_TEST=m
#
# Library routines
#
CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_PARISC=y
-CONFIG_MMU=y
-CONFIG_STACK_GROWSUP=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-# CONFIG_STANDALONE is not set
-CONFIG_BROKEN=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_HOTPLUG=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_EMBEDDED=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-# CONFIG_PA7000 is not set
-# CONFIG_PA7100LC is not set
-# CONFIG_PA7200 is not set
-# CONFIG_PA7300LC is not set
-CONFIG_PA8X00=y
-CONFIG_PA20=y
-CONFIG_PREFETCH=y
-CONFIG_PARISC64=y
-CONFIG_64BIT=y
-# CONFIG_SMP is not set
-CONFIG_DISCONTIGMEM=y
-# CONFIG_PREEMPT is not set
-CONFIG_COMPAT=y
-
-#
-# Bus options (PCI, PCMCIA, EISA, GSC, ISA)
-#
-# CONFIG_GSC is not set
-CONFIG_PCI=y
-CONFIG_PCI_LEGACY_PROC=y
-CONFIG_PCI_NAMES=y
-CONFIG_PCI_LBA=y
-CONFIG_IOSAPIC=y
-CONFIG_IOMMU_SBA=y
-# CONFIG_SUPERIO is not set
-CONFIG_CHASSIS_LCD_LED=y
-# CONFIG_PDC_CHASSIS is not set
-
-#
-# PCMCIA/CardBus support
-#
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_DEBUG=y
-CONFIG_YENTA=m
-CONFIG_CARDBUS=y
-# CONFIG_I82092 is not set
-# CONFIG_TCIC is not set
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_FW_LOADER is not set
-CONFIG_DEBUG_DRIVER=y
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_CARMEL is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_BLK_DEV_INITRD=y
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_CHR_DEV_SG=y
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_MEGARAID is not set
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_CPQFCTS is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_IOMAPPED=y
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PCI2000 is not set
-# CONFIG_SCSI_PCI2220I is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
-CONFIG_SCSI_QLOGIC_FC=m
-# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-CONFIG_SCSI_QLA6312=m
-CONFIG_SCSI_QLA6322=m
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-CONFIG_SCSI_DEBUG=m
-
-#
-# PCMCIA SCSI adapter support
-#
-# CONFIG_PCMCIA_FDOMAIN is not set
-# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-# CONFIG_MD_RAID5 is not set
-# CONFIG_MD_RAID6 is not set
-# CONFIG_MD_MULTIPATH is not set
-# CONFIG_BLK_DEV_DM is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=m
-CONFIG_FUSION_MAX_SGE=40
-CONFIG_FUSION_ISENSE=m
-CONFIG_FUSION_CTL=m
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-CONFIG_NETLINK_DEV=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-# CONFIG_INET_IPCOMP is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-# CONFIG_IP_NF_NAT_LOCAL is not set
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-CONFIG_LLC=m
-CONFIG_LLC2=m
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_FASTROUTE is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=y
-CONFIG_TULIP=y
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_DM9102 is not set
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_PCMCIA_XIRTULIP=m
-CONFIG_HP100=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
-CONFIG_EEPRO100=m
-# CONFIG_EEPRO100_PIO is not set
-CONFIG_E100=m
-CONFIG_E100_NAPI=y
-# CONFIG_FEALNX is not set
-CONFIG_NATSEMI=m
-# CONFIG_NE2K_PCI is not set
-# CONFIG_8139CP is not set
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_SIS900 is not set
-CONFIG_EPIC100=m
-# CONFIG_SUNDANCE is not set
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_E1000_NAPI=y
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
-CONFIG_TIGON3=m
-
-#
-# Ethernet (10000 Mbit)
-#
-CONFIG_IXGB=m
-CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-CONFIG_NET_RADIO=y
-
-#
-# Obsolete Wireless cards support (pre-802.11)
-#
-# CONFIG_STRIP is not set
-CONFIG_PCMCIA_WAVELAN=m
-CONFIG_PCMCIA_NETWAVE=m
-
-#
-# Wireless 802.11 Frequency Hopping cards support
-#
-# CONFIG_PCMCIA_RAYCS is not set
-
-#
-# Wireless 802.11b ISA/PCI cards support
-#
-# CONFIG_AIRO is not set
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_PCI_HERMES=m
-# CONFIG_ATMEL is not set
-
-#
-# Wireless 802.11b Pcmcia/Cardbus cards support
-#
-CONFIG_PCMCIA_HERMES=m
-CONFIG_AIRO_CS=m
-# CONFIG_PCMCIA_WL3501 is not set
-
-#
-# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-#
-# CONFIG_PRISM54 is not set
-CONFIG_NET_WIRELESS=y
-
-#
-# PCMCIA network device support
-#
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
-# CONFIG_PCMCIA_FMVJ18X is not set
-# CONFIG_PCMCIA_PCNET is not set
-# CONFIG_PCMCIA_NMCLAN is not set
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-# CONFIG_PCMCIA_AXNET is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_PPPOE is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-# CONFIG_SERIO is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_CS is not set
-CONFIG_SERIAL_8250_NR_UARTS=8
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_MULTIPORT is not set
-# CONFIG_SERIAL_8250_RSA is not set
-
-#
-# Non-8250 serial port support
-#
-# CONFIG_SERIAL_MUX is not set
-CONFIG_PDC_CONSOLE=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-CONFIG_GEN_RTC=y
-CONFIG_GEN_RTC_X=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-
-#
-# PCMCIA character devices
-#
-# CONFIG_SYNCLINK_CS is not set
-CONFIG_RAW_DRIVER=y
-CONFIG_MAX_RAW_DEVS=256
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Console display driver support
-#
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE_COLUMNS=160
-CONFIG_DUMMY_CONSOLE_ROWS=64
-CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-# CONFIG_REISERFS_FS is not set
-CONFIG_JFS_FS=m
-# CONFIG_JFS_POSIX_ACL is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_XFS_FS=m
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_SECURITY is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=y
-CONFIG_SUNRPC_GSS=y
-CONFIG_RPCSEC_GSS_KRB5=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-CONFIG_NLS_CODEPAGE_863=m
-# CONFIG_NLS_CODEPAGE_864 is not set
-CONFIG_NLS_CODEPAGE_865=m
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-CONFIG_NLS_ISO8859_15=m
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=m
-
-#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_DEBUG_INFO is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-# CONFIG_CRYPTO_ARC4 is not set
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_TEST=m
-
-#
-# Library routines
-#
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=m
-CONFIG_ZLIB_DEFLATE=m
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_RWLOCK is not set
CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_INFO is not set
{
struct page *page = pte_page(pte);
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
+ if (VALID_PAGE(page) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page(page_address(page));
{
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
- seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %d-way associative)\n",
+ seq_printf(m, "D-cache\t\t: %ld KB (%s)%s\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
- (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
- (cache_info.dc_conf.cc_assoc)
+ (cache_info.dc_conf.cc_sh ? " - shared I/D":"")
);
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
- printk("ic_size %lx dc_size %lx it_size %lx\n",
- cache_info.ic_size,
- cache_info.dc_size,
- cache_info.it_size);
-
- printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
- cache_info.dc_base,
- cache_info.dc_stride,
- cache_info.dc_count,
- cache_info.dc_loop);
-
- printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
- *(unsigned long *) (&cache_info.dc_conf),
- cache_info.dc_conf.cc_alias,
- cache_info.dc_conf.cc_block,
- cache_info.dc_conf.cc_line,
- cache_info.dc_conf.cc_shift);
- printk(" wt %d sh %d cst %d assoc %d\n",
- cache_info.dc_conf.cc_wt,
- cache_info.dc_conf.cc_sh,
- cache_info.dc_conf.cc_cst,
- cache_info.dc_conf.cc_assoc);
-
- printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
- cache_info.ic_base,
- cache_info.ic_stride,
- cache_info.ic_count,
- cache_info.ic_loop);
-
- printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
- *(unsigned long *) (&cache_info.ic_conf),
- cache_info.ic_conf.cc_alias,
- cache_info.ic_conf.cc_block,
- cache_info.ic_conf.cc_line,
- cache_info.ic_conf.cc_shift);
- printk(" wt %d sh %d cst %d assoc %d\n",
- cache_info.ic_conf.cc_wt,
- cache_info.ic_conf.cc_sh,
- cache_info.ic_conf.cc_cst,
- cache_info.ic_conf.cc_assoc);
-
- printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
- cache_info.dt_conf.tc_sh,
- cache_info.dt_conf.tc_page,
- cache_info.dt_conf.tc_cst,
- cache_info.dt_conf.tc_aid,
- cache_info.dt_conf.tc_pad1);
-
- printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
- cache_info.it_conf.tc_sh,
- cache_info.it_conf.tc_page,
- cache_info.it_conf.tc_cst,
- cache_info.it_conf.tc_aid,
- cache_info.it_conf.tc_pad1);
+ printk(KERN_DEBUG "ic_size %lx dc_size %lx it_size %lx pdc_cache_info %d*long pdc_cache_cf %d\n",
+ cache_info.ic_size,
+ cache_info.dc_size,
+ cache_info.it_size,
+ sizeof (struct pdc_cache_info) / sizeof (long),
+ sizeof (struct pdc_cache_cf)
+ );
+
+ printk(KERN_DEBUG "dc base %x dc stride %x dc count %x dc loop %d\n",
+ cache_info.dc_base,
+ cache_info.dc_stride,
+ cache_info.dc_count,
+ cache_info.dc_loop);
+
+ printk(KERN_DEBUG "dc conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
+ cache_info.dc_conf.cc_alias,
+ cache_info.dc_conf.cc_block,
+ cache_info.dc_conf.cc_line,
+ cache_info.dc_conf.cc_wt,
+ cache_info.dc_conf.cc_sh,
+ cache_info.dc_conf.cc_cst,
+ cache_info.dc_conf.cc_assoc);
+
+ printk(KERN_DEBUG "ic conf: alias %d block %d line %d wt %d sh %d cst %d assoc %d\n",
+ cache_info.ic_conf.cc_alias,
+ cache_info.ic_conf.cc_block,
+ cache_info.ic_conf.cc_line,
+ cache_info.ic_conf.cc_wt,
+ cache_info.ic_conf.cc_sh,
+ cache_info.ic_conf.cc_cst,
+ cache_info.ic_conf.cc_assoc);
+
+ printk(KERN_DEBUG "dt conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.dt_conf.tc_sh,
+ cache_info.dt_conf.tc_page,
+ cache_info.dt_conf.tc_cst,
+ cache_info.dt_conf.tc_aid,
+ cache_info.dt_conf.tc_pad1);
+
+ printk(KERN_DEBUG "it conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.it_conf.tc_sh,
+ cache_info.it_conf.tc_page,
+ cache_info.it_conf.tc_cst,
+ cache_info.it_conf.tc_aid,
+ cache_info.it_conf.tc_pad1);
#endif
split_tlb = 0;
split_tlb = 1;
}
- /* "New and Improved" version from Jim Hull
- * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
- */
-#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
- dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
- icache_stride = CAFL_STRIDE(cache_info.ic_conf);
-#undef CAFL_STRIDE
-
+ dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
+ cache_info.dc_conf.cc_line;
+ icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
+ cache_info.ic_conf.cc_line;
#ifndef CONFIG_PA20
if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
- printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
-#if 0
+ printk(KERN_WARNING "Only equivalent aliasing supported\n");
+#ifndef CONFIG_SMP
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
disable_sr_hashing_asm(srhash_type);
}
-void flush_dcache_page(struct page *page)
+void __flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt = NULL;
unsigned long offset;
unsigned long addr;
pgoff_t pgoff;
- pte_t *pte;
- unsigned long pfn = page_to_pfn(page);
-
-
- if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
- return;
- }
flush_kernel_dcache_page(page_address(page));
* isn't there, there's no point exciting the
* nadtlb handler into a nullification frenzy */
-
- if(!(pte = translation_exists(mpnt, addr)))
+ if (!translation_exists(mpnt, addr))
continue;
- /* make sure we really have this page: the private
- * mappings may cover this area but have COW'd this
- * particular page */
- if(pte_pfn(*pte) != pfn)
- continue;
-
__flush_cache_page(mpnt, addr);
break;
}
flush_dcache_mmap_unlock(mapping);
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(__flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
/* Look up a PTE in a 2-Level scheme (faulting at each
* level if the entry isn't present
*
- * NOTE: we use ldw even for LP64, since the short pointers
- * can address up to 1TB
- */
+ * NOTE: we use ldw even for LP64 because our pte
+ * and pmd are allocated <4GB */
.macro L2_ptep pmd,pte,index,va,fault
#if PT_NLEVELS == 3
EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
copy %r0,\pte
ldw,s \index(\pmd),\pmd
- bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
- DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
- copy \pmd,%r9
-#ifdef __LP64__
- shld %r9,PxD_VALUE_SHIFT,\pmd
-#else
- shlw %r9,PxD_VALUE_SHIFT,\pmd
-#endif
EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ bb,>=,n \pmd,_PAGE_PRESENT_BIT,\fault
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
LDREG %r0(\pmd),\pte /* pmd is now pte */
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
copy %r0,\pte
extrd,u,*= \va,31,32,%r0
ldw,s \index(\pgd),\pgd
- extrd,u,*= \va,31,32,%r0
- bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
- extrd,u,*= \va,31,32,%r0
- shld \pgd,PxD_VALUE_SHIFT,\index
- extrd,u,*= \va,31,32,%r0
- copy \index,\pgd
extrd,u,*<> \va,31,32,%r0
ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+ extrd,u,*= \va,31,32,%r0
+ bb,>=,n \pgd,_PAGE_PRESENT_BIT,\fault
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
+ .macro update_dirty ptep,pte,tmp,tmp1
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptep)
ret_from_kernel_thread:
/* Call schedule_tail first though */
- BL schedule_tail, %r2
+ bl schedule_tail, %r2
nop
LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
and %r9,%r16,%r17
cmpb,<>,n %r16,%r17,nadtlb_fault /* Not fdc,fic,pdc */
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
- BL get_register,%r25
+ b,l get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
- BL get_register,%r25
+ b,l get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
- BL set_register,%r25
+ b,l set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
nadtlb_nullify:
dbit_nolock_20w:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
dbit_nolock_11:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
dbit_nolock_20:
#endif
- update_dirty ptp,pte,t1
+ update_dirty ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t1
+ f_extend pte,t0
idtlbt pte,prot
#include <asm/page.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/system.h>
#include <asm/processor.h> /* for boot_cpu_data */
*/
void pdc_emergency_unlock(void)
{
- /* Spinlock DEBUG code freaks out if we unconditionally unlock */
- if (spin_is_locked(&pdc_lock))
- spin_unlock(&pdc_lock);
+ spin_unlock(&pdc_lock);
}
#ifdef __LP64__
int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
{
- int retval = 0;
-
if (!is_pdc_pat())
return -1;
+ int retval = 0;
+
spin_lock_irq(&pdc_lock);
retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
spin_unlock_irq(&pdc_lock);
return retval;
}
-
-/**
- * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
- * @pci_addr: PCI configuration space address for which the read request is being made.
- * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
- * @mem_addr: Pointer to return memory buffer.
- *
- */
-int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
-{
- int retval;
- spin_lock_irq(&pdc_lock);
- retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
- __pa(pdc_result), pci_addr, pci_size);
- switch(pci_size) {
- case 1: *(u8 *) mem_addr = (u8) pdc_result[0];
- case 2: *(u16 *)mem_addr = (u16) pdc_result[0];
- case 4: *(u32 *)mem_addr = (u32) pdc_result[0];
- }
- spin_unlock_irq(&pdc_lock);
-
- return retval;
-}
-
-/**
- * pdc_pat_io_pci_cfg_write - Retrieve information about memory address ranges.
- * @pci_addr: PCI configuration space address for which the write request is being made.
- * @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
- * @value: Pointer to 1, 2, or 4 byte value in low order end of argument to be
- * written to PCI Config space.
- *
- */
-int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
-{
- int retval;
-
- spin_lock_irq(&pdc_lock);
- retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
- pci_addr, pci_size, val);
- spin_unlock_irq(&pdc_lock);
-
- return retval;
-}
#endif /* __LP64__ */
long real64_call(unsigned long fn, ...)
{
va_list args;
- extern struct wide_stack real64_stack __attribute__ ((alias ("real_stack")));
+ extern struct wide_stack real_stack;
extern unsigned long real64_call_asm(unsigned long *,
unsigned long *,
unsigned long);
va_start(args, fn);
- real64_stack.arg0 = va_arg(args, unsigned long);
- real64_stack.arg1 = va_arg(args, unsigned long);
- real64_stack.arg2 = va_arg(args, unsigned long);
- real64_stack.arg3 = va_arg(args, unsigned long);
- real64_stack.arg4 = va_arg(args, unsigned long);
- real64_stack.arg5 = va_arg(args, unsigned long);
- real64_stack.arg6 = va_arg(args, unsigned long);
- real64_stack.arg7 = va_arg(args, unsigned long);
- real64_stack.arg8 = va_arg(args, unsigned long);
- real64_stack.arg9 = va_arg(args, unsigned long);
- real64_stack.arg10 = va_arg(args, unsigned long);
- real64_stack.arg11 = va_arg(args, unsigned long);
- real64_stack.arg12 = va_arg(args, unsigned long);
- real64_stack.arg13 = va_arg(args, unsigned long);
+ real_stack.arg0 = va_arg(args, unsigned long);
+ real_stack.arg1 = va_arg(args, unsigned long);
+ real_stack.arg2 = va_arg(args, unsigned long);
+ real_stack.arg3 = va_arg(args, unsigned long);
+ real_stack.arg4 = va_arg(args, unsigned long);
+ real_stack.arg5 = va_arg(args, unsigned long);
+ real_stack.arg6 = va_arg(args, unsigned long);
+ real_stack.arg7 = va_arg(args, unsigned long);
+ real_stack.arg8 = va_arg(args, unsigned long);
+ real_stack.arg9 = va_arg(args, unsigned long);
+ real_stack.arg10 = va_arg(args, unsigned long);
+ real_stack.arg11 = va_arg(args, unsigned long);
+ real_stack.arg12 = va_arg(args, unsigned long);
+ real_stack.arg13 = va_arg(args, unsigned long);
va_end(args);
- return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
+ return real64_call_asm(&real_stack.sp, &real_stack.arg0, fn);
}
#endif /* __LP64__ */
{HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
{HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
{HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
- {HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
- {HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
- {HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
- {HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
- {HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
- {HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
- {HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
- {HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
- {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
- {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
- {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak"},
{HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"},
{HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"},
{HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"},
{HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"},
{HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"},
{HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus INterface Merced Bus1"},
- {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"},
- {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"},
- {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
- {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
- {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O Bus Converter Merced Port"},
+ {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O Bus Converter Ropes Port"},
+ {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O Bus Converter Merced Port"},
+ {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O Bus Converter Ropes Port"},
{HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
{HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
{HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
{HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"},
- {HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"},
- {HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"},
{HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"},
{HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"},
{HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"},
{HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
{HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
{HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
- {HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
{HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"},
{HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"},
{HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"},
{HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"},
{HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
- {HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
{HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"},
{HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"},
{HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"},
{HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"},
- {HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"},
{HPHW_FAULTY, 0, } /* Special Marker for last entry */
};
[pcxw] { "PA8500 (PCX-W)", "2.0" },
[pcxw_] { "PA8600 (PCX-W+)", "2.0" },
[pcxw2] { "PA8700 (PCX-W2)", "2.0" },
- [mako] { "PA8800 (Mako)", "2.0" }
+ [mako] { "PA8800 (MAKO)", "2.0" }
};
const char * __init
/* Initialize startup VM. Just map first 8 MB of memory */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
- shr %r1,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r1),%r3
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
1:
stw %r3,0(%r4)
- ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ ldo ASM_PAGE_SIZE(%r3),%r3
addib,> -1,%r1,1b
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
ldil L%PA(smp_init_current_idle_task),%sp
ldo R%PA(smp_init_current_idle_task)(%sp),%sp
ldw 0(%sp),%sp /* load task address */
- tophys_r1 %sp
- ldw TASK_THREAD_INFO(%sp), %sp
mtctl %sp,%cr30 /* store in cr30 */
addil L%THREAD_SZ_ALGN,%sp /* stack is above task */
ldo R%THREAD_SZ_ALGN(%r1),%sp
ldil L%PA(pmd0),%r5
ldo R%PA(pmd0)(%r5),%r5
- shrd %r5,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r5),%r3
ldil L%PA(swapper_pg_dir),%r4
ldo R%PA(swapper_pg_dir)(%r4),%r4
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
- shrd %r1,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo _PAGE_TABLE(%r1),%r3
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r5
ldi ASM_PT_INITIAL,%r1
1:
stw %r3,0(%r5)
- ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ ldo ASM_PAGE_SIZE(%r3),%r3
addib,> -1,%r1,1b
ldo ASM_PMD_ENTRY_SIZE(%r5),%r5
- ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
ldil L%PA(pg0),%r1
ldo R%PA(pg0)(%r1),%r1
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
ldd 0(%sp),%sp /* load task address */
- tophys_r1 %sp
ldd TASK_THREAD_INFO(%sp), %sp
mtctl %sp,%cr30 /* store in cr30 */
ldo THREAD_SZ_ALGN(%sp),%sp
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/mmzone.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#endif
};
+struct irq_region_ops cpu_irq_ops = {
+ .disable_irq = disable_cpu_irq,
+ .enable_irq = enable_cpu_irq,
+ .mask_irq = unmask_cpu_irq,
+ .unmask_irq = unmask_cpu_irq
+};
struct irq_region cpu0_irq_region = {
.ops = {
{
struct irq_region *region;
- DBG_IRQ(irq, ("enable_irq(%d) %d+%d EIRR 0x%lx EIEM 0x%lx\n", irq,
- IRQ_REGION(irq), IRQ_OFFSET(irq), mfctl(23), mfctl(15)));
+ DBG_IRQ(irq, ("enable_irq(%d) %d+%d eiem 0x%lx\n", irq,
+ IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
irq = irq_canonicalize(irq);
region = irq_region[IRQ_REGION(irq)];
seq_puts(p, " ");
#ifdef CONFIG_SMP
for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
#endif
- seq_printf(p, " CPU%02d ", i);
+ seq_printf(p, " CPU%02d ", i);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, "[min/avg/max] (CPU cycle counts)");
seq_printf(p, "%3d: ", irq_no);
#ifdef CONFIG_SMP
for (; j < NR_CPUS; j++)
- if (cpu_online(j))
#endif
seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq_no]);
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
- while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
- !cpu_online(next_cpu)))
+ while ((next_cpu < NR_CPUS) && !cpu_data[next_cpu].txn_addr)
next_cpu++;
if (next_cpu >= NR_CPUS)
irq_enter();
++kstat_cpu(cpu).irqs[irq];
- DBG_IRQ(irq, ("do_irq(%d) %d+%d eiem 0x%lx\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
+ DBG_IRQ(irq, ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)));
for (; action; action = action->next) {
#ifdef PARISC_IRQ_CR16_COUNTS
#ifdef DEBUG_IRQ
if (eirr_val != (1UL << MAX_CPU_IRQ))
- printk(KERN_DEBUG "do_cpu_irq_mask 0x%x & 0x%x\n", eirr_val, cpu_eiem);
+ printk(KERN_DEBUG "do_cpu_irq_mask %x\n", eirr_val);
#endif
/* Work our way from MSb to LSb...same order we alloc EIRs */
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
- mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
+ mtctl(-1L, 23); /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
if (!cpu_eiem)
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
-
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(pfnnid_map);
-#endif
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
-#ifdef DEBUG_PCI
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
* interfaces to log Chassis Codes via PDC (firmware)
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
- * Copyright (C) 2002-2004 Thibaut VARENE <varenet@esiee.fr>
+ * Copyright (C) 2002-2003 Thibaut Varene <varenet@esiee.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
-#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#ifdef CONFIG_PDC_CHASSIS
static int pdc_chassis_old = 0;
-static unsigned int pdc_chassis_enabled = 1;
-
-
-/**
- * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
- * @str configuration param: 0 to disable chassis log
- * @return 1
- */
-
-static int __init pdc_chassis_setup(char *str)
-{
- /*panic_timeout = simple_strtoul(str, NULL, 0);*/
- get_option(&str, &pdc_chassis_enabled);
- return 1;
-}
-__setup("pdcchassis=", pdc_chassis_setup);
/**
{
#ifdef CONFIG_PDC_CHASSIS
int handle = 0;
- if (pdc_chassis_enabled) {
- DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
-
- /* Let see if we have something to handle... */
- /* Check for PDC_PAT or old LED Panel */
- pdc_chassis_checkold();
- if (is_pdc_pat()) {
- printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
- handle = 1;
- }
- else if (pdc_chassis_old) {
- printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
- handle = 1;
- }
- if (handle) {
- /* initialize panic notifier chain */
- notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+ DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
- /* initialize reboot notifier chain */
- register_reboot_notifier(&pdc_chassis_reboot_block);
- }
+ /* Let see if we have something to handle... */
+ /* Check for PDC_PAT or old LED Panel */
+ pdc_chassis_checkold();
+ if (is_pdc_pat()) {
+#ifdef __LP64__ /* see pdc_chassis_send_status() */
+ printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
+ handle = 1;
+#endif /* __LP64__ */
+ }
+ else if (pdc_chassis_old) {
+ printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
+ handle = 1;
+ }
+
+ if (handle) {
+ /* initialize panic notifier chain */
+ notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+
+ /* initialize reboot notifier chain */
+ register_reboot_notifier(&pdc_chassis_reboot_block);
}
#endif /* CONFIG_PDC_CHASSIS */
}
/* Maybe we should do that in an other way ? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
- if (pdc_chassis_enabled) {
-
- DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
-
-#ifdef CONFIG_PARISC64
- if (is_pdc_pat()) {
- switch(message) {
- case PDC_CHASSIS_DIRECT_BSTART:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
- break;
-
- case PDC_CHASSIS_DIRECT_BCOMPLETE:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
- break;
-
- case PDC_CHASSIS_DIRECT_SHUTDOWN:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
- break;
-
- case PDC_CHASSIS_DIRECT_PANIC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
- break;
-
- case PDC_CHASSIS_DIRECT_LPMC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
- break;
-
- case PDC_CHASSIS_DIRECT_HPMC:
- retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
- break;
-
- default:
- retval = -1;
- }
- } else retval = -1;
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
+
+#ifdef __LP64__ /* pdc_pat_chassis_send_log is defined only when #ifdef __LP64__ */
+ if (is_pdc_pat()) {
+ switch(message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
+ break;
+
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
#else
- if (pdc_chassis_old) {
- switch (message) {
- case PDC_CHASSIS_DIRECT_BSTART:
- case PDC_CHASSIS_DIRECT_BCOMPLETE:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
- break;
-
- case PDC_CHASSIS_DIRECT_SHUTDOWN:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
- break;
-
- case PDC_CHASSIS_DIRECT_HPMC:
- case PDC_CHASSIS_DIRECT_PANIC:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
- break;
-
- case PDC_CHASSIS_DIRECT_LPMC:
- retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
- break;
-
- default:
- retval = -1;
- }
- } else retval = -1;
-#endif /* CONFIG_PARISC64 */
- } /* if (pdc_chassis_enabled) */
+ if (pdc_chassis_old) {
+ switch (message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#endif /* __LP64__ */
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}
/*
* These bracket the sleeping functions..
*/
+# define first_sched ((unsigned long) scheduling_functions_start_here)
+# define last_sched ((unsigned long) scheduling_functions_end_here)
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
ip = info.ip;
- if (!in_sched_functions(ip))
+ if (ip < first_sched || ip >= last_sched)
return ip;
} while (count++ < 16);
return 0;
+# undef first_sched
+# undef last_sched
}
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/cpu.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
cpu_irq_actions[cpuid] = actions;
}
#endif
-
- /*
- * Bring this CPU up now! (ignore bootstrap cpuid == 0)
- */
-#ifdef CONFIG_SMP
- if (cpuid) {
- cpu_set(cpuid, cpu_present_map);
- cpu_up(cpuid);
- }
-#endif
-
return 0;
}
.section .bss
.export real_stack
- .export real32_stack
- .export real64_stack
.align 64
real_stack:
-real32_stack:
-real64_stack:
.block 8192
#ifdef __LP64__
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root = NULL;
struct proc_dir_entry * proc_gsc_root = NULL;
-struct proc_dir_entry * proc_mckinley_root = NULL;
-
void __init setup_cmdline(char **cmdline_p)
{
case pcxw:
case pcxw_:
case pcxw2:
+ case mako: /* XXX : this is really mckinley bus */
if (NULL == proc_runway_root)
{
proc_runway_root = proc_mkdir("bus/runway", 0);
}
break;
- case mako:
- if (NULL == proc_mckinley_root)
- {
- proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
- }
- break;
default:
/* FIXME: this was added to prevent the compiler
* complaining about missing pcx, pcxs and pcxt
#define kDEBUG 0
+spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;
+
spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;
volatile struct task_struct *smp_init_current_idle_task;
static volatile int cpu_now_booting = 0; /* track which CPU is booting */
+static int parisc_max_cpus = -1; /* Command line */
unsigned long cache_decay_ticks; /* declared by include/linux/sched.h */
-
-static int parisc_max_cpus = 1;
-
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */
+cpumask_t cpu_possible_map = CPU_MASK_NONE; /* Bitmap of Present CPUs */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
{
int i;
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < parisc_max_cpus; i++) {
if (cpu_online(i) && i != smp_processor_id())
send_IPI_single(i, op);
}
unsigned long timeout;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- if (num_online_cpus() < 2)
- return 0;
-
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
EXPORT_SYMBOL(smp_call_function);
+
+
+/*
+ * Setup routine for controlling SMP activation
+ *
+ * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
+ * activation entirely (the MPS table probe still happens, though).
+ *
+ * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
+ * greater than 0, limits the maximum number of CPUs activated in
+ * SMP mode to <NUM>.
+ */
+
+static int __init nosmp(char *str)
+{
+ parisc_max_cpus = 0;
+ return 1;
+}
+
+__setup("nosmp", nosmp);
+
+static int __init maxcpus(char *str)
+{
+ get_option(&str, &parisc_max_cpus);
+ return 1;
+}
+
+__setup("maxcpus=", maxcpus);
+
/*
* Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
* as we want to ensure all TLB's flushed before proceeding.
panic("smp_callin() AAAAaaaaahhhh....\n");
}
+#if 0
/*
* Create the idle task for a new Slave CPU. DO NOT use kernel_thread()
* because that could end up calling schedule(). If it did, the new idle
/*
* Bring one cpu online.
*/
-int __init smp_boot_one_cpu(int cpuid)
+int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
struct task_struct *idle;
long timeout;
panic("SMP: fork failed for CPU:%d", cpuid);
wake_up_forked_process(idle);
- init_idle(idle, cpuid);
+ init_idle(idle, cpunum);
unhash_process(idle);
- idle->thread_info->cpu = cpuid;
+ idle->thread_info->cpu = cpunum;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
- cpu_now_booting = cpuid;
+ cpu_now_booting = cpunum;
/*
** boot strap code needs to know the task address since
smp_init_current_idle_task = idle ;
mb();
- printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
-
/*
** This gets PDC to release the CPU from a very tight loop.
- **
- ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
- ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
- ** is executed after receiving the rendezvous signal (an interrupt to
- ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
- ** contents of memory are valid."
+ ** See MEM_RENDEZ comments in head.S.
*/
- __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpuid].hpa);
+ __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
mb();
/*
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
- if(cpu_online(cpuid)) {
+ if(cpu_online(cpunum)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
alive:
/* Remember the Slave data */
#if (kDEBUG>=100)
- printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
- cpuid, timeout * 100);
+ printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
+ cpuid, cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
- cpu_data[cpuid].state = STATE_RUNNING;
+ cpu_data[cpunum].state = STATE_RUNNING;
#endif
return 0;
}
+#endif
+
void __devinit smp_prepare_boot_cpu(void)
{
#endif
/* Setup BSP mappings */
- printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ init_task.thread_info->cpu = bootstrap_processor;
+ current->thread_info->cpu = bootstrap_processor;
cpu_set(bootstrap_processor, cpu_online_map);
- cpu_set(bootstrap_processor, cpu_present_map);
+ cpu_set(bootstrap_processor, cpu_possible_map);
+
+ /* Mark Bootstrap processor as present */
+ current->active_mm = &init_mm;
cache_decay_ticks = HZ/100; /* FIXME very rough. */
}
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- cpus_clear(cpu_present_map);
- cpu_set(0, cpu_present_map);
- parisc_max_cpus = max_cpus;
- if (!max_cpus)
- printk(KERN_INFO "SMP mode deactivated.\n");
+ if (max_cpus != -1)
+ printk(KERN_INFO "SMP: Limited to %d CPUs\n", max_cpus);
+
+ printk(KERN_INFO "SMP: Monarch CPU activated (%lu.%02lu BogoMIPS)\n",
+ (cpu_data[0].loops_per_jiffy + 25) / 5000,
+ ((cpu_data[0].loops_per_jiffy + 25) / 50) % 100);
+
+ return;
}
int __devinit __cpu_up(unsigned int cpu)
{
- if (cpu != 0 && cpu < parisc_max_cpus)
- smp_boot_one_cpu(cpu);
-
return cpu_online(cpu) ? 0 : -ENOSYS;
}
asmlinkage long sys32_time(compat_time_t *tloc)
{
- struct timeval tv;
- compat_time_t now32;
+ struct timeval tv;
do_gettimeofday(&tv);
- now32 = tv.tv_sec;
+ compat_time_t now32 = tv.tv_sec;
if (tloc)
if (put_user(now32, tloc))
put_user(reclen, &dirent->d_reclen);
copy_to_user(dirent->d_name, name, namlen);
put_user(0, dirent->d_name + namlen);
- dirent = (struct linux32_dirent *)((char *)dirent + reclen);
+ ((char *) dirent) += reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
-#include <asm/unwind.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
/* dumped to the console via printk) */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;
-#endif
-
int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
void dump_stack(void)
{
- show_stack(NULL, NULL);
+ unsigned long stack;
+ show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
-void show_stack(struct task_struct *task, unsigned long *s)
+#ifndef __LP64__
+static int kstack_depth_to_print = 64 * 4;
+#else
+static int kstack_depth_to_print = 128 * 4;
+#endif
+
+void show_stack(struct task_struct *task, unsigned long *sp)
{
- int i = 1;
- struct unwind_frame_info info;
-
- if (!task) {
- unsigned long sp, ip, rp;
-
-HERE:
- asm volatile ("copy %%r30, %0" : "=r"(sp));
- ip = (unsigned long)&&HERE;
- rp = (unsigned long)__builtin_return_address(0);
- unwind_frame_init(&info, current, sp, ip, rp);
- } else {
- unwind_frame_init_from_blocked_task(&info, task);
- }
+ unsigned long *stack;
+ int i;
- printk("Backtrace:\n");
- while (i <= 16) {
- if (unwind_once(&info) < 0 || info.ip == 0)
+ /*
+ * debugging aid: "show_stack(NULL);" prints the
+ * back trace for this cpu.
+ */
+ if (task==NULL)
+ sp = (unsigned long*)&sp;
+ else if(sp == NULL)
+ sp = (unsigned long*)task->thread.regs.ksp;
+
+ stack = sp;
+ printk("\n" KERN_CRIT "Stack Dump:\n");
+ printk(KERN_CRIT " " RFMT ": ", (unsigned long) stack);
+ for (i=0; i < kstack_depth_to_print; i++) {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
+ if (i && ((i & 0x03) == 0))
+ printk("\n" KERN_CRIT " " RFMT ": ",
+ (unsigned long) stack);
+ printk(RFMT " ", *stack--);
+ }
+ printk("\n" KERN_CRIT "\n");
+ show_trace(task, sp);
+}
- if (__kernel_text_address(info.ip)) {
- printk(" [<" RFMT ">] ", info.ip);
+
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *startstack;
+ unsigned long addr;
+ int i;
+
+ startstack = (unsigned long *)((unsigned long)stack & ~(THREAD_SIZE - 1));
+ i = 1;
+ stack = (long *)((long)(stack + 32) &~ (FRAME_SIZE-1)); /* Align */
+ printk("Kernel addresses on the stack:\n");
+ while (stack > startstack) {
+ stack -= 16; /* Stack frames are a multiple of 16 words */
+ addr = stack[16 - RP_OFFSET / sizeof(long)];
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+ printk(" [<" RFMT ">] ", addr);
#ifdef CONFIG_KALLSYMS
- print_symbol("%s\n", info.ip);
+ print_symbol("%s\n", addr);
#else
if ((i & 0x03) == 0)
printk("\n");
* understand what is happening here
*/
+/*
+ * J. David Anglin writes:
+ *
+ * "You have to adjust the current sp to that at the beginning of the function.
+ * There can be up to two stack additions to allocate the frame in the
+ * prologue. Similar things happen in the epilogue. In the presence of
+ * interrupts, you have to be concerned about where you are in the function
+ * and what stack adjustments have taken place."
+ *
+ * For now these cases are not handled, but they should be!
+ */
+
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#define dbg(x...)
#endif
-extern struct unwind_table_entry __start___unwind[];
-extern struct unwind_table_entry __stop___unwind[];
+extern const struct unwind_table_entry __start___unwind[];
+extern const struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
const struct unwind_table_entry *e = 0;
unsigned long lo, hi, mid;
+ addr -= table->base_addr;
+
for (lo = 0, hi = table->length; lo < hi; )
{
mid = (lo + hi) / 2;
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
- void *table_start, void *table_end)
+ const void *table_start, const void *table_end)
{
- struct unwind_table_entry *start = table_start;
- struct unwind_table_entry *end =
- (struct unwind_table_entry *)table_end - 1;
+ const struct unwind_table_entry *start = table_start;
+ const struct unwind_table_entry *end = table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
- table->length = end - start + 1;
+ table->length = end - start;
table->next = NULL;
-
- for (; start <= end; start++) {
- start->region_start += base_addr;
- start->region_end += base_addr;
- }
}
void *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
- void *start, void *end)
+ const void *start, const void *end)
{
struct unwind_table *table;
unsigned long flags;
sp = info->prev_sp;
} while (info->prev_ip < (unsigned long)_stext ||
info->prev_ip > (unsigned long)_etext);
-
- dbg("analyzing func @ %lx with no unwind info, setting prev_sp=%lx prev_ip=%lx\n", info->ip, info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, Save_RP = %d size = %u\n",
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
- dbg("analyzing func @ %lx, insn=%08x @ %lx, frame_size = %ld\n", info->ip, insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x7ec00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
- dbg("analyzing func @ %lx, insn=%08x @ %lx, frame_size = %ld\n", info->ip, insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
- dbg("analyzing func @ %lx, insn=stw rp,-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
- dbg("analyzing func @ %lx, insn=std rp,-16(sp) @ %lx\n", info->ip, npc);
}
}
info->prev_sp = info->sp - frame_size;
if (rpoffset)
- info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
- info->prev_ip = info->rp;
- info->rp = 0;
-
- dbg("analyzing func @ %lx, setting prev_sp=%lx prev_ip=%lx\n", info->ip, info->prev_sp, info->prev_ip);
+ info->prev_ip = *(unsigned long *)(info->prev_sp - rpoffset);
}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
- unsigned long sp, unsigned long ip, unsigned long rp)
+ struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
- info->sp = sp;
- info->ip = ip;
- info->rp = rp;
+ info->sp = regs->ksp;
+ info->ip = regs->kpc;
- dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", t ? (int)t->pid : 0, info->sp, info->ip);
+ dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", (int)t->pid, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *regs = &t->thread.regs;
- unwind_frame_init(info, t, regs->ksp, regs->kpc, 0);
-}
-
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
-{
- unwind_frame_init(info, current, regs->gr[30], regs->iaoq[0],
- regs->gr[2]);
+ unwind_frame_init(info, t, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
#
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
unsigned long __xchg32(int x, int *ptr)
{
unsigned long flags;
- long temp;
+ unsigned long temp;
atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- temp = (long) *ptr; /* XXX - sign extension wanted? */
+ (long) temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
- return (unsigned long)temp;
+ return temp;
}
unsigned long __xchg8(char x, char *ptr)
{
unsigned long flags;
- long temp;
+ unsigned long temp;
atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- temp = (long) *ptr; /* XXX - sign extension wanted? */
+ (long) temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
- return (unsigned long)temp;
+ return temp;
}
+++ /dev/null
-/*
- * Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 2004 Thibaut VARENE <varenet@esiee.fr>
- *
- * Some code stollen from alpha & sparc64 ;)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/hardirq.h> /* in_interrupt() */
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
- volatile unsigned int *a;
- long stuck = INIT_STUCK;
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- int printed = 0;
- int cpu = smp_processor_id();
-
-try_again:
-
- /* Do the actual locking */
- /* <T-Bone> ggg: we can't get stuck on the outter loop?
- * <ggg> T-Bone: We can hit the outer loop
- * alot if multiple CPUs are constantly racing for a lock
- * and the backplane is NOT fair about which CPU sees
- * the update first. But it won't hang since every failed
- * attempt will drop us back into the inner loop and
- * decrement `stuck'.
- * <ggg> K-class and some of the others are NOT fair in the HW
- * implementation so we could see false positives.
- * But fixing the lock contention is easier than
- * fixing the HW to be fair.
- * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
- * spin until the value of the lock changes, or we time out.
- */
- a = __ldcw_align(lock);
- while (stuck && (__ldcw(a) == 0))
- while ((*a == 0) && --stuck);
-
- if (unlikely(stuck <= 0)) {
- printk(KERN_WARNING
- "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
- " owned by %s:%d in %s at %p(%d)\n",
- base_file, line_no, lock->module, lock,
- current->comm, inline_pc, cpu,
- lock->bfile, lock->bline, lock->task->comm,
- lock->previous, lock->oncpu);
- stuck = INIT_STUCK;
- printed = 1;
- goto try_again;
- }
-
- /* Exiting. Got the lock. */
- lock->oncpu = cpu;
- lock->previous = inline_pc;
- lock->task = current;
- lock->bfile = (char *)base_file;
- lock->bline = line_no;
-
- if (unlikely(printed)) {
- printk(KERN_WARNING
- "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
- base_file, line_no, current->comm, inline_pc,
- cpu, jiffies - started);
- }
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
- CHECK_LOCK(lock);
- volatile unsigned int *a = __ldcw_align(lock);
- if (unlikely((*a != 0) && lock->babble)) {
- lock->babble--;
- printk(KERN_WARNING
- "%s:%d: spin_unlock(%s:%p) not locked\n",
- base_file, line_no, lock->module, lock);
- }
- *a = 1;
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
- int ret;
- volatile unsigned int *a = __ldcw_align(lock);
- if ((ret = (__ldcw(a) != 0))) {
- lock->oncpu = smp_processor_id();
- lock->previous = __builtin_return_address(0);
- lock->task = current;
- } else {
- lock->bfile = (char *)base_file;
- lock->bline = line_no;
- }
- return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- * o writer (wants to modify data) attempts to acquire the rwlock
- * o He gets the write lock.
- * o Interupts are still enabled, we take an interrupt with the
- * write still holding the lock.
- * o interrupt handler tries to acquire the rwlock for read.
- * o deadlock since the writer can't release it at this point.
- *
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock. But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- long stuck = INIT_STUCK;
- int printed = 0;
- int cpu = smp_processor_id();
-
- if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */
- printk(KERN_WARNING "write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
- BUG();
- }
-
- /* Note: if interrupts are disabled (which is most likely), the printk
- will never show on the console. We might need a polling method to flush
- the dmesg buffer anyhow. */
-
-retry:
- _raw_spin_lock(&rw->lock);
-
- if(rw->counter != 0) {
- /* this basically never happens */
- _raw_spin_unlock(&rw->lock);
-
- stuck--;
- if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
- printk(KERN_WARNING
- "%s:%d: write_lock stuck on writer"
- " in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- stuck = INIT_STUCK;
- printed = 1;
- }
- else if (unlikely(stuck <= 0)) {
- printk(KERN_WARNING
- "%s:%d: write_lock stuck on reader"
- " in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- stuck = INIT_STUCK;
- printed = 1;
- }
-
- while(rw->counter != 0);
-
- goto retry;
- }
-
- /* got it. now leave without unlocking */
- rw->counter = -1; /* remember we are locked */
-
- if (unlikely(printed)) {
- printk(KERN_WARNING
- "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
- }
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
- void *inline_pc = __builtin_return_address(0);
- unsigned long started = jiffies;
- int cpu = smp_processor_id();
-#endif
- unsigned long flags;
-
- local_irq_save(flags);
- _raw_spin_lock(&rw->lock);
-
- rw->counter++;
-#if 0
- printk(KERN_WARNING
- "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
- bfile, bline, current->comm, inline_pc,
- cpu, jiffies - started);
-#endif
- _raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
goto bytecopy;
while (dest & 3) {
writeb(*(char *)src, dest++);
- src++;
+ ((char *)src)++;
count--;
}
while (count > 3) {
bytecopy:
while (count--) {
writeb(*(char *)src, dest++);
- src++;
+ ((char *)src)++;
}
}
/* Then check for misaligned start address */
if (src & 1) {
*(u8 *)dest = readb(src);
- src++;
- dest++;
+ ((u8 *)src)++;
+ ((u8 *)dest)++;
count--;
if (count < 2) goto bytecopy;
}
if (src & 2) {
*(u16 *)dest = __raw_readw(src);
- src += 2;
- dest += 2;
- count -= 2;
+ ((u16 *)src)++;
+ ((u16 *)dest)++;
+ count-=2;
}
while (count > 3) {
shortcopy:
while (count > 1) {
*(u16 *)dest = __raw_readw(src);
- src += 2;
- dest += 2;
- count -= 2;
+ ((u16 *)src)++;
+ ((u16 *)dest)++;
+ count-=2;
}
bytecopy:
while (count--) {
*(char *)dest = readb(src);
- src++;
- dest++;
+ ((char *)src)++;
+ ((char *)dest)++;
}
}
*/
void insb (unsigned long port, void *dst, unsigned long count)
{
- unsigned char *p;
-
- p = (unsigned char *)dst;
-
- while (((unsigned long)p) & 0x3) {
+ while (((unsigned long)dst) & 0x3) {
if (!count)
return;
count--;
- *p = inb(port);
- p++;
+ *(unsigned char *) dst = inb(port);
+ ((unsigned char *) dst)++;
}
while (count >= 4) {
w |= inb(port) << 16;
w |= inb(port) << 8;
w |= inb(port);
- *(unsigned int *) p = w;
- p += 4;
+ *(unsigned int *) dst = w;
+ ((unsigned int *) dst)++;
}
while (count) {
--count;
- *p = inb(port);
- p++;
+ *(unsigned char *) dst = inb(port);
+ ((unsigned char *) dst)++;
}
}
void insw (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
- unsigned char *p;
-
- p = (unsigned char *)dst;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
- *(unsigned int *)p = l;
- p += 4;
+ *(unsigned int *) dst = l;
+ ((unsigned int *) dst)++;
}
if (count) {
- *(unsigned short *)p = cpu_to_le16(inw(port));
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
}
break;
case 0x02: /* Buffer 16-bit aligned */
- *(unsigned short *)p = cpu_to_le16(inw(port));
- p += 2;
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
+ ((unsigned short *) dst)++;
count--;
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
- *(unsigned int *)p = l;
- p += 4;
+ *(unsigned int *) dst = l;
+ ((unsigned int *) dst)++;
}
if (count) {
- *(unsigned short *)p = cpu_to_le16(inw(port));
+ *(unsigned short *) dst = cpu_to_le16(inw(port));
}
break;
--count;
l = cpu_to_le16(inw(port));
- *p = l >> 8;
- p++;
+ *(unsigned char *) dst = l >> 8;
+ ((unsigned char *) dst)++;
while (count--)
{
l2 = cpu_to_le16(inw(port));
- *(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
- p += 2;
+ *(unsigned short *) dst = (l & 0xff) << 8 | (l2 >> 8);
+ ((unsigned short *) dst)++;
l = l2;
}
- *p = l & 0xff;
+ *(unsigned char *) dst = l & 0xff;
break;
}
}
void insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
- unsigned char *p;
-
- p = (unsigned char *)dst;
if (!count)
return;
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
- *(unsigned int *)p = cpu_to_le32(inl(port));
- p += 4;
+ *(unsigned int *) dst = cpu_to_le32(inl(port));
+ ((unsigned int *) dst)++;
}
break;
--count;
l = cpu_to_le32(inl(port));
- *(unsigned short *)p = l >> 16;
- p += 2;
+ *(unsigned short *) dst = l >> 16;
+ ((unsigned short *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
- p += 4;
+ *(unsigned int *) dst = (l & 0xffff) << 16 | (l2 >> 16);
+ ((unsigned int *) dst)++;
l = l2;
}
- *(unsigned short *)p = l & 0xffff;
+ *(unsigned short *) dst = l & 0xffff;
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
- *(unsigned char *)p = l >> 24;
- p++;
- *(unsigned short *)p = (l >> 8) & 0xffff;
- p += 2;
+ *(unsigned char *) dst = l >> 24;
+ ((unsigned char *) dst)++;
+ *(unsigned short *) dst = (l >> 8) & 0xffff;
+ ((unsigned short *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
- p += 4;
+ *(unsigned int *) dst = (l & 0xff) << 24 | (l2 >> 8);
+ ((unsigned int *) dst)++;
l = l2;
}
- *p = l & 0xff;
+ *(unsigned char *) dst = l & 0xff;
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
- *p = l >> 24;
- p++;
+ *(unsigned char *) dst = l >> 24;
+ ((unsigned char *) dst)++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
- *(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
- p += 4;
+ *(unsigned int *) dst = (l & 0xffffff) << 8 | l2 >> 24;
+ ((unsigned int *) dst)++;
l = l2;
}
- *(unsigned short *)p = (l >> 8) & 0xffff;
- p += 2;
- *p = l & 0xff;
+ *(unsigned short *) dst = (l >> 8) & 0xffff;
+ ((unsigned short *) dst)++;
+ *(unsigned char *) dst = l & 0xff;
break;
}
}
*/
void outsb(unsigned long port, const void * src, unsigned long count)
{
- const unsigned char *p;
-
- p = (const unsigned char *)src;
while (count) {
count--;
- outb(*p, port);
- p++;
+ outb(*(char *)src, port);
+ ((char *) src)++;
}
}
void outsw (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
- const unsigned char *p;
-
- p = (const unsigned char *)src;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) src) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
- l = *(unsigned int *)p;
- p += 4;
+ l = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
- outw(le16_to_cpu(*(unsigned short*)p), port);
+ outw(le16_to_cpu(*(unsigned short*)src), port);
}
break;
case 0x02: /* Buffer 16-bit aligned */
- outw(le16_to_cpu(*(unsigned short*)p), port);
- p += 2;
+ outw(le16_to_cpu(*(unsigned short*)src), port);
+ ((unsigned short *) src)++;
count--;
while (count>=2) {
count -= 2;
- l = *(unsigned int *)p;
- p += 4;
+ l = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
- outw(le16_to_cpu(*(unsigned short *)p), port);
+ outw(le16_to_cpu(*(unsigned short*)src), port);
}
break;
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
- l = *p << 8;
- p++;
+ l = *(unsigned char *) src << 8;
+ ((unsigned char *) src)++;
count--;
while (count)
{
count--;
- l2 = *(unsigned short *)p;
- p += 2;
+ l2 = *(unsigned short *) src;
+ ((unsigned short *) src)++;
outw(le16_to_cpu(l | l2 >> 8), port);
l = l2 << 8;
}
- l2 = *(unsigned char *)p;
+ l2 = *(unsigned char *) src;
outw (le16_to_cpu(l | l2>>8), port);
break;
void outsl (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
- const unsigned char *p;
-
- p = (const unsigned char *)src;
if (!count)
return;
- switch (((unsigned long)p) & 0x3)
+ switch (((unsigned long) src) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
- outl(le32_to_cpu(*(unsigned int *)p), port);
- p += 4;
+ outl(le32_to_cpu(*(unsigned int *) src), port);
+ ((unsigned int *) src)++;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
- l = *(unsigned short *)p;
- p += 2;
+ l = *(unsigned short *) src;
+ ((unsigned short *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l << 16 | l2 >> 16), port);
l = l2;
}
- l2 = *(unsigned short *)p;
+ l2 = *(unsigned short *) src;
outl (le32_to_cpu(l << 16 | l2), port);
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
-
- l = *p << 24;
- p++;
- l |= *(unsigned short *)p << 8;
- p += 2;
-
+
+ l = *(unsigned char *) src << 24;
+ ((unsigned char *) src)++;
+ l |= *(unsigned short *) src << 8;
+ ((unsigned short *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l | l2 >> 24), port);
l = l2 << 8;
}
- l2 = *p;
- outl (le32_to_cpu(l | l2), port);
+ l2 = *(unsigned char *) src;
+ outl (le32_to_cpu(l | l2), port);
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
- l = *p << 24;
- p++;
-
+ l = *(unsigned char *) src << 24;
+ ((unsigned char *) src)++;
while (count--)
{
- l2 = *(unsigned int *)p;
- p += 4;
+ l2 = *(unsigned int *) src;
+ ((unsigned int *) src)++;
outl (le32_to_cpu(l | l2 >> 8), port);
l = l2 << 24;
}
- l2 = *(unsigned short *)p << 16;
- p += 2;
- l2 |= *p;
+ l2 = *(unsigned short *) src << 16;
+ ((unsigned short *) src)++;
+ l2 |= *(unsigned char *) src;
outl (le32_to_cpu(l | l2), port);
break;
}
* Copyright 1999 SuSE GmbH
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
- * Copyright 2004 Randolph Chung (tausq@debian.org)
*
*/
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
-#include <asm/mmzone.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES];
-bootmem_data_t bmem_data[MAX_NUMNODES];
-unsigned char pfnnid_map[PFNNID_MAP_MAX];
+struct node_map_data node_data[MAX_PHYSMEM_RANGES];
+bootmem_data_t bmem_data[MAX_PHYSMEM_RANGES];
+unsigned char *chunkmap;
+unsigned int maxchunkmap;
#endif
static struct resource data_resource = {
disable_sr_hashing(); /* Turn off space register hashing */
+#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * The below is still true as of 2.4.2. If this is ever fixed,
+ * we can remove this warning!
+ */
+
+ printk(KERN_WARNING "\n\n");
+ printk(KERN_WARNING "CONFIG_DISCONTIGMEM is enabled, which is probably a mistake. This\n");
+ printk(KERN_WARNING "option can lead to heavy swapping, even when there are gigabytes\n");
+ printk(KERN_WARNING "of free memory.\n\n");
+#endif
+
+#ifdef __LP64__
+
+#ifndef CONFIG_DISCONTIGMEM
/*
* Sort the ranges. Since the number of ranges is typically
* small, and performance is not an issue here, just do
}
}
-#ifndef CONFIG_DISCONTIGMEM
/*
* Throw out ranges that are too far apart (controlled by
- * MAX_GAP).
+ * MAX_GAP). If CONFIG_DISCONTIGMEM wasn't implemented so
+ * poorly, we would recommend enabling that option, but,
+ * until it is fixed, this is the best way to go.
*/
for (i = 1; i < npmem_ranges; i++) {
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages) > MAX_GAP) {
npmem_ranges = i;
- printk("Large gap in memory detected (%ld pages). "
- "Consider turning on CONFIG_DISCONTIGMEM\n",
- pmem_ranges[i].start_pfn -
- (pmem_ranges[i-1].start_pfn +
- pmem_ranges[i-1].pages));
break;
}
}
}
}
+#endif /* __LP64__ */
+
sysram_resource_count = npmem_ranges;
for (i = 0; i < sysram_resource_count; i++) {
struct resource *res = &sysram_resources[i];
mem_limit_func(); /* check for "mem=" argument */
mem_max = 0;
- num_physpages = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;
npmem_ranges = i + 1;
mem_max = mem_limit;
}
- num_physpages += pmem_ranges[i].pages;
break;
}
- num_physpages += pmem_ranges[i].pages;
mem_max += rsize;
}
printk(KERN_INFO "Total Memory: %ld Mb\n",mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
+
/* Merge the ranges, keeping track of the holes */
{
bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
#ifdef CONFIG_DISCONTIGMEM
- for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
- memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bmem_data[i];
- }
- memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
-
- numnodes = npmem_ranges;
-
for (i = 0; i < npmem_ranges; i++)
- node_set_online(i);
+ node_data[i].pg_data.bdata = &bmem_data[i];
#endif
-
/*
* Initialize and free the full range of memory in each range.
* Note that the only writing these routines do are to the bootmap,
void __init mem_init(void)
{
- high_memory = __va((max_pfn << PAGE_SHIFT));
+ int i;
-#ifndef CONFIG_DISCONTIGMEM
- max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
- mem_map = zone_table[ZONE_DMA]->zone_mem_map;
- totalram_pages += free_all_bootmem();
-#else
- {
- int i;
+ high_memory = __va((max_pfn << PAGE_SHIFT));
+ max_mapnr = (virt_to_page(high_memory - 1) - mem_map) + 1;
- for (i = 0; i < npmem_ranges; i++)
- totalram_pages += free_all_bootmem_node(NODE_DATA(i));
- }
-#endif
+ num_physpages = 0;
+ mem_map = zone_table[0]->zone_mem_map;
+ for (i = 0; i < npmem_ranges; i++)
+ num_physpages += free_all_bootmem_node(NODE_DATA(i));
+ totalram_pages = num_physpages;
printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
-#ifndef CONFIG_DISCONTIGMEM
i = max_mapnr;
while (i-- > 0) {
total++;
reserved++;
else if (PageSwapCache(mem_map+i))
cached++;
- else if (!page_count(&mem_map[i]))
+ else if (!atomic_read(&mem_map[i].count))
free++;
else
- shared += page_count(&mem_map[i]) - 1;
- }
-#else
- for (i = 0; i < npmem_ranges; i++) {
- int j;
-
- for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
- struct page *p;
-
- p = node_mem_map(i) + j - node_start_pfn(i);
-
- total++;
- if (PageReserved(p))
- reserved++;
- else if (PageSwapCache(p))
- cached++;
- else if (!page_count(p))
- free++;
- else
- shared += page_count(p) - 1;
- }
+ shared += atomic_read(&mem_map[i].count) - 1;
}
-#endif
printk(KERN_INFO "%d pages of RAM\n", total);
printk(KERN_INFO "%d reserved pages\n", reserved);
printk(KERN_INFO "%d pages shared\n", shared);
printk(KERN_INFO "%d pages swap cached\n", cached);
-
-
-#ifdef CONFIG_DISCONTIGMEM
- {
- struct zonelist *zl;
- int i, j, k;
-
- for (i = 0; i < npmem_ranges; i++) {
- for (j = 0; j < MAX_NR_ZONES; j++) {
- zl = NODE_DATA(i)->node_zonelists + j;
-
- printk("Zone list for zone %d on node %d: ", j, i);
- for (k = 0; zl->zones[k] != NULL; k++)
- printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
- printk("\n");
- }
- }
- }
-#endif
}
#if PTRS_PER_PMD == 1
pmd = (pmd_t *)__pa(pg_dir);
#else
- pmd = (pmd_t *)pgd_address(*pg_dir);
+ pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));
/*
* pmd is physical at this point
pmd = (pmd_t *) __pa(pmd);
}
- pgd_populate(NULL, pg_dir, __va(pmd));
+ pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
pg_dir++;
* pg_table is physical at this point
*/
- pg_table = (pte_t *)pmd_address(*pmd);
+ pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
if (!pg_table) {
pg_table = (pte_t *)
alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
pg_table = (pte_t *) __pa(pg_table);
}
- pmd_populate_kernel(NULL, pmd, __va(pg_table));
+ pmd_val(*pmd) = _PAGE_TABLE |
+ (unsigned long) pg_table;
/* now change pg_table to kernel virtual addresses */
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
pte_t pte;
+#if !defined(CONFIG_STI_CONSOLE)
+#warning STI console should explicitly allocate executable pages but does not
/*
* Map the fault vector writable so we can
* write the HPMC checksum.
&& address != gw_addr)
pte = __mk_pte(address, PAGE_KERNEL_RO);
else
+#endif
pte = __mk_pte(address, pgprot);
if (address >= end_paddr)
flush_tlb_all_local();
for (i = 0; i < npmem_ranges; i++) {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };
- /* We have an IOMMU, so all memory can go into a single
- ZONE_DMA zone. */
zones_size[ZONE_DMA] = pmem_ranges[i].pages;
-
free_area_init_node(i,NODE_DATA(i),NULL,zones_size,
- pmem_ranges[i].start_pfn, 0);
+ (pmem_ranges[i].start_pfn << PAGE_SHIFT),0);
+ }
#ifdef CONFIG_DISCONTIGMEM
+ /*
+ * Initialize support for virt_to_page() macro.
+ *
+ * Note that MAX_ADDRESS is the largest virtual address that
+ * we can map. However, since we map all physical memory into
+ * the kernel address space, it also has an effect on the maximum
+ * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
+ */
+
+ maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
+ chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);
+
+ for (i = 0; i < maxchunkmap; i++)
+ chunkmap[i] = BADCHUNK;
+
+ for (i = 0; i < npmem_ranges; i++) {
+
+ ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
{
- int j;
- for (j = (node_start_pfn(i) >> PFNNID_SHIFT);
- j <= (node_end_pfn(i) >> PFNNID_SHIFT);
- j++) {
- pfnnid_map[j] = i;
- }
+ unsigned long chunk_paddr;
+ unsigned long end_paddr;
+ int chunknum;
+
+ chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
+ chunk_paddr &= CHUNKMASK;
+
+ chunknum = (int)CHUNKNUM(chunk_paddr);
+ while (chunk_paddr < end_paddr) {
+ if (chunknum >= maxchunkmap)
+ goto badchunkmap1;
+ if (chunkmap[chunknum] != BADCHUNK)
+ goto badchunkmap2;
+ chunkmap[chunknum] = (unsigned char)i;
+ chunk_paddr += CHUNKSZ;
+ chunknum++;
+ }
}
-#endif
}
+
+ return;
+
+badchunkmap1:
+ panic("paging_init: Physical address exceeds maximum address space!\n");
+badchunkmap2:
+ panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
+#endif
}
#ifdef CONFIG_PA20
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, err;
- uint dp_offset;
+ void * dpaddr;
unsigned char *eap;
unsigned long mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
- ep->sen_genscc.scc_rbase = dp_offset;
- cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
+ dpaddr = cpm2_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+ ep->sen_genscc.scc_rbase = cpm2_dpram_offset(dpaddr);
+ cep->rx_bd_base = (cbd_t *)dpaddr;
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
- ep->sen_genscc.scc_tbase = dp_offset;
- cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
+ dpaddr = cpm2_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+ ep->sen_genscc.scc_tbase = cpm2_dpram_offset(dpaddr);
+ cep->tx_bd_base = (cbd_t *)dpaddr;
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
#define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16)
void
-cpm_setbrg(uint brg, uint rate)
+m8xx_cpm_setbrg(uint brg, uint rate)
{
volatile uint *bp;
* with the processor and the microcode patches applied / activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+ rh_attach_region(&cpm_dpmem_info, cp->cp_dpmem + CPM_DATAONLY_BASE,
+ CPM_DATAONLY_SIZE);
}
/*
 * Now it returns the actual physical address of that area.
* use m8xx_cpm_dpram_offset() to get the index
*/
-uint cpm_dpalloc(uint size, uint align)
+void *m8xx_cpm_dpalloc(int size)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- cpm_dpmem_info.alignment = align;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc);
+EXPORT_SYMBOL(m8xx_cpm_dpalloc);
-int cpm_dpfree(uint offset)
+int m8xx_cpm_dpfree(void *addr)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, (void *)offset);
+ ret = rh_free(&cpm_dpmem_info, addr);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(cpm_dpfree);
+EXPORT_SYMBOL(m8xx_cpm_dpfree);
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+void *m8xx_cpm_dpalloc_fixed(void *addr, int size)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- cpm_dpmem_info.alignment = align;
- start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+ start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+EXPORT_SYMBOL(m8xx_cpm_dpalloc_fixed);
-void cpm_dpdump(void)
+void m8xx_cpm_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(cpm_dpdump);
+EXPORT_SYMBOL(m8xx_cpm_dpdump);
-void *cpm_dpram_addr(uint offset)
+int m8xx_cpm_dpram_offset(void *addr)
+{
+ return (u_char *)addr - ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem;
+}
+EXPORT_SYMBOL(m8xx_cpm_dpram_offset);
+
+void *m8xx_cpm_dpram_addr(int offset)
{
return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(m8xx_cpm_dpram_addr);
static int mixer_open(struct inode *inode, struct file *file)
{
mixer.busy = 1;
- return nonseekable_open(inode, file);
+ return 0;
}
sound_set_format(AFMT_MU_LAW);
}
- return nonseekable_open(inode, file);
+ return 0;
err_out_nobusy:
if (file->f_mode & FMODE_WRITE) {
len += sprintf(buffer+len, "\tsq.active = %d sq.syncing = %d\n",
sq.active, sq.syncing);
state.len = len;
- return nonseekable_open(inode, file);
+ return 0;
}
int __init tdm8xx_sound_init(void)
{
int i, has_sound;
- uint dp_offset;
+ uint dp_addr, dp_mem;
volatile uint *sirp;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
/* We need to allocate a transmit and receive buffer
* descriptors from dual port ram.
*/
- dp_addr = cpm_dpalloc(sizeof(cbd_t) * numReadBufs, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numReadBufs);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_rbase = dp_offset;
+ up->smc_rbase = dp_mem;
rx_cur = rx_base = (cbd_t *)bdp;
for (i=0; i<(numReadBufs-1); i++) {
/* Now, do the same for the transmit buffers.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * numBufs, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numBufs);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_tbase = dp_offset;
+ up->smc_tbase = dp_mem;
tx_cur = tx_base = (cbd_t *)bdp;
for (i=0; i<(numBufs-1); i++) {
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, k, err;
- uint dp_offset;
+ void *dp_mem;
+ unsigned int dp_addr;
unsigned char *eap, *ba;
dma_addr_t mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
- ep->sen_genscc.scc_rbase = dp_offset;
- cep->rx_bd_base = cpm_dpram_addr(dp_offset);
-
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
- ep->sen_genscc.scc_tbase = dp_offset;
- cep->tx_bd_base = cpm_dpram_addr(dp_offset);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ ep->sen_genscc.scc_rbase = dp_mem;
+ cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
+
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ ep->sen_genscc.scc_tbase = dp_mem;
+ cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
{
struct serial_state * state;
ser_info_t *info;
- uint mem_addr, iobits, dp_offset;
+ uint mem_addr, dp_addr, dp_mem, iobits;
int i, j, idx;
ushort chan;
volatile cbd_t *bdp;
* descriptors from dual port ram, and a character
* buffer area from host mem.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
info->rx_cur = info->rx_bd_base = (cbd_t *)bdp;
for (j=0; j<(RX_NUM_FIFO-1); j++) {
if (info->state->smc_scc_num & NUM_IS_SCC) {
scp = &cp->cp_scc[idx];
sup = (scc_uart_t *)&cp->cp_dparam[state->port];
- sup->scc_genscc.scc_rbase = dp_offset;
+ sup->scc_genscc.scc_rbase = dp_mem;
}
else {
sp = &cp->cp_smc[idx];
up = (smc_uart_t *)&cp->cp_dparam[state->port];
- up->smc_rbase = dp_offset;
+ up->smc_rbase = dp_mem;
}
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
info->tx_cur = info->tx_bd_base = (cbd_t *)bdp;
for (j=0; j<(TX_NUM_FIFO-1); j++) {
bdp->cbd_sc = (BD_SC_WRAP | BD_SC_INTRPT);
if (info->state->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_tbase = dp_offset;
+ sup->scc_genscc.scc_tbase = dp_mem;
/* Set up the uart parameters in the
* parameter ram.
cp->cp_simode &= ~(0xffff << (idx * 16));
cp->cp_simode |= (i << ((idx * 16) + 12));
- up->smc_tbase = dp_offset;
+ up->smc_tbase = dp_mem;
/* Set up the uart parameters in the
* parameter ram.
static int __init serial_console_setup(struct console *co, char *options)
{
struct serial_state *ser;
- uint mem_addr, bidx, idx, dp_offset;
+ uint mem_addr, dp_addr, dp_mem, bidx, idx;
ushort chan;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
* memory yet because vm allocator isn't initialized
* during this early console init.
*/
- dp_offset = cpm_dpalloc(8, 8);
- mem_addr = (uint)(&cpmp->cp_dpmem[dp_offset]);
+ dp_mem = m8xx_cpm_dpalloc(8);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ mem_addr = (uint)(&cpmp->cp_dpmem[dp_addr]);
/* Allocate space for two buffer descriptors in the DP ram.
*/
- dp_offset = cpm_dpalloc(sizeof(cbd_t) * 2, 8);
+ dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * 2);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
/* Set the physical address of the host memory buffers in
* the buffer descriptors.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
bdp->cbd_bufaddr = iopa(mem_addr);
(bdp+1)->cbd_bufaddr = iopa(mem_addr+4);
*/
if (ser->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_rbase = dp_offset;
- sup->scc_genscc.scc_tbase = dp_offset + sizeof(cbd_t);
+ sup->scc_genscc.scc_rbase = dp_mem;
+ sup->scc_genscc.scc_tbase = dp_mem + sizeof(cbd_t);
/* Set up the uart parameters in the
* parameter ram.
}
else {
- up->smc_rbase = dp_offset; /* Base of receive buffer desc. */
- up->smc_tbase = dp_offset+sizeof(cbd_t); /* Base of xmt buffer desc. */
+ up->smc_rbase = dp_mem; /* Base of receive buffer desc. */
+ up->smc_tbase = dp_mem+sizeof(cbd_t); /* Base of xmt buffer desc. */
up->smc_rfcr = SMC_EB;
up->smc_tfcr = SMC_EB;
default 6xx
config 6xx
- bool "6xx/7xx/74xx/52xx/8260"
+ bool "6xx/7xx/74xx/8260"
help
There are four types of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
- versions (821, 823, 850, 855, 860, 52xx, 8260), the IBM embedded
- versions (403 and 405) and the high end 64 bit Power processors
- (POWER 3, POWER4, and IBM 970 also known as G5)
+ versions (821, 823, 850, 855, 860, 8260), the IBM embedded versions
+ (403 and 405) and the high end 64 bit Power processors (POWER 3,
+ POWER4, and IBM 970 also known as G5)
Unless you are building a kernel for one of the embedded processor
systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
Note that the kernel runs in 32-bit mode even on 64-bit chips.
- Also note that because the 52xx & 82xx family has a 603e core,
- specific support for that chipset is asked later on.
+ Also note that because the 82xx family has a 603e core, specific
+ support for that chipset is asked later on.
config 40x
bool "40x"
fly. This is a nice method to save battery power on notebooks,
because the lower the clock speed, the less power the CPU consumes.
- For more information, take a look at <file:Documentation/cpu-freq> or
+ For more information, take a look at linux/Documentation/cpu-freq or
at <http://www.brodo.de/cpufreq/>
If in doubt, say N.
More information is available at:
<http://linux-apus.sourceforge.net/>.
+config KATANA
+ bool "Artesyn-Katana"
+
+config DMV182
+ bool "Dy-4 SVME/DMV-182"
+
config WILLOW
bool "Cogent-Willow"
bool "Force-PowerPMC250"
config EV64260
- bool "Galileo-EV-64260-BP"
+ bool "Marvell-EV64260BP"
+ help
+ Select EV64260 if configuring a Marvell (formerly Galileo)
+ EV64260BP Evaluation platform.
config SPRUCE
bool "IBM-Spruce"
config SBS8260
bool "SBS8260"
-config RPX8260
+config RPX6
bool "RPXSUPER"
config TQM8260
config ADS8272
bool "ADS8272"
-config LITE5200
- bool "Freescale LITE5200 / (IceCube)"
- select PPC_MPC52xx
- help
- Support for the LITE5200 dev board for the MPC5200 from Freescale.
- This is for the LITE5200 version 2.0 board. Don't know if it changes
- much but it's only been tested on this board version. I think this
- board is also known as IceCube.
-
endchoice
config PQ2ADS
bool
depends on 8xx || 8260
default y
-
-config PPC_MPC52xx
- bool
config 8260
bool "CPM2 Support" if WILLOW
depends on 6xx
- default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx
+ default y if TQM8260 || RPXSUPER || EST8260 || SBS8260 || SBC82xx
help
The MPC8260 is a typical embedded CPU made by Motorola. Selecting
this option means that you wish to build a kernel for a machine with
config CPM2
bool
- depends on 8260 || MPC8560
+ depends on 8260
default y
help
The CPM2 (Communications Processor Module) is a coprocessor on
depends on PPC_PMAC || PPC_CHRP
default y
+menu "Set bridge options"
+ depends on MV64X60
+
+config MV64X60_BASE
+ hex "Set bridge base used by firmware"
+ default "0xf1000000"
+ help
+ A firmware can leave the base address of the bridge's registers at
+ a non-standard location. If so, set this value to reflect the
+ address of that non-standard location.
+
+config MV64X60_NEW_BASE
+ hex "Set bridge base used by kernel"
+ default "0xf1000000"
+ help
+ If the current base address of the bridge's registers is not where
+ you want it, set this value to the address that you want it moved to.
+
+endmenu
+
config PPC_GEN550
bool
- depends on SANDPOINT || MCPN765 || SPRUCE || PPLUS || PCORE || PRPMC750 || K2 || PRPMC800
+ depends on SANDPOINT || MCPN765 || SPRUCE || PPLUS || PCORE || PRPMC750 || K2 || PRPMC800 || (EV64260 && !MV64X60_MPSC) || DMV182
default y
config FORCE
depends on EV64260
default y
+config MV64360
+ bool
+ depends on KATANA || DMV182
+ default y
+
+config MV64X60
+ bool
+ depends on (GT64260 || MV64360)
+ default y
+
config NONMONARCH_SUPPORT
bool "Enable Non-Monarch Support"
depends on PRPMC800
config FSL_OCP
bool
- depends on MPC10X_BRIDGE || PPC_MPC52xx
+ depends on MPC10X_BRIDGE
default y
config MPC10X_OPENPIC
depends on 8xx
default y
-config SERIAL_CONSOLE_BAUD
- int
- depends on EV64260
- default "115200"
-
config PPCBUG_NVRAM
bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
default y if PPC_PREP
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
+config PPC_RTAS
+ bool "Support for RTAS (RunTime Abstraction Services) in /proc"
+ depends on PPC_OF && PROC_FS
+ ---help---
+ When you use this option, you will be able to use RTAS from
+ userspace.
+
+ RTAS stands for RunTime Abstraction Services and should
+ provide a portable way to access and set system information. This is
+ commonly used on RS/6000 (pSeries) computers.
+
+ You can access RTAS via the special proc file system entry rtas.
+ Don't confuse this rtas entry with the one in /proc/device-tree/rtas
+ which is readonly.
+
+ If you don't know if you can use RTAS look into
+ /proc/device-tree/rtas. If there are some entries, it is very likely
+ that you will be able to use RTAS.
+
+ You can do cool things with rtas. To print out information about
+ various sensors in the system, just do a
+
+ $ cat /proc/rtas/sensors
+
+ or if you power off your machine at night but want it running when
+ you enter your office at 7:45 am, do a
+
+ # date -d 'tomorrow 7:30' +%s > /proc/rtas/poweron
+
+ and shutdown.
+
+ If unsure, say Y.
+
config PREP_RESIDUAL
bool "Support for PReP Residual Data"
depends on PPC_PREP
config KGDB
bool "Include kgdb kernel debugger"
- depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
+ depends on DEBUG_KERNEL
select DEBUG_INFO
help
Include in-kernel hooks for kgdb, the Linux kernel source level
config SERIAL_TEXT_DEBUG
bool "Support for early boot texts over serial port"
- depends on 4xx || GT64260 || LOPEC || PPLUS || PRPMC800 || PPC_GEN550 || PPC_MPC52xx
+ depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || PPC_GEN550
config PPC_OCP
bool
- depends on IBM_OCP || FSL_OCP
+ depends on IBM_OCP || FSL_OCP || MV64X60
default y
endmenu
LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic
CPPFLAGS += -Iarch/$(ARCH)
AFLAGS += -Iarch/$(ARCH)
-CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
+cflags-y += -Iarch/$(ARCH) -msoft-float -pipe \
-ffixed-r2 -Wno-uninitialized -mmultiple
CPP = $(CC) -E $(CFLAGS)
-CHECK := $(CHECK) -D__powerpc__=1
-
ifndef CONFIG_E500
-CFLAGS += -mstring
+cflags-y += -mstring
endif
-cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
-cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
-cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
-cpu-as-$(CONFIG_E500) += -Wa,-me500
+cflags-$(CONFIG_4xx) += -Wa,-m405
+cflags-$(CONFIG_E500) += -Wa,-me500
+cflags-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
-AFLAGS += $(cpu-as-y)
-CFLAGS += $(cpu-as-y)
+CFLAGS += $(cflags-y)
head-y := arch/ppc/kernel/head.o
head-$(CONFIG_8xx) := arch/ppc/kernel/head_8xx.o
else
NEW_AS := 0
endif
-# gcc-3.4 and binutils-2.14 are a fatal combination.
-GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-BAD_GCC_AS := $(shell echo mftb 5 | $(AS) -mppc -many -o /dev/null >/dev/null 2>&1 && echo 0 || echo 1)
-checkbin:
-ifeq ($(GCC_VERSION)$(BAD_GCC_AS),03041)
- @echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
- @echo 'correctly with gcc-3.4 and your version of binutils.'
- @echo '*** Please upgrade your binutils or downgrade your gcc'
- @false
-endif
ifneq ($(NEW_AS),0)
+checkbin:
@echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
@echo 'correctly with old versions of binutils.'
@echo '*** Please upgrade your binutils to ${GOODVER} or newer'
@false
-endif
+else
+checkbin:
@true
+endif
CLEAN_FILES += include/asm-$(ARCH)/offsets.h \
arch/$(ARCH)/kernel/asm-offsets.s
void _vprintk(void(*putc)(const char), const char *fmt0, va_list ap);
unsigned char *ISA_io = NULL;
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
+ defined(CONFIG_SERIAL_MPSC_CONSOLE)
extern unsigned long com_port;
extern int serial_tstc(unsigned long com_port);
int tstc(void)
{
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
+ defined(CONFIG_SERIAL_MPSC_CONSOLE)
if(keyb_present)
return (CRT_tstc() || serial_tstc(com_port));
else
int getc(void)
{
while (1) {
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
+ defined(CONFIG_SERIAL_MPSC_CONSOLE)
if (serial_tstc(com_port))
return (serial_getc(com_port));
#endif /* serial console */
{
int x,y;
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
+ defined(CONFIG_SERIAL_MPSC_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' )
serial_putc(com_port, '\r');
y = orig_y;
while ( ( c = *s++ ) != '\0' ) {
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
- || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
+ defined(CONFIG_SERIAL_MPSC_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' ) serial_putc(com_port, '\r');
#endif /* serial console */
end-$(CONFIG_OCOTEA) := ocotea
entrypoint-$(CONFIG_OCOTEA) := 0x01000000
- extra.o-$(CONFIG_EV64260) := direct.o misc-ev64260.o
+ extra.o-$(CONFIG_EV64260) := misc-ev64260.o
end-$(CONFIG_EV64260) := ev64260
cacheflag-$(CONFIG_EV64260) := -include $(clear_L2_L3)
entrypoint-$(CONFIG_SPRUCE) := 0x00800000
misc-$(CONFIG_SPRUCE) += misc-spruce.o
- zimage-$(CONFIG_LITE5200) := zImage-STRIPELF
-zimageinitrd-$(CONFIG_LITE5200) := zImage.initrd-STRIPELF
- end-$(CONFIG_LITE5200) := lite5200
- cacheflag-$(CONFIG_LITE5200) := -include $(clear_L2_L3)
-
-
# SMP images should have a '.smp' suffix.
end-$(CONFIG_SMP) := $(end-y).smp
boot-$(CONFIG_8260) += embed_config.o
boot-$(CONFIG_BSEIP) += iic.o
boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
+boot-$(CONFIG_MV64X60) += misc-mv64x60.o
+boot-$(CONFIG_DMV182) += mv64x60_stub.o
boot-$(CONFIG_RPXCLASSIC) += iic.o pci.o qspan_pci.o
boot-$(CONFIG_RPXLITE) += iic.o
# Different boards need different serial implementations.
-ifeq ($(CONFIG_SERIAL_CPM_CONSOLE),y)
+ifeq ($(CONFIG_SERIAL_CONSOLE),y)
boot-$(CONFIG_8xx) += m8xx_tty.o
boot-$(CONFIG_8260) += m8260_tty.o
endif
-boot-$(CONFIG_SERIAL_MPC52xx_CONSOLE) += mpc52xx_tty.o
-boot-$(CONFIG_GT64260_CONSOLE) += gt64260_tty.o
+boot-$(CONFIG_SERIAL_MPSC_CONSOLE) += mv64x60_tty.o
LIBS := $(common)/lib.a $(bootlib)/lib.a
ifeq ($(CONFIG_PPC_PREP),y)
#endif /* CONFIG_MBX */
#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC) || \
- defined(CONFIG_RPX8260) || defined(CONFIG_EP405)
+ defined(CONFIG_RPX6) || defined(CONFIG_EP405)
/* Helper functions for Embedded Planet boards.
*/
/* Because I didn't find anything that would do this.......
}
}
-#ifdef CONFIG_RPX8260
+#ifdef CONFIG_RPX6
static uint
rpx_baseten(u_char *cp)
{
}
#endif /* SBS8260 */
-#ifdef CONFIG_RPX8260
+#ifdef CONFIG_RPX6
void
embed_config(bd_t **bdp)
{
+++ /dev/null
-/*
- * arch/ppc/boot/simple/gt64260_tty.c
- *
- * Bootloader version of the embedded MPSC/UART driver for the GT64260[A].
- * Note: Due to 64260A errata, DMA will be used for UART input (via SDMA).
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/* This code assumes that the data cache has been disabled (L1, L2, L3). */
-
-#include <linux/config.h>
-#include <linux/serialP.h>
-#include <linux/serial_reg.h>
-#include <asm/serial.h>
-#include <asm/gt64260_defs.h>
-
-extern void udelay(long);
-static void stop_dma(int chan);
-
-static u32 gt64260_base = EV64260_BRIDGE_REG_BASE; /* base addr of 64260 */
-
-inline unsigned
-gt64260_in_le32(volatile unsigned *addr)
-{
- unsigned ret;
-
- __asm__ __volatile__("lwbrx %0,0,%1; eieio" : "=r" (ret) :
- "r" (addr), "m" (*addr));
- return ret;
-}
-
-inline void
-gt64260_out_le32(volatile unsigned *addr, int val)
-{
- __asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
- "r" (val), "r" (addr));
-}
-
-#define GT64260_REG_READ(offs) \
- (gt64260_in_le32((volatile uint *)(gt64260_base + (offs))))
-#define GT64260_REG_WRITE(offs, d) \
- (gt64260_out_le32((volatile uint *)(gt64260_base + (offs)), (int)(d)))
-
-
-static struct {
- u32 sdc;
- u32 sdcm;
- u32 rx_desc;
- u32 rx_buf_ptr;
- u32 scrdp;
- u32 tx_desc;
- u32 sctdp;
- u32 sftdp;
-} sdma_regs;
-
-#define SDMA_REGS_INIT(chan) { \
- sdma_regs.sdc = GT64260_SDMA_##chan##_SDC; \
- sdma_regs.sdcm = GT64260_SDMA_##chan##_SDCM; \
- sdma_regs.rx_desc = GT64260_SDMA_##chan##_RX_DESC; \
- sdma_regs.rx_buf_ptr = GT64260_SDMA_##chan##_RX_BUF_PTR; \
- sdma_regs.scrdp = GT64260_SDMA_##chan##_SCRDP; \
- sdma_regs.tx_desc = GT64260_SDMA_##chan##_TX_DESC; \
- sdma_regs.sctdp = GT64260_SDMA_##chan##_SCTDP; \
- sdma_regs.sftdp = GT64260_SDMA_##chan##_SFTDP; \
-}
-
-typedef struct {
- volatile u16 bufsize;
- volatile u16 bytecnt;
- volatile u32 cmd_stat;
- volatile u32 next_desc_ptr;
- volatile u32 buffer;
-} gt64260_rx_desc_t;
-
-typedef struct {
- volatile u16 bytecnt;
- volatile u16 shadow;
- volatile u32 cmd_stat;
- volatile u32 next_desc_ptr;
- volatile u32 buffer;
-} gt64260_tx_desc_t;
-
-#define MAX_RESET_WAIT 10000
-#define MAX_TX_WAIT 10000
-
-#define RX_NUM_DESC 2
-#define TX_NUM_DESC 2
-
-#define RX_BUF_SIZE 16
-#define TX_BUF_SIZE 16
-
-static gt64260_rx_desc_t rd[RX_NUM_DESC] __attribute__ ((aligned(32)));
-static gt64260_tx_desc_t td[TX_NUM_DESC] __attribute__ ((aligned(32)));
-
-static char rx_buf[RX_NUM_DESC * RX_BUF_SIZE] __attribute__ ((aligned(32)));
-static char tx_buf[TX_NUM_DESC * TX_BUF_SIZE] __attribute__ ((aligned(32)));
-
-static int cur_rd = 0;
-static int cur_td = 0;
-
-
-#define RX_INIT_RDP(rdp) { \
- (rdp)->bufsize = 2; \
- (rdp)->bytecnt = 0; \
- (rdp)->cmd_stat = GT64260_SDMA_DESC_CMDSTAT_L | \
- GT64260_SDMA_DESC_CMDSTAT_F | \
- GT64260_SDMA_DESC_CMDSTAT_O; \
-}
-
-unsigned long
-serial_init(int chan, void *ignored)
-{
- u32 mpsc_adjust, sdma_adjust, brg_bcr;
- int i;
-
- stop_dma(0);
- stop_dma(1);
-
- if (chan != 1) {
- chan = 0; /* default to chan 0 if anything but 1 */
- mpsc_adjust = 0;
- sdma_adjust = 0;
- brg_bcr = GT64260_BRG_0_BCR;
- SDMA_REGS_INIT(0);
- }
- else {
- mpsc_adjust = 0x1000;
- sdma_adjust = 0x2000;
- brg_bcr = GT64260_BRG_1_BCR;
- SDMA_REGS_INIT(1);
- }
-
- /* Set up ring buffers */
- for (i=0; i<RX_NUM_DESC; i++) {
- RX_INIT_RDP(&rd[i]);
- rd[i].buffer = (u32)&rx_buf[i * RX_BUF_SIZE];
- rd[i].next_desc_ptr = (u32)&rd[i+1];
- }
- rd[RX_NUM_DESC - 1].next_desc_ptr = (u32)&rd[0];
-
- for (i=0; i<TX_NUM_DESC; i++) {
- td[i].bytecnt = 0;
- td[i].shadow = 0;
- td[i].buffer = (u32)&tx_buf[i * TX_BUF_SIZE];
- td[i].cmd_stat = GT64260_SDMA_DESC_CMDSTAT_F |
- GT64260_SDMA_DESC_CMDSTAT_L;
- td[i].next_desc_ptr = (u32)&td[i+1];
- }
- td[TX_NUM_DESC - 1].next_desc_ptr = (u32)&td[0];
-
- /* Set MPSC Routing */
- GT64260_REG_WRITE(GT64260_MPSC_MRR, 0x3ffffe38);
- GT64260_REG_WRITE(GT64260_MPP_SERIAL_PORTS_MULTIPLEX, 0x00001102);
-
- /* MPSC 0/1 Rx & Tx get clocks BRG0/1 */
- GT64260_REG_WRITE(GT64260_MPSC_RCRR, 0x00000100);
- GT64260_REG_WRITE(GT64260_MPSC_TCRR, 0x00000100);
-
- /* clear pending interrupts */
- GT64260_REG_WRITE(GT64260_SDMA_INTR_MASK, 0);
-
- GT64260_REG_WRITE(GT64260_SDMA_0_SCRDP + sdma_adjust, &rd[0]);
- GT64260_REG_WRITE(GT64260_SDMA_0_SCTDP + sdma_adjust,
- &td[TX_NUM_DESC - 1]);
- GT64260_REG_WRITE(GT64260_SDMA_0_SFTDP + sdma_adjust,
- &td[TX_NUM_DESC - 1]);
-
- GT64260_REG_WRITE(GT64260_SDMA_0_SDC + sdma_adjust,
- GT64260_SDMA_SDC_RFT | GT64260_SDMA_SDC_SFM |
- GT64260_SDMA_SDC_BLMR | GT64260_SDMA_SDC_BLMT |
- (3 << 12));
-
- /* Set BRG to generate proper baud rate */
- GT64260_REG_WRITE(brg_bcr, ((8 << 18) | (1 << 16) | 36));
-
- /* Put MPSC into UART mode, no null modem, 16x clock mode */
- GT64260_REG_WRITE(GT64260_MPSC_0_MMCRL + mpsc_adjust, 0x000004c4);
- GT64260_REG_WRITE(GT64260_MPSC_0_MMCRH + mpsc_adjust, 0x04400400);
-
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_1 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_9 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_10 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_3 + mpsc_adjust, 4);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_4 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_5 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_6 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_7 + mpsc_adjust, 0);
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_8 + mpsc_adjust, 0);
-
- /* 8 data bits, 1 stop bit */
- GT64260_REG_WRITE(GT64260_MPSC_0_MPCR + mpsc_adjust, (3 << 12));
-
- GT64260_REG_WRITE(GT64260_SDMA_0_SDCM + sdma_adjust,
- GT64260_SDMA_SDCM_ERD);
-
- GT64260_REG_WRITE(GT64260_MPSC_0_CHR_2 + sdma_adjust,
- GT64260_MPSC_UART_CR_EH);
-
- udelay(100);
-
- return (ulong)chan;
-}
-
-static void
-stop_dma(int chan)
-{
- u32 sdma_sdcm = GT64260_SDMA_0_SDCM;
- int i;
-
- if (chan == 1) {
- sdma_sdcm = GT64260_SDMA_1_SDCM;
- }
-
- /* Abort SDMA Rx, Tx */
- GT64260_REG_WRITE(sdma_sdcm,
- GT64260_SDMA_SDCM_AR | GT64260_SDMA_SDCM_STD);
-
- for (i=0; i<MAX_RESET_WAIT; i++) {
- if ((GT64260_REG_READ(sdma_sdcm) & (GT64260_SDMA_SDCM_AR |
- GT64260_SDMA_SDCM_AT)) == 0) break;
- udelay(100);
- }
-
- return;
-}
-
-static int
-wait_for_ownership(void)
-{
- int i;
-
- for (i=0; i<MAX_TX_WAIT; i++) {
- if ((GT64260_REG_READ(sdma_regs.sdcm) &
- GT64260_SDMA_SDCM_TXD) == 0) break;
- udelay(1000);
- }
-
- return (i < MAX_TX_WAIT);
-}
-
-void
-serial_putc(unsigned long com_port, unsigned char c)
-{
- gt64260_tx_desc_t *tdp;
-
- if (wait_for_ownership() == 0) return;
-
- tdp = &td[cur_td];
- if (++cur_td >= TX_NUM_DESC) cur_td = 0;
-
- *(unchar *)(tdp->buffer ^ 7) = c;
- tdp->bytecnt = 1;
- tdp->shadow = 1;
- tdp->cmd_stat = GT64260_SDMA_DESC_CMDSTAT_L |
- GT64260_SDMA_DESC_CMDSTAT_F | GT64260_SDMA_DESC_CMDSTAT_O;
-
- GT64260_REG_WRITE(sdma_regs.sctdp, tdp);
- GT64260_REG_WRITE(sdma_regs.sftdp, tdp);
- GT64260_REG_WRITE(sdma_regs.sdcm,
- GT64260_REG_READ(sdma_regs.sdcm) | GT64260_SDMA_SDCM_TXD);
-
- return;
-}
-
-unsigned char
-serial_getc(unsigned long com_port)
-{
- gt64260_rx_desc_t *rdp;
- unchar c = '\0';
-
- rdp = &rd[cur_rd];
-
- if ((rdp->cmd_stat & (GT64260_SDMA_DESC_CMDSTAT_O |
- GT64260_SDMA_DESC_CMDSTAT_ES)) == 0) {
- c = *(unchar *)(rdp->buffer ^ 7);
- RX_INIT_RDP(rdp);
- if (++cur_rd >= RX_NUM_DESC) cur_rd = 0;
- }
-
- return c;
-}
-
-int
-serial_tstc(unsigned long com_port)
-{
- gt64260_rx_desc_t *rdp;
- int loop_count = 0;
- int rc = 0;
-
- rdp = &rd[cur_rd];
-
- /* Go thru rcv desc's until empty looking for one with data (no error)*/
- while (((rdp->cmd_stat & GT64260_SDMA_DESC_CMDSTAT_O) == 0) &&
- (loop_count++ < RX_NUM_DESC)) {
-
- /* If there was an error, reinit the desc & continue */
- if ((rdp->cmd_stat & GT64260_SDMA_DESC_CMDSTAT_ES) != 0) {
- RX_INIT_RDP(rdp);
- if (++cur_rd >= RX_NUM_DESC) cur_rd = 0;
- rdp = (gt64260_rx_desc_t *)rdp->next_desc_ptr;
- }
- else {
- rc = 1;
- break;
- }
- }
-
- return rc;
-}
-
-void
-serial_close(unsigned long com_port)
-{
- stop_dma(com_port);
- return;
-}
isync
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
mr r29,r3 /* On the MBX860, r3 is the board info pointer.
* On the RPXSUPER, r3 points to the NVRAM
* configuration keys.
mr r3, r29
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
mr r4,r29 /* put the board info pointer where the relocate
* routine will find it
*/
#endif
-#ifdef CONFIG_EV64260
- /* Move 64260's base regs & CS window for external UART */
- bl ev64260_init
+#ifdef CONFIG_MV64X60
+ /* mv64x60 specific hook to do things like moving register base, etc. */
+ bl mv64x60_init
#endif
/* Get the load address.
/* If defined, enables serial console. The value (1 through 4)
* should designate which SCC is used, but this isn't complete. Only
* SCC1 is known to work at this time.
- * We're only linked if SERIAL_CPM_CONSOLE=y, so we only need to test
- * SERIAL_CPM_SCC1.
*/
-#ifdef CONFIG_SERIAL_CPM_SCC1
+#ifdef CONFIG_SCC_CONSOLE
#define SCC_CONSOLE 1
#endif
unsigned long
serial_init(int ignored, bd_t *bd)
{
+ volatile smc_t *sp;
+ volatile smc_uart_t *up;
#ifdef SCC_CONSOLE
volatile scc_t *sccp;
volatile scc_uart_t *sup;
-#else
- volatile smc_t *sp;
- volatile smc_uart_t *up;
#endif
volatile cbd_t *tbdf, *rbdf;
volatile cpm2_map_t *ip;
{
volatile cbd_t *rbdf;
volatile char *buf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
int i, nc;
{
volatile cbd_t *tbdf;
volatile char *buf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
+ extern bd_t *board_info;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
#ifdef SCC_CONSOLE
serial_tstc(void *ignored)
{
volatile cbd_t *rbdf;
-#ifdef SCC_CONSOLE
- volatile scc_uart_t *sup;
-#else
volatile smc_uart_t *up;
-#endif
+ volatile scc_uart_t *sup;
volatile cpm2_map_t *ip;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
* initialize the serial console port.
*/
embed_config(&bp);
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
com_port = serial_init(0, bp);
#endif
rec = (struct bi_record *)((unsigned long)rec + rec->size);
}
puts("Now booting the kernel\n");
-#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
serial_close(com_port);
-#endif
return (unsigned long)hold_residual;
}
/*
* arch/ppc/boot/simple/misc-ev64260.S
- *
+ *
* Host bridge init code for the Marvell/Galileo EV-64260-BP evaluation board
* with a GT64260 onboard.
*
* Author: Mark Greer <mgreer@mvista.com>
*
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <asm/ppc_asm.h>
+#include <asm/processor.h>
#include <asm/cache.h>
-#include <asm/gt64260_defs.h>
-
+#include <asm/mv64x60_defs.h>
#include <platforms/ev64260.h>
- .globl ev64260_init
-ev64260_init:
- li r20,0
+ .globl mv64x60_board_init
+mv64x60_board_init:
+ /* DINK doesn't enable 745x timebase, so enable here (Adrian Cox) */
+ mfspr r25,PVR
+ srwi r25,r25,16
+ cmplwi r25,(PVR_7450 >> 16)
+ bne 1f
+ mfspr r25,HID0
+ oris r25,r25,(HID0_TBEN >> 16)
+ mtspr HID0,r25
+1:
+#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
li r23,20
- /* Relocate galileo's regs */
- addis r25,0,GT64260_INTERNAL_SPACE_DEFAULT_ADDR@h
- ori r25,r25,GT64260_INTERNAL_SPACE_DECODE
- lwbrx r26,0,(r25)
- lis r24,0xffff
- and r26,r26,r24
- addis r24,0,EV64260_BRIDGE_REG_BASE@h
- srw r24,r24,r23
- or r26,r26,r24
- stwbrx r26,0,(r25)
- sync
-
- /* Wait for write to take effect */
- addis r25,0,EV64260_BRIDGE_REG_BASE@h
- ori r25,r25,GT64260_INTERNAL_SPACE_DECODE
-1: lwbrx r24,0,(r25)
- cmpw r24,r26
- bne 1b
-
- /* Change CS2 (UARTS on device module) window */
- addis r25,0,EV64260_BRIDGE_REG_BASE@h
- ori r25,r25,GT64260_CPU_CS_DECODE_2_BOT
+ /*
+ * Change the CS2 window for the UART so that the bootloader
+ * can do I/O thru the UARTs.
+ */
+ addis r25,0,CONFIG_MV64X60_NEW_BASE@h
+ ori r25,r25,MV64x60_CPU2DEV_2_BASE
addis r26,0,EV64260_UART_BASE@h
srw r26,r26,r23
stwbrx r26,0,(r25)
sync
- addis r25,0,EV64260_BRIDGE_REG_BASE@h
- ori r25,r25,GT64260_CPU_CS_DECODE_2_TOP
+ addis r25,0,CONFIG_MV64X60_NEW_BASE@h
+ ori r25,r25,MV64x60_CPU2DEV_2_SIZE
addis r26,0,EV64260_UART_END@h
srw r26,r26,r23
stwbrx r26,0,(r25)
sync
-
+#endif
blr
+
+#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
+.data
+ .globl mv64x60_console_baud
+mv64x60_console_baud:
+.long EV64260_DEFAULT_BAUD
+
+ .globl mv64x60_mpsc_clk_src
+mv64x60_mpsc_clk_src:
+.long EV64260_MPSC_CLK_SRC
+
+ .globl mv64x60_mpsc_clk_freq
+mv64x60_mpsc_clk_freq:
+.long EV64260_MPSC_CLK_FREQ
+#endif
--- /dev/null
+/*
+ * arch/ppc/boot/simple/misc-mv64x60.S
+ *
+ * Code to change the base address of the host bridges and call board specific
+ * init routine.
+ *
+ * Author: Mark Greer <mgreer@mvista.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/mv64x60_defs.h>
+
+ .globl mv64x60_init
+mv64x60_init:
+ mflr r27
+
+#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
+ bl move_base
+#endif
+ bl mv64x60_board_init
+
+ mtlr r27
+ blr
+
+#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
+move_base:
+ li r20,0
+ li r23,20
+
+ /* Relocate bridge's regs */
+ addis r25,0,CONFIG_MV64X60_BASE@h
+ ori r25,r25,MV64x60_INTERNAL_SPACE_DECODE
+ lwbrx r26,0,(r25)
+ lis r24,0xffff
+ and r26,r26,r24
+ addis r24,0,CONFIG_MV64X60_NEW_BASE@h
+ srw r24,r24,r23
+ or r26,r26,r24
+ stwbrx r26,0,(r25)
+ sync
+
+ /* Wait for write to take effect */
+ addis r25,0,CONFIG_MV64X60_NEW_BASE@h
+ ori r25,r25,MV64x60_INTERNAL_SPACE_DECODE
+1: lwbrx r24,0,(r25)
+ cmpw r24,r26
+ bne 1b
+
+ blr
+#endif
* user to edit the cmdline or not.
*/
#if (defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_VGA_CONSOLE)) \
- && !defined(CONFIG_GEMINI)
+ && !defined(CONFIG_GEMINI) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
#define INTERACTIVE_CONSOLE 1
#endif
unsigned long initrd_loc, TotalMemory = 0;
serial_fixups();
-#ifdef CONFIG_SERIAL_8250_CONSOLE
+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
com_port = serial_init(0, NULL);
#endif
puts("\n");
puts("Uncompressing Linux...");
- gunzip(NULL, 0x400000, zimage_start, &zimage_size);
+ gunzip(0, 0x400000, zimage_start, &zimage_size);
puts("done.\n");
/* get the bi_rec address */
+++ /dev/null
-/*
- * arch/ppc/boot/simple/mpc52xx_tty.c
- *
- * Minimal serial functions needed to send messages out a MPC52xx
- * Programmable Serial Controller (PSC).
- *
- * Author: Dale Farnsworth <dfarnsworth@mvista.com>
- *
- * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is licensed
- * "as is" without any warranty of any kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <asm/uaccess.h>
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-#include <asm/serial.h>
-#include <asm/time.h>
-
-#if MPC52xx_PF_CONSOLE_PORT == 0
-#define MPC52xx_CONSOLE MPC52xx_PSC1
-#define MPC52xx_PSC_CONFIG_SHIFT 0
-#elif MPC52xx_PF_CONSOLE_PORT == 1
-#define MPC52xx_CONSOLE MPC52xx_PSC2
-#define MPC52xx_PSC_CONFIG_SHIFT 4
-#elif MPC52xx_PF_CONSOLE_PORT == 2
-#define MPC52xx_CONSOLE MPC52xx_PSC3
-#define MPC52xx_PSC_CONFIG_SHIFT 8
-#else
-#error "MPC52xx_PF_CONSOLE_PORT not defined"
-#endif
-
-static struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE;
-
-/* The decrementer counts at the system bus clock frequency
- * divided by four. The most accurate time base is connected to the
- * rtc. We read the decrementer change during one rtc tick (one second)
- * and multiply by 4 to get the system bus clock frequency.
- */
-int
-mpc52xx_ipbfreq(void)
-{
- struct mpc52xx_rtc *rtc = (struct mpc52xx_rtc*)MPC52xx_RTC;
- struct mpc52xx_cdm *cdm = (struct mpc52xx_cdm*)MPC52xx_CDM;
- int current_time, previous_time;
- int tbl_start, tbl_end;
- int xlbfreq, ipbfreq;
-
- out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */
- previous_time = in_be32(&rtc->time);
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_start = get_tbl();
- previous_time = current_time;
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_end = get_tbl();
- out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */
-
- xlbfreq = (tbl_end - tbl_start) << 8;
- ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ? xlbfreq / 2 : xlbfreq;
-
- return ipbfreq;
-}
-
-unsigned long
-serial_init(int ignored, void *ignored2)
-{
- struct mpc52xx_gpio *gpio = (struct mpc52xx_gpio *)MPC52xx_GPIO;
- int divisor;
- int mode1;
- int mode2;
- u32 val32;
-
- static int been_here = 0;
-
- if (been_here)
- return 0;
-
- been_here = 1;
-
- val32 = in_be32(&gpio->port_config);
- val32 &= ~(0x7 << MPC52xx_PSC_CONFIG_SHIFT);
- val32 |= MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD
- << MPC52xx_PSC_CONFIG_SHIFT;
- out_be32(&gpio->port_config, val32);
-
- out_8(&psc->command, MPC52xx_PSC_RST_TX
- | MPC52xx_PSC_RX_DISABLE | MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command, MPC52xx_PSC_RST_RX);
-
- out_be32(&psc->sicr, 0x0);
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
- out_be16(&psc->tfalarm, 0xf8);
-
- out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1
- | MPC52xx_PSC_RX_ENABLE
- | MPC52xx_PSC_TX_ENABLE);
-
- divisor = ((mpc52xx_ipbfreq()
- / (CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD * 16)) + 1) >> 1;
-
- mode1 = MPC52xx_PSC_MODE_8_BITS | MPC52xx_PSC_MODE_PARNONE
- | MPC52xx_PSC_MODE_ERR;
- mode2 = MPC52xx_PSC_MODE_ONE_STOP;
-
- out_8(&psc->ctur, divisor>>8);
- out_8(&psc->ctlr, divisor);
- out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
- out_8(&psc->mode, mode1);
- out_8(&psc->mode, mode2);
-
- return 0; /* ignored */
-}
-
-void
-serial_putc(void *ignored, const char c)
-{
- serial_init(0, 0);
-
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, c);
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ;
-}
-
-char
-serial_getc(void *ignored)
-{
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY)) ;
-
- return in_8(&psc->mpc52xx_psc_buffer_8);
-}
-
-int
-serial_tstc(void *ignored)
-{
- return (in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY) != 0;
-}
--- /dev/null
+/*
+ * arch/ppc/boot/simple/mv64x60_stub.c
+ *
+ * Stub for board_init() routine called from mv64x60_init().
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+long mv64x60_console_baud = 9600; /* Default baud: 9600 */
+long mv64x60_mpsc_clk_src = 8; /* Default clk src: TCLK */
+long mv64x60_mpsc_clk_freq = 100000000; /* Default clk freq: 100 MHz */
+
+void
+mv64x60_board_init(void)
+{
+}
--- /dev/null
+/*
+ * arch/ppc/boot/simple/mv64x60_tty.c
+ *
+ * Bootloader version of the embedded MPSC/UART driver for the Marvell 64x60.
+ * Note: Due to a GT64260A erratum, DMA will be used for UART input (via SDMA).
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* This code assumes that the data cache has been disabled (L1, L2, L3). */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+#include <asm/serial.h>
+#include <asm/mv64x60_defs.h>
+#include "../../../../drivers/serial/mpsc/mpsc_defs.h"
+
+extern void udelay(long);
+static void stop_dma(int chan);
+
+static u32 mv64x60_base = CONFIG_MV64X60_NEW_BASE;
+
+inline unsigned
+mv64x60_in_le32(volatile unsigned *addr)
+{
+ unsigned ret;
+
+ __asm__ __volatile__("lwbrx %0,0,%1; eieio" : "=r" (ret) :
+ "r" (addr), "m" (*addr));
+ return ret;
+}
+
+inline void
+mv64x60_out_le32(volatile unsigned *addr, int val)
+{
+ __asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
+ "r" (val), "r" (addr));
+}
+
+#define MV64x60_REG_READ(offs) \
+ (mv64x60_in_le32((volatile uint *)(mv64x60_base + (offs))))
+#define MV64x60_REG_WRITE(offs, d) \
+ (mv64x60_out_le32((volatile uint *)(mv64x60_base + (offs)), (int)(d)))
+
+
+typedef struct {
+ u32 sdc;
+ u32 sdcm;
+ u32 rx_desc;
+ u32 rx_buf_ptr;
+ u32 scrdp;
+ u32 tx_desc;
+ u32 sctdp;
+ u32 sftdp;
+} sdma_regs_t;
+
+static sdma_regs_t sdma_regs[2];
+
+#define SDMA_REGS_INIT(s, reg_base) { \
+ (s)->sdc = (reg_base) + SDMA_SDC; \
+ (s)->sdcm = (reg_base) + SDMA_SDCM; \
+ (s)->rx_desc = (reg_base) + SDMA_RX_DESC; \
+ (s)->rx_buf_ptr = (reg_base) + SDMA_RX_BUF_PTR; \
+ (s)->scrdp = (reg_base) + SDMA_SCRDP; \
+ (s)->tx_desc = (reg_base) + SDMA_TX_DESC; \
+ (s)->sctdp = (reg_base) + SDMA_SCTDP; \
+ (s)->sftdp = (reg_base) + SDMA_SFTDP; \
+}
+
+typedef struct {
+ volatile u16 bufsize;
+ volatile u16 bytecnt;
+ volatile u32 cmd_stat;
+ volatile u32 next_desc_ptr;
+ volatile u32 buffer;
+} mv64x60_rx_desc_t;
+
+typedef struct {
+ volatile u16 bytecnt;
+ volatile u16 shadow;
+ volatile u32 cmd_stat;
+ volatile u32 next_desc_ptr;
+ volatile u32 buffer;
+} mv64x60_tx_desc_t;
+
+#define MAX_RESET_WAIT 10000
+#define MAX_TX_WAIT 10000
+
+#define RX_NUM_DESC 2
+#define TX_NUM_DESC 2
+
+#define RX_BUF_SIZE 16
+#define TX_BUF_SIZE 16
+
+static mv64x60_rx_desc_t rd[2][RX_NUM_DESC] __attribute__ ((aligned(32)));
+static mv64x60_tx_desc_t td[2][TX_NUM_DESC] __attribute__ ((aligned(32)));
+
+static char rx_buf[2][RX_NUM_DESC * RX_BUF_SIZE] __attribute__ ((aligned(32)));
+static char tx_buf[2][TX_NUM_DESC * TX_BUF_SIZE] __attribute__ ((aligned(32)));
+
+static int cur_rd[2] = { 0, 0 };
+static int cur_td[2] = { 0, 0 };
+
+static char chan_initialized[2] = { 0, 0 };
+
+
+#define RX_INIT_RDP(rdp) { \
+ (rdp)->bufsize = 2; \
+ (rdp)->bytecnt = 0; \
+ (rdp)->cmd_stat = SDMA_DESC_CMDSTAT_L | \
+ SDMA_DESC_CMDSTAT_F | \
+ SDMA_DESC_CMDSTAT_O; \
+}
+
+unsigned long
+serial_init(int chan, void *ignored)
+{
+ u32 mpsc_base, mpsc_routing_base, sdma_base, brg_bcr, cdv;
+ int i;
+ extern long mv64x60_console_baud;
+ extern long mv64x60_mpsc_clk_src;
+ extern long mv64x60_mpsc_clk_freq;
+
+ chan = (chan == 1); /* default to chan 0 if anything but 1 */
+
+ if (chan_initialized[chan]) return chan;
+
+ chan_initialized[chan] = 1;
+
+ if (chan == 0) {
+ mpsc_base = MV64x60_MPSC_0_OFFSET;
+ sdma_base = MV64x60_SDMA_0_OFFSET;
+ brg_bcr = MV64x60_BRG_0_OFFSET + BRG_BCR;
+ SDMA_REGS_INIT(&sdma_regs[0], MV64x60_SDMA_0_OFFSET);
+ }
+ else {
+ mpsc_base = MV64x60_MPSC_1_OFFSET;
+ sdma_base = MV64x60_SDMA_1_OFFSET;
+ brg_bcr = MV64x60_BRG_1_OFFSET + BRG_BCR;
+		SDMA_REGS_INIT(&sdma_regs[1], MV64x60_SDMA_1_OFFSET);
+ }
+
+ mpsc_routing_base = MV64x60_MPSC_ROUTING_OFFSET;
+
+ stop_dma(chan);
+
+ /* Set up ring buffers */
+ for (i=0; i<RX_NUM_DESC; i++) {
+ RX_INIT_RDP(&rd[chan][i]);
+ rd[chan][i].buffer = (u32)&rx_buf[chan][i * RX_BUF_SIZE];
+ rd[chan][i].next_desc_ptr = (u32)&rd[chan][i+1];
+ }
+ rd[chan][RX_NUM_DESC - 1].next_desc_ptr = (u32)&rd[chan][0];
+
+ for (i=0; i<TX_NUM_DESC; i++) {
+ td[chan][i].bytecnt = 0;
+ td[chan][i].shadow = 0;
+ td[chan][i].buffer = (u32)&tx_buf[chan][i * TX_BUF_SIZE];
+ td[chan][i].cmd_stat = SDMA_DESC_CMDSTAT_F|SDMA_DESC_CMDSTAT_L;
+ td[chan][i].next_desc_ptr = (u32)&td[chan][i+1];
+ }
+ td[chan][TX_NUM_DESC - 1].next_desc_ptr = (u32)&td[chan][0];
+
+ /* Set MPSC Routing */
+ MV64x60_REG_WRITE(mpsc_routing_base + MPSC_MRR, 0x3ffffe38);
+
+/* XXXX Not for 64360 XXXX*/
+ MV64x60_REG_WRITE(GT64260_MPP_SERIAL_PORTS_MULTIPLEX, 0x00001102);
+
+ /* MPSC 0/1 Rx & Tx get clocks BRG0/1 */
+ MV64x60_REG_WRITE(mpsc_routing_base + MPSC_RCRR, 0x00000100);
+ MV64x60_REG_WRITE(mpsc_routing_base + MPSC_TCRR, 0x00000100);
+
+ /* clear pending interrupts */
+ MV64x60_REG_WRITE(MV64x60_SDMA_INTR_OFFSET + SDMA_INTR_MASK, 0);
+
+ MV64x60_REG_WRITE(SDMA_SCRDP + sdma_base, &rd[chan][0]);
+ MV64x60_REG_WRITE(SDMA_SCTDP + sdma_base, &td[chan][TX_NUM_DESC - 1]);
+ MV64x60_REG_WRITE(SDMA_SFTDP + sdma_base, &td[chan][TX_NUM_DESC - 1]);
+
+ MV64x60_REG_WRITE(SDMA_SDC + sdma_base,
+ SDMA_SDC_RFT | SDMA_SDC_SFM | SDMA_SDC_BLMR | SDMA_SDC_BLMT |
+ (3 << 12));
+
+ cdv = ((mv64x60_mpsc_clk_freq/(32*mv64x60_console_baud))-1);
+ MV64x60_REG_WRITE(brg_bcr,
+ ((mv64x60_mpsc_clk_src << 18) | (1 << 16) | cdv));
+
+ /* Put MPSC into UART mode, no null modem, 16x clock mode */
+ MV64x60_REG_WRITE(MPSC_MMCRL + mpsc_base, 0x000004c4);
+ MV64x60_REG_WRITE(MPSC_MMCRH + mpsc_base, 0x04400400);
+
+ MV64x60_REG_WRITE(MPSC_CHR_1 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_9 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_10 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_3 + mpsc_base, 4);
+ MV64x60_REG_WRITE(MPSC_CHR_4 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_5 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_6 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_7 + mpsc_base, 0);
+ MV64x60_REG_WRITE(MPSC_CHR_8 + mpsc_base, 0);
+
+ /* 8 data bits, 1 stop bit */
+ MV64x60_REG_WRITE(MPSC_MPCR + mpsc_base, (3 << 12));
+ MV64x60_REG_WRITE(SDMA_SDCM + sdma_base, SDMA_SDCM_ERD);
+ MV64x60_REG_WRITE(MPSC_CHR_2 + mpsc_base, MPSC_CHR_2_EH);
+
+ udelay(100);
+
+ return chan;
+}
+
+static void
+stop_dma(int chan)
+{
+ int i;
+
+ /* Abort SDMA Rx, Tx */
+ MV64x60_REG_WRITE(sdma_regs[chan].sdcm, SDMA_SDCM_AR | SDMA_SDCM_STD);
+
+ for (i=0; i<MAX_RESET_WAIT; i++) {
+ if ((MV64x60_REG_READ(sdma_regs[chan].sdcm) &
+ (SDMA_SDCM_AR | SDMA_SDCM_AT)) == 0) {
+ break;
+ }
+ udelay(100);
+ }
+
+ return;
+}
+
+static int
+wait_for_ownership(int chan)
+{
+ int i;
+
+ for (i=0; i<MAX_TX_WAIT; i++) {
+ if ((MV64x60_REG_READ(sdma_regs[chan].sdcm) &
+ SDMA_SDCM_TXD) == 0)
+ break;
+ udelay(1000);
+ }
+
+ return (i < MAX_TX_WAIT);
+}
+
+void
+serial_putc(unsigned long com_port, unsigned char c)
+{
+ mv64x60_tx_desc_t *tdp;
+
+ if (wait_for_ownership(com_port) == 0) return;
+
+ tdp = &td[com_port][cur_td[com_port]];
+ if (++cur_td[com_port] >= TX_NUM_DESC) cur_td[com_port] = 0;
+
+ *(unchar *)(tdp->buffer ^ 7) = c;
+ tdp->bytecnt = 1;
+ tdp->shadow = 1;
+ tdp->cmd_stat = SDMA_DESC_CMDSTAT_L | SDMA_DESC_CMDSTAT_F |
+ SDMA_DESC_CMDSTAT_O;
+
+ MV64x60_REG_WRITE(sdma_regs[com_port].sctdp, tdp);
+ MV64x60_REG_WRITE(sdma_regs[com_port].sftdp, tdp);
+ MV64x60_REG_WRITE(sdma_regs[com_port].sdcm,
+ MV64x60_REG_READ(sdma_regs[com_port].sdcm) | SDMA_SDCM_TXD);
+
+ return;
+}
+
+unsigned char
+serial_getc(unsigned long com_port)
+{
+ mv64x60_rx_desc_t *rdp;
+ unchar c = '\0';
+
+ rdp = &rd[com_port][cur_rd[com_port]];
+
+ if ((rdp->cmd_stat & (SDMA_DESC_CMDSTAT_O|SDMA_DESC_CMDSTAT_ES)) == 0) {
+ c = *(unchar *)(rdp->buffer ^ 7);
+ RX_INIT_RDP(rdp);
+ if (++cur_rd[com_port] >= RX_NUM_DESC) cur_rd[com_port] = 0;
+ }
+
+ return c;
+}
+
+int
+serial_tstc(unsigned long com_port)
+{
+ mv64x60_rx_desc_t *rdp;
+ int loop_count = 0;
+ int rc = 0;
+
+ rdp = &rd[com_port][cur_rd[com_port]];
+
+ /* Go thru rcv desc's until empty looking for one with data (no error)*/
+ while (((rdp->cmd_stat & SDMA_DESC_CMDSTAT_O) == 0) &&
+ (loop_count++ < RX_NUM_DESC)) {
+
+ /* If there was an error, reinit the desc & continue */
+ if ((rdp->cmd_stat & SDMA_DESC_CMDSTAT_ES) != 0) {
+ RX_INIT_RDP(rdp);
+ if (++cur_rd[com_port] >= RX_NUM_DESC) {
+ cur_rd[com_port] = 0;
+ }
+ rdp = (mv64x60_rx_desc_t *)rdp->next_desc_ptr;
+ }
+ else {
+ rc = 1;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void
+serial_close(unsigned long com_port)
+{
+ stop_dma(com_port);
+ return;
+}
#include <sys/stat.h>
#include <unistd.h>
#include <netinet/in.h>
-#include <stdint.h>
/* This gets tacked on the front of the image. There are also a few
* bytes allocated after the _start label used by the boot rom (see
* head.S for details).
*/
typedef struct boot_block {
- uint32_t bb_magic; /* 0x0052504F */
- uint32_t bb_dest; /* Target address of the image */
- uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
- uint32_t bb_debug_flag; /* Run debugger or image after load */
- uint32_t bb_entry_point; /* The image address to start */
- uint32_t bb_checksum; /* 32 bit checksum including header */
- uint32_t reserved[2];
+ unsigned long bb_magic; /* 0x0052504F */
+ unsigned long bb_dest; /* Target address of the image */
+ unsigned long bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
+ unsigned long bb_debug_flag; /* Run debugger or image after load */
+ unsigned long bb_entry_point; /* The image address to start */
+ unsigned long bb_checksum; /* 32 bit checksum including header */
+ unsigned long reserved[2];
} boot_block_t;
#define IMGBLK 512
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_HAVE_DEC_LOCK=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
#
# General setup
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# Loadable module support
CONFIG_KMOD=y
#
-# Platform support
+# Processor
#
-CONFIG_PPC=y
-CONFIG_PPC32=y
CONFIG_6xx=y
# CONFIG_40x is not set
+# CONFIG_44x is not set
# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
# CONFIG_8xx is not set
+CONFIG_ALTIVEC=y
+# CONFIG_TAU is not set
+# CONFIG_CPU_FREQ is not set
+CONFIG_PPC_STD_MMU=y
#
-# IBM 4xx options
+# Platform options
#
-# CONFIG_8260 is not set
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_PPC_STD_MMU=y
# CONFIG_PPC_MULTIPLATFORM is not set
# CONFIG_APUS is not set
-# CONFIG_WILLOW_2 is not set
+# CONFIG_KATANA is not set
+# CONFIG_WILLOW is not set
# CONFIG_PCORE is not set
# CONFIG_POWERPMC250 is not set
CONFIG_EV64260=y
# CONFIG_K2 is not set
# CONFIG_PAL4 is not set
# CONFIG_GEMINI is not set
+# CONFIG_EST8260 is not set
+# CONFIG_SBS8260 is not set
+# CONFIG_RPX6 is not set
+# CONFIG_TQM8260 is not set
+
+#
+# Set bridge base address
+#
+CONFIG_MV64X60_BASE=0xf1000000
+CONFIG_MV64X60_NEW_BASE=0xfbe00000
+CONFIG_PPC_GEN550=y
CONFIG_GT64260=y
+CONFIG_MV64X60=y
CONFIG_SERIAL_CONSOLE_BAUD=115200
# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
-CONFIG_ALTIVEC=y
-CONFIG_TAU=y
-# CONFIG_TAU_INT is not set
-# CONFIG_TAU_AVERAGE is not set
-# CONFIG_CPU_FREQ is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_KERNEL_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200 ip=on"
#
-# General setup
+# Bus options
#
-# CONFIG_HIGHMEM is not set
+CONFIG_GENERIC_ISA_DMA=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
-CONFIG_KCORE_ELF=y
-CONFIG_BINFMT_ELF=y
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_MISC=y
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
-# CONFIG_HOTPLUG is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-# CONFIG_PPC601_SYNC_FIX is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,115200 ip=on"
#
# Advanced setup
CONFIG_TASK_SIZE=0x80000000
CONFIG_BOOT_LOAD=0x00800000
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
#
# Plug and Play support
#
-# CONFIG_PNP is not set
#
# Block devices
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_CARMEL is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
+# CONFIG_LBD is not set
#
-# Multi-device support (RAID and LVM)
+# ATA/ATAPI/MFM/RLL support
#
-# CONFIG_MD is not set
+# CONFIG_IDE is not set
#
-# ATA/IDE/MFM/RLL support
+# SCSI device support
#
-# CONFIG_IDE is not set
+# CONFIG_SCSI is not set
#
-# SCSI support
+# Multi-device support (RAID and LVM)
#
-# CONFIG_SCSI is not set
+# CONFIG_MD is not set
#
# Fusion MPT device support
#
#
-# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+# IEEE 1394 (FireWire) support
#
# CONFIG_IEEE1394 is not set
#
# CONFIG_I2O is not set
+#
+# Macintosh device drivers
+#
+
#
# Networking support
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
# CONFIG_NETLINK_DEV is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
-# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
-
-#
-# IP: Netfilter Configuration
-#
-# CONFIG_IP_NF_CONNTRACK is not set
-# CONFIG_IP_NF_QUEUE is not set
-# CONFIG_IP_NF_IPTABLES is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
# CONFIG_IPV6 is not set
-# CONFIG_XFRM_USER is not set
+# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
-# CONFIG_LLC is not set
# CONFIG_DECNET is not set
-# CONFIG_BRIDGE is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
+CONFIG_MII=y
# CONFIG_OAKNET is not set
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
#
# Tulip family network device support
#
-# CONFIG_NET_TULIP is not set
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=y
# CONFIG_EEPRO100_PIO is not set
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
+# CONFIG_S2IO is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_TR is not set
#
-# Token Ring devices (depends on LLC=y)
+# Wireless LAN (non-hamradio)
#
-# CONFIG_RCPCI is not set
-# CONFIG_SHAPER is not set
+# CONFIG_NET_RADIO is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-
-#
-# Amateur Radio support
-#
-# CONFIG_HAMRADIO is not set
-
-#
-# IrDA (infrared) support
-#
-# CONFIG_IRDA is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
#
# ISDN subsystem
#
-# CONFIG_ISDN_BOOL is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
+# CONFIG_ISDN is not set
#
-# Old CD-ROM drivers (not SCSI, not IDE)
+# Telephony Support
#
-# CONFIG_CD_NO_IDESCSI is not set
+# CONFIG_PHONE is not set
#
# Input device support
#
-# CONFIG_INPUT is not set
+CONFIG_INPUT=y
#
# Userland interfaces
#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
#
# Input I/O drivers
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
# CONFIG_SERIO is not set
+# CONFIG_SERIO_I8042 is not set
#
# Input Device Drivers
#
-
-#
-# Macintosh device drivers
-#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
#
# Character devices
#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MPSC is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Hardware Sensors Mainboard support
-#
-# CONFIG_I2C_ALI15X3 is not set
-# CONFIG_I2C_AMD756 is not set
-# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_I801 is not set
-# CONFIG_I2C_PIIX4 is not set
-# CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_VIAPRO is not set
-
-#
-# I2C Hardware Sensors Chip support
-#
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_I2C_SENSOR is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_QIC02_TAPE is not set
#
# CONFIG_AGP is not set
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Misc devices
+#
#
# Multimedia devices
#
# CONFIG_DVB is not set
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
#
# File systems
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
CONFIG_DEVFS_FS=y
# CONFIG_DEVFS_MOUNT is not set
# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
#
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
# CONFIG_NFSD is not set
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_GSS is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_MSDOS_PARTITION=y
#
-# Sound
+# Native Language Support
#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-# CONFIG_USB_GADGET is not set
-
-#
-# Bluetooth support
-#
-# CONFIG_BT is not set
+# CONFIG_NLS is not set
#
# Library routines
#
-# CONFIG_CRC32 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
#
# Kernel hacking
#
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_KALLSYMS is not set
# CONFIG_SERIAL_TEXT_DEBUG is not set
+CONFIG_PPC_OCP=y
#
# Security options
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MMU=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_HAVE_DEC_LOCK=y
-CONFIG_PPC=y
-CONFIG_PPC32=y
-CONFIG_GENERIC_NVRAM=y
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
-CONFIG_BROKEN_ON_SMP=y
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-CONFIG_MODVERSIONS=y
-CONFIG_KMOD=y
-#
-# Processor
-#
-CONFIG_6xx=y
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_POWER3 is not set
-# CONFIG_POWER4 is not set
-# CONFIG_8xx is not set
-# CONFIG_E500 is not set
-# CONFIG_ALTIVEC is not set
-# CONFIG_TAU is not set
-# CONFIG_CPU_FREQ is not set
-CONFIG_FSL_OCP=y
-CONFIG_PPC_STD_MMU=y
-#
-# Platform options
-#
-# CONFIG_PPC_MULTIPLATFORM is not set
-# CONFIG_APUS is not set
-# CONFIG_WILLOW is not set
-# CONFIG_PCORE is not set
-# CONFIG_POWERPMC250 is not set
-# CONFIG_EV64260 is not set
-# CONFIG_SPRUCE is not set
-# CONFIG_LOPEC is not set
-# CONFIG_MCPN765 is not set
-# CONFIG_MVME5100 is not set
-# CONFIG_PPLUS is not set
-# CONFIG_PRPMC750 is not set
-# CONFIG_PRPMC800 is not set
-# CONFIG_SANDPOINT is not set
-# CONFIG_ADIR is not set
-# CONFIG_K2 is not set
-# CONFIG_PAL4 is not set
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_SBC82xx is not set
-# CONFIG_SBS8260 is not set
-# CONFIG_RPX6 is not set
-# CONFIG_TQM8260 is not set
-# CONFIG_ADS8272 is not set
-CONFIG_LITE5200=y
-CONFIG_PPC_MPC52xx=y
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 root=/dev/ram0 rw"
-#
-# Bus options
-#
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_PCI=y
-CONFIG_PCI_DOMAINS=y
-# CONFIG_PCI_LEGACY_PROC is not set
-# CONFIG_PCI_NAMES is not set
-#
-# Advanced setup
-#
-CONFIG_ADVANCED_OPTIONS=y
-CONFIG_HIGHMEM_START=0xfe000000
-# CONFIG_LOWMEM_SIZE_BOOL is not set
-CONFIG_LOWMEM_SIZE=0x30000000
-# CONFIG_KERNEL_START_BOOL is not set
-CONFIG_KERNEL_START=0xc0000000
-# CONFIG_TASK_SIZE_BOOL is not set
-CONFIG_TASK_SIZE=0x80000000
-# CONFIG_BOOT_LOAD_BOOL is not set
-CONFIG_BOOT_LOAD=0x00800000
-#
-# Device Drivers
-#
-#
-# Generic Driver Options
-#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-#
-# Plug and Play support
-#
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_SX8 is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-#
-# Fusion MPT device support
-#
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-#
-# Macintosh device drivers
-#
-#
-# Networking support
-#
-# CONFIG_NET is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-#
-# ISDN subsystem
-#
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-#
-# Input device support
-#
-CONFIG_INPUT=y
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_EVBUG=y
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_MPC52xx=y
-CONFIG_SERIAL_MPC52xx_CONSOLE=y
-CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-#
-# Misc devices
-#
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-#
-# Digital Video Broadcasting Devices
-#
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-#
-# USB support
-#
-# CONFIG_USB is not set
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_FAT_FS is not set
-# CONFIG_NTFS_FS is not set
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-#
-# Library routines
-#
-# CONFIG_CRC16 is not set
-# CONFIG_CRC32 is not set
-# CONFIG_LIBCRC32C is not set
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-#
-# Kernel hacking
-#
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_DEBUG_SPINLOCK is not set
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_KGDB is not set
-# CONFIG_XMON is not set
-# CONFIG_BDI_SWITCH is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_SERIAL_TEXT_DEBUG=y
-CONFIG_PPC_OCP=y
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MMU=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_HAVE_DEC_LOCK=y
-CONFIG_PPC=y
-CONFIG_PPC32=y
-CONFIG_GENERIC_NVRAM=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-# CONFIG_IKCONFIG is not set
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-CONFIG_FUTEX=y
-# CONFIG_EPOLL is not set
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-
-#
-# Loadable module support
-#
-# CONFIG_MODULES is not set
-
-#
-# Processor
-#
-CONFIG_6xx=y
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_POWER3 is not set
-# CONFIG_POWER4 is not set
-# CONFIG_8xx is not set
-# CONFIG_E500 is not set
-# CONFIG_CPU_FREQ is not set
-CONFIG_EMBEDDEDBOOT=y
-CONFIG_PPC_STD_MMU=y
-
-#
-# Platform options
-#
-# CONFIG_PPC_MULTIPLATFORM is not set
-# CONFIG_APUS is not set
-# CONFIG_WILLOW is not set
-# CONFIG_PCORE is not set
-# CONFIG_POWERPMC250 is not set
-# CONFIG_EV64260 is not set
-# CONFIG_SPRUCE is not set
-# CONFIG_LOPEC is not set
-# CONFIG_MCPN765 is not set
-# CONFIG_MVME5100 is not set
-# CONFIG_PPLUS is not set
-# CONFIG_PRPMC750 is not set
-# CONFIG_PRPMC800 is not set
-# CONFIG_SANDPOINT is not set
-# CONFIG_ADIR is not set
-# CONFIG_K2 is not set
-# CONFIG_PAL4 is not set
-# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_SBC82xx is not set
-# CONFIG_SBS8260 is not set
-CONFIG_RPX8260=y
-# CONFIG_TQM8260 is not set
-# CONFIG_ADS8272 is not set
-CONFIG_8260=y
-CONFIG_CPM2=y
-# CONFIG_PC_KEYBOARD is not set
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_CMDLINE_BOOL is not set
-
-#
-# Bus options
-#
-# CONFIG_PCI is not set
-# CONFIG_PCI_DOMAINS is not set
-
-#
-# Advanced setup
-#
-# CONFIG_ADVANCED_OPTIONS is not set
-
-#
-# Default settings for advanced configuration options are used
-#
-CONFIG_HIGHMEM_START=0xfe000000
-CONFIG_LOWMEM_SIZE=0x30000000
-CONFIG_KERNEL_START=0xc0000000
-CONFIG_TASK_SIZE=0x80000000
-CONFIG_BOOT_LOAD=0x00400000
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-
-#
-# Memory Technology Devices (MTD)
-#
-# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-
-#
-# Macintosh device drivers
-#
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
-# CONFIG_OAKNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-# CONFIG_INPUT is not set
-
-#
-# Userland interfaces
-#
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
-
-#
-# Input Device Drivers
-#
-
-#
-# Character devices
-#
-# CONFIG_VT is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_CPM=y
-CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_SERIAL_CPM_SCC1 is not set
-# CONFIG_SERIAL_CPM_SCC2 is not set
-# CONFIG_SERIAL_CPM_SCC3 is not set
-# CONFIG_SERIAL_CPM_SCC4 is not set
-CONFIG_SERIAL_CPM_SMC1=y
-# CONFIG_SERIAL_CPM_SMC2 is not set
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-# CONFIG_EXT3_FS_POSIX_ACL is not set
-# CONFIG_EXT3_FS_SECURITY is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-# CONFIG_EXPORTFS is not set
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-# CONFIG_SCC_ENET is not set
-CONFIG_FEC_ENET=y
-# CONFIG_USE_MDIO is not set
-
-#
-# CPM2 Options
-#
-# CONFIG_FCC1_ENET is not set
-# CONFIG_FCC2_ENET is not set
-CONFIG_FCC3_ENET=y
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC32 is not set
-# CONFIG_LIBCRC32C is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_KGDB_CONSOLE is not set
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_PMACZILOG=y
+# CONFIG_SERIAL_CORE is not set
+# CONFIG_SERIAL_PMACZILOG is not set
# CONFIG_SERIAL_PMACZILOG_CONSOLE is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_4xx
+EXTRA_AFLAGS := -Wa,-m405
+endif
+ifdef CONFIG_E500
+EXTRA_AFLAGS := -Wa,-me500
+endif
+
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
/* All of the bits we have to set.....
*/
- ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK
-BEGIN_FTR_SECTION
- ori r11,r11,HID0_BTIC
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_BTIC)
+ ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
#endif
/* We need to mark all pages as being coherent if we're SMP or we
- * have a 754x and an MPC107 host bridge.
- */
+ * have a 754x and an MPC107 host bridge. */
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_L3_DISABLE_NAP,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_L3_DISABLE_NAP | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
- COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- 32, 32,
- __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.0 */
- 0xffffffff, 0x80020100, "7447/7457",
- CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
- COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- 32, 32,
- __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.1 */
- 0xffffffff, 0x80020101, "7447/7457",
- CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
},
- { /* 7447/7457 Rev 1.2 and later */
- 0xffff0000, 0x80020000, "7447/7457",
+ { /* 7457 */
+ 0xffff0000, 0x80020000, "7457",
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
+ CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
32, 32,
__setup_cpu_603
},
- { /* All G2_LE (603e core, plus some) have the same pvr */
- 0x7fff0000, 0x00820000, "G2_LE",
+ { /* 8280 is a G2_LE (603e core, plus some) */
+ 0x7fff0000, 0x00820000, "8280",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
CPU_FTR_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC,
no_page:
return NULL;
}
-EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(__dma_free_coherent);
+EXPORT_SYMBOL(dma_free_coherent);
/*
* Initialise the consistent memory allocation.
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,IMISS
tlbli r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
li r1,0xe15 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? 2: 0 */
- mtspr SPRN_RPA,r1
+ mtspr RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+ ori r3,r3,PPC44x_TLB_TS /* Translation state 1 */
+
+ li r0,1 /* TLB slot 1 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+ tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
/* Force context change */
isync
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
- mfspr r6,SPRN_SPEFSCR
- stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
cache_bitmask |= (1<<i);
return (void *)(&malloc_cache[i]);
}
- return NULL;
+ return 0;
}
void irq_kfree(void *ptr)
if (!shared) {
desc->depth = 0;
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
- if (desc->handler) {
- if (desc->handler->startup)
- desc->handler->startup(irq);
- else if (desc->handler->enable)
- desc->handler->enable(irq);
- }
+ unmask_irq(irq);
}
spin_unlock_irqrestore(&desc->lock,flags);
int i;
/* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", NULL);
+ root_irq_dir = proc_mkdir("irq", 0);
/* create /proc/irq/prof_cpu_mask */
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* 268 reserved for sys_kexec_load */
- .long sys_ioprio_set
- .long sys_ioprio_get
--- /dev/null
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ *
+ *
+ * Dynamic DMA mapping support.
+ *
+ * swiped from i386
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC;
+
+ if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
+ gfp |= GFP_DMA;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ ret = consistent_alloc(gfp, size, dma_handle);
+#else
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+#endif
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ *dma_handle = virt_to_bus(ret);
+#endif
+ }
+ return ret;
+}
+
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ consistent_free(vaddr);
+#else
+ free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
struct pci_dev* dev;
unsigned int *class_code, *reg;
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
- reg = (unsigned int *)get_property(node, "reg", NULL);
+ reg = (unsigned int *)get_property(node, "reg", 0);
if (!reg)
continue;
dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
continue;
make_one_node_map(node, hose->first_busno);
}
- of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
+ of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", 0);
if (of_prop_map)
memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
#ifdef DEBUG
* a fake root for all functions of a multi-function device,
* we go down them as well.
*/
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
strcmp(node->name, "multifunc-device"))
unsigned int *reg;
u8* fdata = (u8*)data;
- reg = (unsigned int *) get_property(node, "reg", NULL);
+ reg = (unsigned int *) get_property(node, "reg", 0);
if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
&& ((reg[0] >> 16) & 0xff) == fdata[0])
return 1;
if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
find_OF_pci_device_filter, (void *)node))
return -ENODEV;
- reg = (unsigned int *) get_property(node, "reg", NULL);
+ reg = (unsigned int *) get_property(node, "reg", 0);
if (!reg)
return -ENODEV;
*bus = (reg[0] >> 16) & 0xff;
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
*buf = 0;
return buf;
}
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return mem;
}
} else {
/* error condition */
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return (numChars);
}
#include <linux/ctype.h>
#include <linux/threads.h>
#include <linux/smp_lock.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/reg.h>
-static int ppc_htab_show(struct seq_file *m, void *v);
+static ssize_t ppc_htab_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos);
static ssize_t ppc_htab_write(struct file * file, const char __user * buffer,
size_t count, loff_t *ppos);
+static long long ppc_htab_lseek(struct file * file, loff_t offset, int orig);
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern unsigned int primary_pteg_full;
extern unsigned int htab_hash_searches;
-static int ppc_htab_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_htab_show, NULL);
-}
-
struct file_operations ppc_htab_operations = {
- .open = ppc_htab_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_htab_write,
- .release = single_release,
+ .llseek = ppc_htab_lseek,
+ .read = ppc_htab_read,
+ .write = ppc_htab_write,
};
static char *pmc1_lookup(unsigned long mmcr0)
* is _REALLY_ slow (see the nested for loops below) but nothing
* in here should be really timing critical. -- Cort
*/
-static int ppc_htab_show(struct seq_file *m, void *v)
+static ssize_t ppc_htab_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
{
unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
+ int n = 0;
#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE)
unsigned int kptes = 0, uptes = 0;
PTE *ptr;
#endif /* CONFIG_PPC_STD_MMU */
+ char buffer[512];
+
+ if (count < 0)
+ return -EINVAL;
if (cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON) {
mmcr0 = mfspr(SPRN_MMCR0);
pmc1 = mfspr(SPRN_PMC1);
pmc2 = mfspr(SPRN_PMC2);
- seq_printf(m,
+ n += sprintf( buffer + n,
"604 Performance Monitoring\n"
"MMCR0\t\t: %08lx %s%s ",
mmcr0,
( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "",
( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : "");
- seq_printf(m,
+ n += sprintf( buffer + n,
"\nPMC1\t\t: %08lx (%s)\n"
"PMC2\t\t: %08lx (%s)\n",
pmc1, pmc1_lookup(mmcr0),
#ifdef CONFIG_PPC_STD_MMU
/* if we don't have a htab */
- if ( Hash_size == 0 ) {
- seq_printf(m, "No Hash Table used\n");
- return 0;
+ if ( Hash_size == 0 )
+ {
+ n += sprintf( buffer + n, "No Hash Table used\n");
+ goto return_string;
}
#ifndef CONFIG_PPC64BRIDGE
}
#endif
- seq_printf(m,
+ n += sprintf( buffer + n,
"PTE Hash Table Information\n"
"Size\t\t: %luKb\n"
"Buckets\t\t: %lu\n"
#endif
);
- seq_printf(m,
+ n += sprintf( buffer + n,
"Reloads\t\t: %lu\n"
"Preloads\t: %lu\n"
"Searches\t: %u\n"
"Evicts\t\t: %lu\n",
htab_reloads, htab_preloads, htab_hash_searches,
primary_pteg_full, htab_evicts);
+return_string:
#endif /* CONFIG_PPC_STD_MMU */
- seq_printf(m,
+ n += sprintf( buffer + n,
"Non-error misses: %lu\n"
"Error misses\t: %lu\n",
pte_misses, pte_errors);
- return 0;
+ if (*ppos >= strlen(buffer))
+ return 0;
+ if (n > strlen(buffer) - *ppos)
+ n = strlen(buffer) - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user(buf, buffer + *ppos, n))
+ return -EFAULT;
+ *ppos += n;
+ return n;
}
/*
unsigned long tmp;
char buffer[16];
- if (!capable(CAP_SYS_ADMIN))
+ if ( current->uid != 0 )
return -EACCES;
if (strncpy_from_user(buffer, ubuffer, 15))
return -EFAULT;
#endif /* CONFIG_PPC_STD_MMU */
}
+
+static long long
+ppc_htab_lseek(struct file * file, loff_t offset, int orig)
+{
+ long long ret = -EINVAL;
+
+ lock_kernel();
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ }
+ unlock_kernel();
+ return ret;
+}
+
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer_arg, size_t *lenp, loff_t *ppos)
+ void __user *buffer_arg, size_t *lenp)
{
int vleft, first=1, len, left, val;
char __user *buffer = (char __user *) buffer_arg;
if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
return -EFAULT;
- if ( /*!table->maxlen ||*/ (*ppos && !write)) {
+ if ( /*!table->maxlen ||*/ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
}
if (!write && !first && left) {
- if(put_user('\n', (char __user *) buffer))
+ if(put_user('\n', (char *) buffer))
return -EFAULT;
left--, buffer++;
}
if (write) {
- char __user *s = (char __user *) buffer;
+ p = (char *) buffer;
while (left) {
char c;
- if(get_user(c, s++))
+ if(get_user(c, p++))
return -EFAULT;
if (!isspace(c))
break;
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
}
regs->gpr[1] = sp;
regs->msr = MSR_USER;
if (last_task_used_math == current)
- last_task_used_math = NULL;
+ last_task_used_math = 0;
if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
+ last_task_used_altivec = 0;
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
#endif
else
val = __unpack_fe01(tsk->thread.fpexc_mode);
- return put_user(val, (unsigned int __user *) adr);
+ return put_user(val, (unsigned int *) adr);
}
int sys_clone(unsigned long clone_flags, unsigned long usp,
/*
* Get contents of AltiVec register state in task TASK
*/
-static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
+static inline int get_vrregs(unsigned long *data, struct task_struct *task)
{
int i, j;
/*
* Write contents of AltiVec register state into task TASK.
*/
-static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
+static inline int set_vrregs(struct task_struct *task, unsigned long *data)
{
int i, j;
ret = -EIO;
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp,(unsigned long *) data);
break;
}
preempt_enable();
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp,(unsigned long *) data);
break;
}
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = get_vrregs((unsigned long __user *)data, child);
+ ret = get_vrregs((unsigned long *)data, child);
break;
case PTRACE_SETVRREGS:
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = set_vrregs(child, (unsigned long __user *)data);
+ ret = set_vrregs(child, (unsigned long *)data);
break;
#endif
#ifdef CONFIG_SPE
/* Get the child spe register state. */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = get_evrregs((unsigned long __user *)data, child);
+ ret = get_evrregs((unsigned long *)data, child);
break;
case PTRACE_SETEVRREGS:
* of register state from memory */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = set_evrregs(child, (unsigned long __user *)data);
+ ret = set_evrregs(child, (unsigned long *)data);
break;
#endif
}
__setup("l2cr=", ppc_setup_l2cr);
-#ifdef CONFIG_GENERIC_NVRAM
+#ifdef CONFIG_NVRAM
/* Generic nvram hooks used by drivers/char/gen_nvram.c */
unsigned char nvram_read_byte(int addr)
#ifdef CONFIG_XMON
xmon_map_scc();
if (strstr(cmd_line, "xmon"))
- xmon(NULL);
+ xmon(0);
#endif /* CONFIG_XMON */
if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
* altivec/spe instructions at some point.
*/
static int
-save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
+save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
{
/* save general and floating-point registers */
CHECK_FULL_REGS(regs);
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
- if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
+ if (__put_user(current->thread.vrsave, (u32 *)&frame->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
memset(¤t->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
+ if (__get_user(current->thread.vrsave, (u32 *)&sr->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
sigset_t set;
- struct mcontext __user *mcp;
+ struct mcontext *mcp;
if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
|| __get_user(mcp, &ucp->uc_regs))
if (new_ctx == NULL)
return 0;
if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
- || __get_user(tmp, (u8 __user *) new_ctx)
- || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
+ || __get_user(tmp, (u8 *) new_ctx)
+ || __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
return -EFAULT;
/*
/* create a stack frame for the caller of the handler */
newsp -= __SIGNAL_FRAMESIZE;
- if (verify_area(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
+ if (verify_area(VERIFY_WRITE, (void *) newsp, origsp - newsp))
goto badframe;
#if _NSIG != 64
set.sig[1] = sigctx._unused[3];
restore_sigmask(&set);
- sr = (struct mcontext __user *) sigctx.regs;
+ sr = (struct mcontext *) sigctx.regs;
if (verify_area(VERIFY_READ, sr, sizeof(*sr))
|| restore_user_regs(regs, sr, 1))
goto badframe;
break;
case SEMTIMEDOP:
ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, (const struct timespec __user *) fifth);
+ second, (const struct timespec *) fifth);
break;
case SEMGET:
ret = sys_semget (first, second, third);
if (!ptr)
break;
if ((ret = verify_area (VERIFY_READ, ptr, sizeof(long)))
- || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
+ || (ret = get_user(fourth.__pad, (void *__user *)ptr)))
break;
ret = sys_semctl (first, second, third, fourth);
break;
* sys_select() with the appropriate args. -- Cort
*/
int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+ppc_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
{
if ( (unsigned long)n >= 4096 )
{
unsigned long __user *buffer = (unsigned long __user *)n;
if (verify_area(VERIFY_READ, buffer, 5*sizeof(unsigned long))
|| __get_user(n, buffer)
- || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
- || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
- || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
+ || __get_user(inp, ((fd_set **)(buffer+1)))
+ || __get_user(outp, ((fd_set **)(buffer+2)))
+ || __get_user(exp, ((fd_set **)(buffer+3)))
+ || __get_user(tvp, ((struct timeval **)(buffer+4))))
return -EFAULT;
}
return sys_select(n, inp, outp, exp, tvp);
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void __user *) addr;
+ info.si_addr = (void *) addr;
force_sig_info(signr, &info, current);
}
unsigned int va, vb, vc, vd;
vector128 *vrs;
- if (get_user(instr, (unsigned int __user *) regs->nip))
+ if (get_user(instr, (unsigned int *) regs->nip))
return -EFAULT;
if ((instr >> 26) != 4)
return -EINVAL; /* not an altivec instruction */
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
info->alignment = alignment;
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 1) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Perfect fit */
if (bs == s && be == e) {
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 1) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Just fits */
if (blk->size == size) {
/* Validate size */
if (size <= 0)
- return ERR_PTR(-EINVAL);
+ return NULL;
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 2) < 0)
- return ERR_PTR(-ENOMEM);
+ return NULL;
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return ERR_PTR(-ENOMEM);
+ return NULL;
/* Perfect fit */
if (bs == s && be == e) {
}
/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ * Configure PPC44x TLB for AS0 exception processing.
*/
-void __init MMU_init_hw(void)
-{
- flush_instruction_cache();
-}
-
-unsigned long __init mmu_mapin_ram(void)
+static void __init
+ppc44x_tlb_config(void)
{
unsigned int pinned_tlbs = 1;
int i;
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+ flush_instruction_cache();
+
+ ppc44x_tlb_config();
+}
+
+/* TODO: Add large page lowmem mapping support */
+unsigned long __init mmu_mapin_ram(void)
+{
+ unsigned long v, s, f = _PAGE_GUARDED;
+ phys_addr_t p;
+
+ v = KERNELBASE;
+ p = PPC_MEMSTART;
+
+ for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
+ if ((char *) v >= _stext && (char *) v < etext)
+ f |= _PAGE_RAM_TEXT;
+ else
+ f |= _PAGE_RAM;
+ map_page(v, p, f);
+ v += PAGE_SIZE;
+ p += PAGE_SIZE;
+ }
+
+ if (ppc_md.progress)
+ ppc_md.progress("MMU:mmu_mapin_ram done", 0x401);
- return total_lowmem;
+ return s;
}
# Makefile for the linux ppc-specific parts of the memory manager.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+
obj-y := fault.o init.o mem_pieces.o \
mmu_context.o pgtable.o
{
unsigned int inst;
- if (get_user(inst, (unsigned int __user *)regs->nip))
+ if (get_user(inst, (unsigned int *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void __user *) address;
+ info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current);
return 0;
}
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
+ info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
return SIGBUS;
depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
-config PPC4xx_DMA
- bool "PPC4xx DMA controller support"
- depends on 4xx
-
-config PPC4xx_EDMA
- bool
- depends on !STB03xxx && PPC4xx_DMA
- default y
-
config PM
bool "Power Management support (EXPERIMENTAL)"
depends on 4xx && EXPERIMENTAL
#define UART0_IO_BASE (u8 *) 0xE0000200
#define UART1_IO_BASE (u8 *) 0xE0000300
-/* external Epson SG-615P */
-#define BASE_BAUD 691200
+#define BASE_BAUD 33000000/3/16
#define STD_UART_OP(num) \
{ 0, BASE_BAUD, 0, UART##num##_INT, \
default MPC8540_ADS
config MPC8540_ADS
- bool "Freescale MPC8540 ADS"
+ bool "MPC8540ADS"
help
This option enables support for the MPC 8540 ADS evaluation board.
-config MPC8555_CDS
- bool "Freescale MPC8555 CDS"
- help
- This option enablese support for the MPC8555 CDS evaluation board.
-
-config MPC8560_ADS
- bool "Freescale MPC8560 ADS"
- help
- This option enables support for the MPC 8560 ADS evaluation board.
-
config SBC8560
bool "WindRiver PowerQUICC III SBC8560"
help
depends on MPC8540_ADS
default y
-config MPC8555
- bool
- depends on MPC8555_CDS
- default y
-
config MPC8560
bool
- depends on SBC8560 || MPC8560_ADS
- default y
-
-config 85xx_PCI2
- bool "Supprt for 2nd PCI host controller"
- depends on MPC8555_CDS
+ depends on SBC8560
default y
config FSL_OCP
config PPC_GEN550
bool
- depends on MPC8540 || SBC8560 || MPC8555
+ depends on MPC8540 || SBC8560
default y
endmenu
#
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o
-obj-$(CONFIG_MPC8555_CDS) += mpc85xx_cds_common.o
-obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads_common.o mpc8560_ads.o
obj-$(CONFIG_SBC8560) += sbc85xx.o sbc8560.o
obj-$(CONFIG_MPC8540) += mpc8540.o
-obj-$(CONFIG_MPC8555) += mpc8555.o
obj-$(CONFIG_MPC8560) += mpc8560.o
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
-#include <linux/initrd.h>
#include <linux/module.h>
#include <asm/system.h>
#define __MACH_MPC8540ADS_H__
#include <linux/config.h>
+#include <linux/serial.h>
#include <linux/initrd.h>
#include <syslib/ppc85xx_setup.h>
#include <platforms/85xx/mpc85xx_ads_common.h>
+++ /dev/null
-/*
- * arch/ppc/platform/85xx/mpc8555.c
- *
- * MPC8555 I/O descriptions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <asm/mpc85xx.h>
-#include <asm/ocp.h>
-
-/* These should be defined in platform code */
-extern struct ocp_gfar_data mpc85xx_tsec1_def;
-extern struct ocp_gfar_data mpc85xx_tsec2_def;
-extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def;
-
-/* We use offsets for paddr since we do not know at compile time
- * what CCSRBAR is, platform code should fix this up in
- * setup_arch
- *
- * Only the first IRQ is given even if a device has
- * multiple lines associated with ita
- */
-struct ocp_def core_ocp[] = {
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_IIC,
- .index = 0,
- .paddr = MPC85xx_IIC1_OFFSET,
- .irq = MPC85xx_IRQ_IIC1,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_i2c1_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_16550,
- .index = 0,
- .paddr = MPC85xx_UART0_OFFSET,
- .irq = MPC85xx_IRQ_DUART,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_16550,
- .index = 1,
- .paddr = MPC85xx_UART1_OFFSET,
- .irq = MPC85xx_IRQ_DUART,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_GFAR,
- .index = 0,
- .paddr = MPC85xx_ENET1_OFFSET,
- .irq = MPC85xx_IRQ_TSEC1_TX,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_tsec1_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_GFAR,
- .index = 1,
- .paddr = MPC85xx_ENET2_OFFSET,
- .irq = MPC85xx_IRQ_TSEC2_TX,
- .pm = OCP_CPM_NA,
- .additions = &mpc85xx_tsec2_def,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_DMA,
- .index = 0,
- .paddr = MPC85xx_DMA_OFFSET,
- .irq = MPC85xx_IRQ_DMA0,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_PERFMON,
- .index = 0,
- .paddr = MPC85xx_PERFMON_OFFSET,
- .irq = MPC85xx_IRQ_PERFMON,
- .pm = OCP_CPM_NA,
- },
- { .vendor = OCP_VENDOR_INVALID
- }
-};
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc8555_cds.h
- *
- * MPC8555CDS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC8555CDS_H__
-#define __MACH_MPC8555CDS_H__
-
-#include <linux/config.h>
-#include <linux/serial.h>
-#include <platforms/85xx/mpc85xx_cds_common.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
-
-#endif /* __MACH_MPC8555CDS_H__ */
+++ /dev/null
-/*
- * arch/ppc/platforms/85xx/mpc8560_ads.c
- *
- * MPC8560ADS board specific routines
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/serial.h>
-#include <linux/tty.h> /* for linux/serial_core.h */
-#include <linux/serial_core.h>
-#include <linux/initrd.h>
-#include <linux/module.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/open_pic.h>
-#include <asm/bootinfo.h>
-#include <asm/pci-bridge.h>
-#include <asm/mpc85xx.h>
-#include <asm/irq.h>
-#include <asm/immap_85xx.h>
-#include <asm/kgdb.h>
-#include <asm/ocp.h>
-#include <asm/cpm2.h>
-#include <mm/mmu_decl.h>
-
-#include <syslib/cpm2_pic.h>
-#include <syslib/ppc85xx_common.h>
-#include <syslib/ppc85xx_setup.h>
-
-extern void cpm2_reset(void);
-
-struct ocp_gfar_data mpc85xx_tsec1_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC1_TX,
- .interruptError = MPC85xx_IRQ_TSEC1_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC1_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
- | GFAR_HAS_RMON | GFAR_HAS_COALESCE
- | GFAR_HAS_PHY_INTR),
- .phyid = 0,
- .phyregidx = 0,
-};
-
-struct ocp_gfar_data mpc85xx_tsec2_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC2_TX,
- .interruptError = MPC85xx_IRQ_TSEC2_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC2_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR
- | GFAR_HAS_RMON | GFAR_HAS_COALESCE
- | GFAR_HAS_PHY_INTR),
- .phyid = 1,
- .phyregidx = 0,
-};
-
-struct ocp_fs_i2c_data mpc85xx_i2c1_def = {
- .flags = FS_I2C_SEPARATE_DFSRR,
-};
-
-/* ************************************************************************
- *
- * Setup the architecture
- *
- */
-
-static void __init
-mpc8560ads_setup_arch(void)
-{
- struct ocp_def *def;
- struct ocp_gfar_data *einfo;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- cpm2_reset();
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc8560ads_setup_arch()", 0);
-
- /* Set loops_per_jiffy to a half-way reasonable value,
- for use until calibrate_delay gets called. */
- loops_per_jiffy = freq / HZ;
-
-#ifdef CONFIG_PCI
- /* setup PCI host bridges */
- mpc85xx_setup_hose();
-#endif
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6);
- }
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- ROOT_DEV = Root_RAM0;
- else
-#endif
-#ifdef CONFIG_ROOT_NFS
- ROOT_DEV = Root_NFS;
-#else
- ROOT_DEV = Root_HDA1;
-#endif
-
- ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base));
-}
-
-static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
-{
- while ((irq = cpm2_get_irq(regs)) >= 0) {
- ppc_irq_dispatch_handler(regs, irq);
- }
- return IRQ_HANDLED;
-}
-
-static void __init
-mpc8560_ads_init_IRQ(void)
-{
- int i;
- volatile cpm2_map_t *immap = cpm2_immr;
-
- /* Setup OpenPIC */
- mpc85xx_ads_init_IRQ();
-
- /* disable all CPM interupts */
- immap->im_intctl.ic_simrh = 0x0;
- immap->im_intctl.ic_simrl = 0x0;
-
- for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++)
- irq_desc[i].handler = &cpm2_pic;
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- immap->im_intctl.ic_sicr = 0;
- immap->im_intctl.ic_scprrh = 0x05309770;
- immap->im_intctl.ic_scprrl = 0x05309770;
-
- request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL);
-
- return;
-}
-
-
-
-/* ************************************************************************ */
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* parse_bootinfo must always be called first */
- parse_bootinfo(find_bootinfo());
-
- /*
- * If we were passed in a board information, copy it into the
- * residual data area.
- */
- if (r3) {
- memcpy((void *) __res, (void *) (r3 + KERNELBASE),
- sizeof (bd_t));
-
- }
-#if defined(CONFIG_BLK_DEV_INITRD)
- /*
- * If the init RAM disk has been configured in, and there's a valid
- * starting address for it, set it up.
- */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- /* Copy the kernel command line arguments to a safe place. */
-
- if (r6) {
- *(char *) (r7 + KERNELBASE) = 0;
- strcpy(cmd_line, (char *) (r6 + KERNELBASE));
- }
-
- /* setup the PowerPC module struct */
- ppc_md.setup_arch = mpc8560ads_setup_arch;
- ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
-
- ppc_md.init_IRQ = mpc8560_ads_init_IRQ;
- ppc_md.get_irq = openpic_get_irq;
-
- ppc_md.restart = mpc85xx_restart;
- ppc_md.power_off = mpc85xx_power_off;
- ppc_md.halt = mpc85xx_halt;
-
- ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
-
- ppc_md.time_init = NULL;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc8560ads_init(): exit", 0);
-
- return;
-}
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc8560_ads.h
- *
- * MPC8540ADS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC8560ADS_H
-#define __MACH_MPC8560ADS_H
-
-#include <linux/config.h>
-#include <syslib/ppc85xx_setup.h>
-#include <platforms/85xx/mpc85xx_ads_common.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
-#define PHY_INTERRUPT MPC85xx_IRQ_EXT7
-
-#endif /* __MACH_MPC8560ADS_H */
+++ /dev/null
-/*
- * arch/ppc/platform/85xx/mpc85xx_cds_common.c
- *
- * MPC85xx CDS board specific routines
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/serial.h>
-#include <linux/module.h>
-#include <linux/root_dev.h>
-#include <linux/initrd.h>
-#include <linux/tty.h>
-#include <linux/serial_core.h>
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/open_pic.h>
-#include <asm/bootinfo.h>
-#include <asm/pci-bridge.h>
-#include <asm/mpc85xx.h>
-#include <asm/irq.h>
-#include <asm/immap_85xx.h>
-#include <asm/immap_cpm2.h>
-#include <asm/ocp.h>
-#include <asm/kgdb.h>
-
-#include <mm/mmu_decl.h>
-#include <syslib/cpm2_pic.h>
-#include <syslib/ppc85xx_common.h>
-#include <syslib/ppc85xx_setup.h>
-
-
-#ifndef CONFIG_PCI
-unsigned long isa_io_base = 0;
-unsigned long isa_mem_base = 0;
-#endif
-
-extern unsigned long total_memory; /* in mm/init */
-
-unsigned char __res[sizeof (bd_t)];
-
-static int cds_pci_slot = 2;
-static volatile u8 * cadmus;
-
-/* Internal interrupts are all Level Sensitive, and Positive Polarity */
-
-static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
-#if defined(CONFIG_PCI)
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 0: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 1: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI1 slot */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI1 slot */
-#else
- 0x0, /* External 0: */
- 0x0, /* External 1: */
- 0x0, /* External 2: */
- 0x0, /* External 3: */
-#endif
- 0x0, /* External 4: */
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
- 0x0, /* External 6: */
- 0x0, /* External 7: */
- 0x0, /* External 8: */
- 0x0, /* External 9: */
- 0x0, /* External 10: */
-#if defined(CONFIG_85xx_PCI2) && defined(CONFIG_PCI)
- (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 11: PCI2 slot 0 */
-#else
- 0x0, /* External 11: */
-#endif
-};
-
-struct ocp_gfar_data mpc85xx_tsec1_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC1_TX,
- .interruptError = MPC85xx_IRQ_TSEC1_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC1_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR |
- GFAR_HAS_PHY_INTR),
- .phyid = 0,
- .phyregidx = 0,
-};
-
-struct ocp_gfar_data mpc85xx_tsec2_def = {
- .interruptTransmit = MPC85xx_IRQ_TSEC2_TX,
- .interruptError = MPC85xx_IRQ_TSEC2_ERROR,
- .interruptReceive = MPC85xx_IRQ_TSEC2_RX,
- .interruptPHY = MPC85xx_IRQ_EXT5,
- .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR |
- GFAR_HAS_PHY_INTR),
- .phyid = 1,
- .phyregidx = 0,
-};
-
-struct ocp_fs_i2c_data mpc85xx_i2c1_def = {
- .flags = FS_I2C_SEPARATE_DFSRR,
-};
-
-/* ************************************************************************ */
-int
-mpc85xx_cds_show_cpuinfo(struct seq_file *m)
-{
- uint pvid, svid, phid1;
- uint memsize = total_memory;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- pvid = mfspr(PVR);
- svid = mfspr(SVR);
-
- seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
- seq_printf(m, "Machine\t\t: CDS (%x)\n", cadmus[CM_VER]);
- seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000,
- freq % 1000000);
- seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
- seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-
- /* Display cpu Pll setting */
- phid1 = mfspr(HID1);
- seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-
- /* Display the amount of memory */
- seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
-
- return 0;
-}
-
-#ifdef CONFIG_CPM2
-static void cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
-{
- while((irq = cpm2_get_irq(regs)) >= 0)
- {
- ppc_irq_dispatch_handler(regs,irq);
- }
-}
-#endif /* CONFIG_CPM2 */
-
-void __init
-mpc85xx_cds_init_IRQ(void)
-{
- bd_t *binfo = (bd_t *) __res;
-#ifdef CONFIG_CPM2
- volatile cpm2_map_t *immap = cpm2_immr;
- int i;
-#endif
-
- /* Determine the Physical Address of the OpenPIC regs */
- phys_addr_t OpenPIC_PAddr = binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
- OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
- OpenPIC_InitSenses = mpc85xx_cds_openpic_initsenses;
- OpenPIC_NumInitSenses = sizeof (mpc85xx_cds_openpic_initsenses);
-
- /* Skip reserved space and internal sources */
- openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
- /* Map PIC IRQs 0-11 */
- openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000);
-
- /* we let openpic interrupts starting from an offset, to
- * leave space for cascading interrupts underneath.
- */
- openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
-
-#ifdef CONFIG_CPM2
- /* disable all CPM interupts */
- immap->im_intctl.ic_simrh = 0x0;
- immap->im_intctl.ic_simrl = 0x0;
-
- for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++)
- irq_desc[i].handler = &cpm2_pic;
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- immap->im_intctl.ic_sicr = 0;
- immap->im_intctl.ic_scprrh = 0x05309770;
- immap->im_intctl.ic_scprrl = 0x05309770;
-
- request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL);
-#endif
-
- return;
-}
-
-#ifdef CONFIG_PCI
-/*
- * interrupt routing
- */
-int
-mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
-{
- struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-
- if (!hose->index)
- {
- /* Handle PCI1 interrupts */
- char pci_irq_table[][4] =
- /*
- * PCI IDSEL/INTPIN->INTLINE
- * A B C D
- */
-
- /* Note IRQ assignment for slots is based on which slot the elysium is
- * in -- in this setup elysium is in slot #2 (this PIRQA as first
- * interrupt on slot */
- {
- { 0, 1, 2, 3 }, /* 16 - PMC */
- { 3, 0, 0, 0 }, /* 17 P2P (Tsi320) */
- { 0, 1, 2, 3 }, /* 18 - Slot 1 */
- { 1, 2, 3, 0 }, /* 19 - Slot 2 */
- { 2, 3, 0, 1 }, /* 20 - Slot 3 */
- { 3, 0, 1, 2 }, /* 21 - Slot 4 */
- };
-
- const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
- int i, j;
-
- for (i = 0; i < 6; i++)
- for (j = 0; j < 4; j++)
- pci_irq_table[i][j] =
- ((pci_irq_table[i][j] + 5 -
- cds_pci_slot) & 0x3) + PIRQ0A;
-
- return PCI_IRQ_TABLE_LOOKUP;
- } else {
- /* Handle PCI2 interrupts (if we have one) */
- char pci_irq_table[][4] =
- {
- /*
- * We only have one slot and one interrupt
- * going to PIRQA - PIRQD */
- { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
- };
-
- const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
-
- return PCI_IRQ_TABLE_LOOKUP;
- }
-}
-
-#define ARCADIA_HOST_BRIDGE_IDSEL 17
-#define ARCADIA_2ND_BRIDGE_IDSEL 3
-
-int
-mpc85xx_exclude_device(u_char bus, u_char devfn)
-{
- if (bus == 0 && PCI_SLOT(devfn) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-#if CONFIG_85xx_PCI2
- /* With the current code we know PCI2 will be bus 2, however this may
- * not be guarnteed */
- if (bus == 2 && PCI_SLOT(devfn) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
- /* We explicitly do not go past the Tundra 320 Bridge */
- if (bus == 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
- return PCIBIOS_DEVICE_NOT_FOUND;
- else
- return PCIBIOS_SUCCESSFUL;
-}
-#endif /* CONFIG_PCI */
-
-/* ************************************************************************
- *
- * Setup the architecture
- *
- */
-static void __init
-mpc85xx_cds_setup_arch(void)
-{
- struct ocp_def *def;
- struct ocp_gfar_data *einfo;
- bd_t *binfo = (bd_t *) __res;
- unsigned int freq;
-
- /* get the core frequency */
- freq = binfo->bi_intfreq;
-
- printk("mpc85xx_cds_setup_arch\n");
-
-#ifdef CONFIG_CPM2
- cpm2_reset();
-#endif
-
- cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
- cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;
- printk("CDS Version = %x in PCI slot %d\n", cadmus[CM_VER], cds_pci_slot);
-
- /* Set loops_per_jiffy to a half-way reasonable value,
- for use until calibrate_delay gets called. */
- loops_per_jiffy = freq / HZ;
-
-#ifdef CONFIG_PCI
- /* setup PCI host bridges */
- mpc85xx_setup_hose();
-#endif
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
-#ifdef CONFIG_SERIAL_8250
- mpc85xx_early_serial_map();
-#endif
-
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- /* Invalidate the entry we stole earlier the serial ports
- * should be properly mapped */
- invalidate_tlbcam_entry(NUM_TLBCAMS - 1);
-#endif
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6);
- }
-
- def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1);
- if (def) {
- einfo = (struct ocp_gfar_data *) def->additions;
- memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- ROOT_DEV = Root_RAM0;
- else
-#endif
-#ifdef CONFIG_ROOT_NFS
- ROOT_DEV = Root_NFS;
-#else
- ROOT_DEV = Root_HDA1;
-#endif
-
- ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base));
-}
-
-/* ************************************************************************ */
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* parse_bootinfo must always be called first */
- parse_bootinfo(find_bootinfo());
-
- /*
- * If we were passed in a board information, copy it into the
- * residual data area.
- */
- if (r3) {
- memcpy((void *) __res, (void *) (r3 + KERNELBASE),
- sizeof (bd_t));
-
- }
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- {
- bd_t *binfo = (bd_t *) __res;
-
- /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
- settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base,
- binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
-
- }
-#endif
-
-#if defined(CONFIG_BLK_DEV_INITRD)
- /*
- * If the init RAM disk has been configured in, and there's a valid
- * starting address for it, set it up.
- */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- /* Copy the kernel command line arguments to a safe place. */
-
- if (r6) {
- *(char *) (r7 + KERNELBASE) = 0;
- strcpy(cmd_line, (char *) (r6 + KERNELBASE));
- }
-
- /* setup the PowerPC module struct */
- ppc_md.setup_arch = mpc85xx_cds_setup_arch;
- ppc_md.show_cpuinfo = mpc85xx_cds_show_cpuinfo;
-
- ppc_md.init_IRQ = mpc85xx_cds_init_IRQ;
- ppc_md.get_irq = openpic_get_irq;
-
- ppc_md.restart = mpc85xx_restart;
- ppc_md.power_off = mpc85xx_power_off;
- ppc_md.halt = mpc85xx_halt;
-
- ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
-
- ppc_md.time_init = NULL;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
-
-#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
- ppc_md.progress = gen550_progress;
-#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
-
- if (ppc_md.progress)
- ppc_md.progress("mpc85xx_cds_init(): exit", 0);
-
- return;
-}
+++ /dev/null
-/*
- * arch/ppc/platforms/85xx/mpc85xx_cds_common.h
- *
- * MPC85xx CDS board definitions
- *
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
- *
- * Copyright 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MACH_MPC85XX_CDS_H__
-#define __MACH_MPC85XX_CDS_H__
-
-#include <linux/config.h>
-#include <linux/serial.h>
-#include <asm/ppcboot.h>
-#include <linux/initrd.h>
-#include <syslib/ppc85xx_setup.h>
-
-#define BOARD_CCSRBAR ((uint)0xe0000000)
-#define CCSRBAR_SIZE ((uint)1024*1024)
-
-/* CADMUS info */
-#define CADMUS_BASE (0xf8004000)
-#define CADMUS_SIZE (256)
-#define CM_VER (0)
-#define CM_CSR (1)
-#define CM_RST (2)
-
-/* PCI config */
-#define PCI1_CFG_ADDR_OFFSET (0x8000)
-#define PCI1_CFG_DATA_OFFSET (0x8004)
-
-#define PCI2_CFG_ADDR_OFFSET (0x9000)
-#define PCI2_CFG_DATA_OFFSET (0x9004)
-
-/* PCI interrupt controller */
-#define PIRQ0A MPC85xx_IRQ_EXT0
-#define PIRQ0B MPC85xx_IRQ_EXT1
-#define PIRQ0C MPC85xx_IRQ_EXT2
-#define PIRQ0D MPC85xx_IRQ_EXT3
-#define PIRQ1A MPC85xx_IRQ_EXT11
-
-/* PCI 1 memory map */
-#define MPC85XX_PCI1_LOWER_IO 0x00000000
-#define MPC85XX_PCI1_UPPER_IO 0x00ffffff
-
-#define MPC85XX_PCI1_LOWER_MEM 0x80000000
-#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff
-
-#define MPC85XX_PCI1_IO_BASE 0xe2000000
-#define MPC85XX_PCI1_MEM_OFFSET 0x00000000
-
-#define MPC85XX_PCI1_IO_SIZE 0x01000000
-
-/* PCI 2 memory map */
-#define MPC85XX_PCI2_LOWER_IO 0x01000000
-#define MPC85XX_PCI2_UPPER_IO 0x01ffffff
-
-#define MPC85XX_PCI2_LOWER_MEM 0xa0000000
-#define MPC85XX_PCI2_UPPER_MEM 0xbfffffff
-
-#define MPC85XX_PCI2_IO_BASE 0xe3000000
-#define MPC85XX_PCI2_MEM_OFFSET 0x00000000
-
-#define MPC85XX_PCI2_IO_SIZE 0x01000000
-
-#define SERIAL_PORT_DFNS \
- STD_UART_OP(0) \
- STD_UART_OP(1)
-
-#endif /* __MACH_MPC85XX_CDS_H__ */
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
-#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/initrd.h>
#define __MACH_SBC8560_H__
#include <linux/config.h>
+#include <linux/serial.h>
#include <platforms/85xx/sbc85xx.h>
-
-#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_40x
+EXTRA_AFLAGS := -Wa,-m405
+endif
+
# Extra CFLAGS so we don't have to do relative includes
CFLAGS_pmac_setup.o += -Iarch/$(ARCH)/mm
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac_cpufreq.o
endif
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
+obj-$(CONFIG_PPC_RTAS) += error_log.o proc_rtas.o
obj-$(CONFIG_PREP_RESIDUAL) += residual.o
obj-$(CONFIG_ADIR) += adir_setup.o adir_pic.o adir_pci.o
obj-$(CONFIG_EST8260) += est8260_setup.o
obj-$(CONFIG_PQ2ADS) += pq2ads_setup.o
obj-$(CONFIG_TQM8260) += tqm8260_setup.o
-obj-$(CONFIG_EV64260) += ev64260_setup.o
+obj-$(CONFIG_EV64260) += ev64260.o
+obj-$(CONFIG_DMV182) += dmv182.o
obj-$(CONFIG_GEMINI) += gemini_pci.o gemini_setup.o gemini_prom.o
obj-$(CONFIG_K2) += k2.o
obj-$(CONFIG_LOPEC) += lopec_setup.o lopec_pci.o
obj-$(CONFIG_PPLUS) += pplus.o
obj-$(CONFIG_PRPMC750) += prpmc750.o
obj-$(CONFIG_PRPMC800) += prpmc800.o
-obj-$(CONFIG_RPX8260) += rpx8260.o
obj-$(CONFIG_SANDPOINT) += sandpoint.o
obj-$(CONFIG_SBC82xx) += sbc82xx.o
obj-$(CONFIG_SPRUCE) += spruce.o
-obj-$(CONFIG_LITE5200) += lite5200.o mpc5200.o
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o
--- /dev/null
+/*
+ * arch/ppc/platforms/dmv182p.c
+ * Setup code for the Dy-4 SVME/DMV-182
+ *
+ * Copyright (C) 2004 TimeSys Corporation
+ * Copyright (C) 2004 Red Hat, Inc.
+ *
+ * Original 2.4 port by Scott Wood <scott.wood@timesys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/delay.h>
+
+#include <asm/serial.h>
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/mv64x60.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/todc.h>
+#include <linux/tty.h> /* for linux/serial_core.h */
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include "dmv182.h"
+
+extern int mv64360_get_irq(struct pt_regs *regs);
+extern void mv64360_init_irq(void);
+
+extern void gen550_progress(char *s, unsigned short hex);
+extern void gen550_init(int, struct uart_port *);
+
+static void __init dmv182_setup_peripherals(void);
+static void __init dmv182_setup_bridge(void);
+
+static struct mv64x60_handle bh;
+
+TODC_ALLOC();
+
+static void __init dmv182_map_io(void)
+{
+ io_block_mapping((unsigned long)dmv182_board_io_virt,
+ dmv182_board_io_phys, 0x10000000, _PAGE_IO);
+}
+
+// This sets up BAT3 to cover the serial port and Discovery chip.
+
+static void __init dmv182_setup_bats(void)
+{
+ int tmp1, tmp2;
+
+ asm volatile("lis %0, 0xe000;"
+ "ori %0, %0, 0x002a;"
+ "lis %1, 0xf000;"
+ "ori %1, %1, 0x1ffe;"
+ "mtspr %2, %0;"
+ "mtspr %3, %1"
+ : "=r" (tmp1), "=r" (tmp2)
+ : "i" (DBAT3L), "i" (DBAT3U)
+ : "memory");
+}
+
+static u8 *const irqstat = dmv182_fpga_io + 0x80;
+static u8 *const irqmask = dmv182_fpga_io + 0x81;
+
+// These two functions transform an IRQ number into
+// byte and bit indices into the above arrays.
+
+static inline int irqreg(unsigned int irq)
+{
+ return ((irq - 96) >> 3) * 3;
+}
+
+static inline int irqbit(unsigned int irq)
+{
+ return (irq - 96) & 7;
+}
+
+// FIXME: CPU1 and affinity support
+// The Marvell code doesn't appear to support anything
+// other than doorbells on CPU1 at the moment.
+
+static void dmv182_mask_irq(unsigned int irq)
+{
+ irqmask[irqreg(irq)] &= ~(1 << irqbit(irq));
+}
+
+static void dmv182_unmask_irq(unsigned int irq)
+{
+ irqmask[irqreg(irq)] |= 1 << irqbit(irq);
+}
+
+static unsigned int dmv182_startup_irq(unsigned int irq)
+{
+ dmv182_unmask_irq(irq);
+ return 0;
+}
+
+struct hw_interrupt_type dmv182_pic = {
+ .typename = " DMV182_PIC ",
+ .startup = dmv182_startup_irq,
+ .shutdown = dmv182_mask_irq,
+ .enable = dmv182_unmask_irq,
+ .disable = dmv182_mask_irq,
+ .ack = dmv182_mask_irq,
+ .end = dmv182_unmask_irq,
+ .set_affinity = NULL
+};
+
+atomic_t spurious_interrupts;
+
+static irqreturn_t dmv182_cascade(int irq, void *dev_id, struct pt_regs *regs)
+{
+ int i, j;
+ int cpu = smp_processor_id();
+ int irqs;
+
+ for (i = 0, j = 96; i < 24; i += 3, j += 8) {
+ irqs = irqstat[i] & irqmask[i + cpu];
+
+ if (irqs)
+ break;
+ }
+
+ if (i < 24) {
+ ppc_irq_dispatch_handler(regs, j + ffs(irqs) - 1);
+ return IRQ_HANDLED;
+ }
+
+ atomic_inc(&spurious_interrupts);
+ return IRQ_NONE;
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t dmv182_doorbell(int irq, void *dev_id, struct pt_regs *regs);
+#endif
+
+static void __init dmv182_init_irq(void)
+{
+ int i;
+
+ if (ppc_md.progress)
+ ppc_md.progress("dmv182_init_irq", 0x1821);
+
+ for (i = 96; i < 160; i++) {
+ dmv182_mask_irq(i);
+ irqmask[irqreg(i) + 1] &= ~(1 << irqbit(i));
+ irq_desc[i].handler = &dmv182_pic;
+ irq_desc[i].status = IRQ_LEVEL | IRQ_DISABLED;
+ }
+
+ mv64360_init_irq();
+
+ if (request_irq(94, dmv182_cascade, SA_INTERRUPT,
+ "DMV182 CPU0 cascade", NULL) < 0)
+ {
+ panic("Could not request CPU0 cascade IRQ\n");
+ }
+
+#ifdef CONFIG_SMP
+#if 0
+ if (request_irq(95, dmv182_cascade, SA_INTERRUPT,
+ "DMV182 CPU1 cascade", NULL) < 0)
+ {
+ panic("Could not request CPU1 cascade IRQ\n");
+ }
+#endif
+
+ if (request_irq(60, dmv182_doorbell, SA_INTERRUPT,
+ "CPU0 doorbell", NULL) < 0)
+ {
+ panic("Could not request CPU1 doorbell IRQ\n");
+ }
+
+ if (request_irq(28, dmv182_doorbell, SA_INTERRUPT,
+ "CPU1 doorbell", NULL) < 0)
+ {
+ panic("Could not request CPU1 doorbell IRQ\n");
+ }
+
+ // Clear and unmask all doorbell interrupts.
+
+ mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
+ mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0xff);
+ mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
+ mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
+#endif
+}
+
+// It's really device numbers, not idsels, but we have
+// to call it that so the PCI_IRQ_TABLE_LOOKUP will work.
+
+static int __init dmv182_map_irq(struct pci_dev *dev,
+ unsigned char idsel,
+ unsigned char pin)
+{
+ struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
+
+#if 0
+ printk("map irq: hose %d, bus %d, slot %d, first %d\n", hose->index,
+ dev->bus->number, idsel, hose->first_busno);
+#endif
+
+ if (hose->index != 0 && hose->index != 1) {
+ printk(KERN_ERR "map_irq: unknown hose %d\n", hose->index);
+ return 0;
+ }
+
+ // Some of this is guesswork...
+ // In particular, I don't know if the ABCD mappings are right,
+ // and I don't know which IPM goes with which slot (the manual
+ // merely says "IPM" for both).
+
+ if (hose->index == 0) {
+ static u8 pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { DMV182_IRQ_PMC1A, DMV182_IRQ_PMC1B,
+ DMV182_IRQ_PMC1C, DMV182_IRQ_PMC1D }, // PMC Slot 1 A
+ { DMV182_IRQ_PMC1A, DMV182_IRQ_PMC1B,
+ DMV182_IRQ_PMC1C, DMV182_IRQ_PMC1D }, // PMC Slot 1 B
+ };
+
+ const int min_idsel = 4, max_idsel = 5, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else if (dev->bus->parent && dev->bus->primary == hose->first_busno &&
+ dev->bus->self->devfn == 0x10) {
+ static u8 pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { DMV182_IRQ_IPM0, DMV182_IRQ_IPM0,
+ DMV182_IRQ_IPM0, DMV182_IRQ_IPM0 }, // IPM... 0?
+ { DMV182_IRQ_IPM1, DMV182_IRQ_IPM1,
+ DMV182_IRQ_IPM1, DMV182_IRQ_IPM1 }, // IPM... 1?
+ { DMV182_IRQ_USB_A, DMV182_IRQ_USB_B,
+ DMV182_IRQ_USB_C, DMV182_IRQ_USB_SMI }, // USB
+ { DMV182_IRQ_VME_CPU0, DMV182_IRQ_VME_CPU1, 0, 0 }, // VME
+ };
+
+ const int min_idsel = 1, max_idsel = 4, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else {
+ static u8 pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { DMV182_IRQ_PMC2A, DMV182_IRQ_PMC2B,
+ DMV182_IRQ_PMC2C, DMV182_IRQ_PMC2D }, // PMC Slot 2 A
+ { DMV182_IRQ_PMC2A, DMV182_IRQ_PMC2B,
+ DMV182_IRQ_PMC2C, DMV182_IRQ_PMC2D }, // PMC Slot 2 B
+ };
+
+ const int min_idsel = 4, max_idsel = 5, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ }
+}
+
+static unsigned char dmv182_pci_swizzle(struct pci_dev *dev,
+ unsigned char *pinp)
+{
+ struct pci_controller *hose = dev->sysdata;
+
+ // The devices under this particular bridge have their IRQs
+ // directly routed to the PIC, rather than through the parent
+ // bus. Thus, don't swizzle them. The bus is determined by
+ // the devfn of the parent, rather than its own bus number,
+ // in case a PMC card is added that has its own bridge(s),
+ // causing the numbering to change.
+
+ if (hose->index == 1 && dev->bus->parent &&
+ dev->bus->primary == hose->first_busno &&
+ dev->bus->self->devfn == 0x10)
+ return PCI_SLOT(dev->devfn);
+
+ return common_swizzle(dev, pinp);
+}
+
+static unsigned long __init
+dmv182_pci_bridge_reserve_space(struct pci_controller *hose,
+ unsigned char bus, unsigned char devfn)
+{
+ // Reserve 768 MiB for the bus containing VME. This
+ // will allow one to map the entire RAM of a 512 MiB
+ // card over VME, while still allowing space for other
+ // stuff on the bridge.
+ if (hose->first_busno == bus && devfn == 0x10)
+ return 0x30000000;
+
+ return 0;
+}
+
+static void __init dmv182_setup_caches(void)
+{
+#if 0 // This actually causes the TimeSys 2.4 port to blow up too, for me
+
+ // Why can't L2CR be set by generic 745x code?
+ // And what's with the underscore?
+ _set_L2CR(0xc0000000);
+
+ _set_L3CR(0x9e8a0180);
+#endif
+}
+
+#ifdef CONFIG_SERIAL_8250
+static void __init dmv182_early_serial_map(void)
+{
+ struct uart_port uart_req;
+ void *membase = ioremap(0xe0010000, PAGE_SIZE);
+
+ /* Setup serial port access */
+ memset(&uart_req, 0, sizeof (uart_req));
+ uart_req.irq = DMV182_IRQ_SERIAL_CH1;
+ uart_req.flags = 0;
+ uart_req.type = PORT_16550;
+ uart_req.uartclk = BASE_BAUD * 16;
+ uart_req.iotype = SERIAL_IO_MEM;
+ uart_req.mapbase = (unsigned long)dmv182_fpga_io + 0x18;
+ uart_req.membase = membase + 0x18;
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+ gen550_init(0, &uart_req);
+#endif
+
+ if (early_serial_setup(&uart_req) != 0)
+ printk("Early serial init of port 0 failed\n");
+
+ /* Assume early_serial_setup() doesn't modify uart_req */
+ uart_req.line = 1;
+ uart_req.mapbase = (unsigned long)dmv182_fpga_io + 0x20;
+ uart_req.membase = membase + 0x20;
+ uart_req.irq = DMV182_IRQ_SERIAL_CH2;
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+ gen550_init(1, &uart_req);
+#endif
+
+ if (early_serial_setup(&uart_req) != 0)
+ printk("Early serial init of port 1 failed\n");
+}
+#endif
+
+static void __init dmv182_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("dmv182_setup_arch", 0x1820);
+
+ ppc_md.pci_swizzle = dmv182_pci_swizzle;
+
+ dmv182_setup_caches();
+
+ // Enable snooping.
+// MV_SET_REG_BITS(MV64360_CPU_MASTER_CONTROL, (1 << 12) | (1 << 13));
+
+ // Set up the RTC.
+ dmv182_setup_bridge();
+ dmv182_setup_peripherals();
+
+#ifdef CONFIG_SERIAL_8250
+ dmv182_early_serial_map();
+#endif
+ if (ppc_md.progress)
+ ppc_md.progress("dmv182_setup_arch end", 0x182f);
+}
+
+static void __init dmv182_calibrate_decr(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("dmv182_calibrate_decr", 0x1822);
+
+ tb_ticks_per_jiffy = 25000000 / HZ;
+ tb_to_us = mulhwu_scale_factor(25000000, 1000000);
+}
+
+static void dmv182_halt(void)
+{
+ local_irq_disable();
+ for(;;);
+}
+
+static void dmv182_restart(char *cmd)
+{
+ unsigned long reg;
+ volatile unsigned long *ptr = NULL;
+ struct pci_dev *dev;
+
+ local_irq_disable();
+
+ /*
+ * The best way to reset the board is through the Universe VME.
+ * Since the VME driver may or may not be loaded, we can't rely
+ * on that, so the best way I can think of in resetting the board
+ * is to search all the PCI devices looking for the Universe chip
+ * and write to its command register to reset the board.
+ */
+ dev = pci_find_device(PCI_VENDOR_ID_TUNDRA, 0, NULL);
+ if (dev) {
+ printk("Found VME device %s\n",dev->slot_name);
+
+ for (reg = 0; reg < 6; reg++) {
+ struct resource *res = dev->resource + reg;
+ if ((res->flags & PCI_BASE_ADDRESS_SPACE) ==
+ PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ ptr = ioremap(res->start + 0x404, sizeof(ptr)); /* CTRL_REG */
+ break;
+ }
+ }
+ }
+
+ if (!ptr) {
+ printk("No VME device found to reset board\n");
+ return;
+ }
+
+ printk("**** resetting board through VME ****\n");
+ mdelay(10);
+
+ reg = *ptr;
+ reg |= 0x8000; /* reset only the board and not the entire chassis. */
+ *ptr = reg;
+
+ for(;;);
+}
+
+void board_get_mac(int port, u8 *addr)
+{
+ if (port < 1 || port > 2) {
+ printk(KERN_ERR "Unknown port %d in board_get_mac()...\n", port);
+ return;
+ }
+
+ memcpy(addr, (u8 *)dmv182_nvram + 8 + (2 - port) * 6, 6);
+ printk(KERN_NOTICE "Ethernet port %d MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ port, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+}
+
+#ifdef CONFIG_SMP
+
+static int dmv182_smp_probe(void)
+{
+ return 2;
+}
+
+void __secondary_start(void);
+
+static void dmv182_kick_cpu(int cpu)
+{
+ BUG_ON(cpu != 1);
+
+ *(u32 *)(PAGE_OFFSET + 4) = (u32)__secondary_start - PAGE_OFFSET;
+ wmb();
+ *(u32 *)(PAGE_OFFSET + 0) = 0x38a3fd19;
+ wmb();
+
+ /* Set MaskBR1 to allow CPU1 to get access to the bus. */
+ mv64x60_modify(&bh, MV64x60_CPU_MASTER_CNTL, 0, 1<<9);
+}
+
+static void dmv182_setup_cpu(int cpu)
+{
+ int whoami = mv64x60_read(&bh, MV64360_WHO_AM_I);
+
+ if (cpu != whoami) {
+ printk("CPU %d whoami %d\n", cpu, whoami);
+ BUG();
+ }
+
+ // Enable broadcasting of synchronization and cache/tlb
+ // flushing/invalidation instructions
+
+ mtspr(SPRN_HID1, mfspr(SPRN_HID1) | HID1_ABE | HID1_SYNCBE);
+ asm volatile("sync; isync" : : : "memory");
+
+ if (cpu == 1)
+ dmv182_setup_caches();
+}
+
+static void dmv182_message_pass(int target, int msg, ulong data, int wait)
+{
+ int i;
+ int reg;
+
+ if (unlikely(msg < 0 || msg > 7)) {
+ printk(KERN_ERR "dmv182_message_pass: bad message %x\n", msg);
+ return;
+ }
+
+ for_each_online_cpu(i) {
+ reg = MV64360_CPUx_DOORBELL(i);
+
+ if (target == MSG_ALL ||
+ (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) ||
+ target == i)
+ mv64x60_modify(&bh, reg, 1 << msg, 1 << msg);
+ }
+}
+
+static irqreturn_t dmv182_doorbell(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 bits = mv64x60_read(&bh, MV64360_CPUx_DOORBELL(smp_processor_id()));
+
+ bits &= 0xff;
+
+ mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR +
+ smp_processor_id() * 0x10, bits);
+
+ while (bits) {
+ int msg = __ilog2(bits);;
+ smp_message_recv(msg, regs);
+ bits &= ~(1 << msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct smp_ops_t dmv182_smp_ops = {
+ .probe = dmv182_smp_probe,
+ .kick_cpu = dmv182_kick_cpu,
+ .setup_cpu = dmv182_setup_cpu,
+ .message_pass = dmv182_message_pass,
+ .give_timebase = smp_generic_give_timebase,
+ .take_timebase = smp_generic_take_timebase,
+};
+
+#endif
+
+/*
+ * Configure the MV64x60 host bridge: install the board IRQ-routing hook
+ * and open one I/O window and one memory window on each of the two PCI
+ * buses.  Memory windows 1 and 2 are deliberately disabled (size 0).
+ */
+static void __init dmv182_setup_bridge(void)
+{
+	mv64x60_setup_info_t si;
+
+	memset(&si, 0, sizeof(si));
+
+	si.phys_reg_base = CONFIG_MV64X60_NEW_BASE;
+	si.map_irq = dmv182_map_irq;
+
+	si.pci_0.enable_bus = 1;
+	si.pci_0.enumerate_bus = 1;
+	si.pci_0.pci_io.cpu_base = 0xa0000000;
+	si.pci_0.pci_io.pci_base_hi = 0;
+	si.pci_0.pci_io.pci_base_lo = 0;
+	si.pci_0.pci_io.size = 0x01000000;
+	si.pci_0.pci_io.swap = 0x01000000; /* XXXX No swapping */
+	si.pci_0.pci_mem[0].cpu_base = 0x80000000;
+	si.pci_0.pci_mem[0].pci_base_hi = 0;
+	si.pci_0.pci_mem[0].pci_base_lo = 0x80000000;
+	si.pci_0.pci_mem[0].size = 0x10000000;
+	si.pci_0.pci_mem[0].swap = 0x01000000; /* XXXX No swapping */
+	si.pci_0.pci_mem[1].cpu_base = 0;
+	si.pci_0.pci_mem[1].pci_base_hi = 0;
+	si.pci_0.pci_mem[1].pci_base_lo = 0;
+	si.pci_0.pci_mem[1].size = 0;	/* Don't use this window */
+	si.pci_0.pci_mem[1].swap = 0;
+	si.pci_0.pci_mem[2].cpu_base = 0;
+	si.pci_0.pci_mem[2].pci_base_hi = 0;
+	si.pci_0.pci_mem[2].pci_base_lo = 0;
+	si.pci_0.pci_mem[2].size = 0;	/* Don't use this window */
+	si.pci_0.pci_mem[2].swap = 0;	/* was [1]: copy-paste fix */
+	si.pci_0.pci_cmd_bits = 0;
+	si.pci_0.latency_timer = 0x8;
+
+	si.pci_1.enable_bus = 1;
+	si.pci_1.enumerate_bus = 1;
+	si.pci_1.pci_io.cpu_base = 0xa1000000;
+	si.pci_1.pci_io.pci_base_hi = 0;
+	si.pci_1.pci_io.pci_base_lo = 0x01000000;
+	si.pci_1.pci_io.size = 0x01000000;
+	si.pci_1.pci_io.swap = 0x01000000; /* XXXX No swapping */
+	si.pci_1.pci_mem[0].cpu_base = 0x90000000;
+	si.pci_1.pci_mem[0].pci_base_hi = 0;
+	si.pci_1.pci_mem[0].pci_base_lo = 0x90000000;
+	si.pci_1.pci_mem[0].size = 0x10000000;
+	si.pci_1.pci_mem[0].swap = 0x01000000; /* XXXX No swapping */
+	si.pci_1.pci_mem[1].cpu_base = 0;
+	si.pci_1.pci_mem[1].pci_base_hi = 0;
+	si.pci_1.pci_mem[1].pci_base_lo = 0;
+	si.pci_1.pci_mem[1].size = 0;	/* Don't use this window */
+	si.pci_1.pci_mem[1].swap = 0;
+	si.pci_1.pci_mem[2].cpu_base = 0;
+	si.pci_1.pci_mem[2].pci_base_hi = 0;
+	si.pci_1.pci_mem[2].pci_base_lo = 0;
+	si.pci_1.pci_mem[2].size = 0;	/* Don't use this window */
+	si.pci_1.pci_mem[2].swap = 0;	/* was [1]: copy-paste fix */
+	si.pci_1.pci_cmd_bits = 0;
+	si.pci_1.latency_timer = 0x8;
+
+	si.window_preserve_mask_32 = 0x1f0;
+#if 0	/* NOTE(review): disabled tuning block; references an undeclared
+	 * 'i' and GT64260-specific snoop/prefetch options -- left as-is. */
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		si.cpu_prot_options[i] = 0;
+//		si.cpu_snoop_options[i] = GT64260_CPU_SNOOP_WB;
+		si.pci_0.acc_cntl_options[i] =
+			/* Breaks PCI (especially slot 4)
+			GT64260_PCI_ACC_CNTL_PREFETCHEN |
+			*/
+			GT64260_PCI_ACC_CNTL_DREADEN |
+			GT64260_PCI_ACC_CNTL_RDPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDMULPREFETCH |
+			GT64260_PCI_ACC_CNTL_SWAP_NONE |
+			GT64260_PCI_ACC_CNTL_MBURST_32_BTYES;
+		si.pci_0.snoop_options[i] = GT64260_PCI_SNOOP_WB;
+		si.pci_1.acc_cntl_options[i] =
+			/* Breaks PCI (especially slot 4)
+			GT64260_PCI_ACC_CNTL_PREFETCHEN |
+			*/
+			GT64260_PCI_ACC_CNTL_DREADEN |
+			GT64260_PCI_ACC_CNTL_RDPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDMULPREFETCH |
+			GT64260_PCI_ACC_CNTL_SWAP_NONE |
+			GT64260_PCI_ACC_CNTL_MBURST_32_BTYES;
+//		si.pci_1.snoop_options[i] = GT64260_PCI_SNOOP_WB;
+	}
+#endif
+
+	mv64x60_pci_exclude_bridge = 0;
+
+	/* Lookup PCI host bridges */
+	if (mv64x60_init(&bh, &si)) {
+		printk("Bridge initialization failed.\n");
+	}
+
+	return;
+}
+
+/*
+ * Map the on-board devices behind the bridge's device chip-selects and
+ * register the DS1501 RTC with the TODC layer.
+ */
+static void __init dmv182_setup_peripherals(void)
+{
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
+	                         0xf0000000, 0x08000000, 0); // FLASH
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
+	                         0xe0010000, 0x10000, 0); // I/O FPGA
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
+	                         0xe0000000, 0x10000, 0); // EPLD
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_2_WIN,
+	                         0xe0020000, 0x10000, 0); // RTC
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_3_WIN,
+	                         0xe0030000, 0x10000, 0); // NVRAM
+
+	TODC_INIT(TODC_TYPE_DS1501, 0, 0, dmv182_rtc, 8);
+}
+
+/*
+ * Report the amount of usable RAM.  Probing via mv64x60_get_mem_size()
+ * is disabled; the board hangs with more than 512MiB enabled (see the
+ * comment below), so the size is hard-wired to 512MiB for now.
+ */
+unsigned long __init dmv182_find_end_of_memory(void)
+{
+#if 0
+	return mv64x60_get_mem_size(0xfff00000 /*CONFIG_MV64X60_NEW_BASE*/,
+	                            MV64x60_TYPE_MV64360);
+#endif
+	/* But it dies if we enable more than 512MiB.  Debug later... */
+	return 0x20000000;
+}
+
+/*
+ * Early platform entry point: parse the bootloader's bootinfo records,
+ * set up the BATs, and fill in the ppc_md machine-dependent callbacks.
+ * r3-r7 are the register arguments handed over by the bootloader
+ * (unused here beyond parse_bootinfo()).
+ */
+void __init platform_init(unsigned long r3, unsigned long r4,
+		          unsigned long r5, unsigned long r6,
+		          unsigned long r7)
+{
+	parse_bootinfo(find_bootinfo());
+
+	dmv182_setup_bats();
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG)
+	ppc_md.progress = gen550_progress;
+#endif
+	ppc_md.setup_io_mappings = dmv182_map_io;
+	ppc_md.find_end_of_memory = dmv182_find_end_of_memory;
+	ppc_md.setup_arch = dmv182_setup_arch;
+	ppc_md.init_IRQ = dmv182_init_irq;
+	ppc_md.get_irq = mv64360_get_irq;
+	ppc_md.calibrate_decr = dmv182_calibrate_decr;
+//	ppc_md.pci_bridge_reserve_space = dmv182_pci_bridge_reserve_space;
+
+	/* No soft power-off on this board: power_off just halts. */
+	ppc_md.halt = dmv182_halt;
+	ppc_md.power_off = dmv182_halt;
+	ppc_md.restart = dmv182_restart;
+#ifdef CONFIG_SMP
+	ppc_md.smp_ops = &dmv182_smp_ops;
+#endif
+#ifdef CONFIG_GEN_RTC
+	ppc_md.time_init = todc_time_init;
+	ppc_md.set_rtc_time = todc_set_rtc_time;
+	ppc_md.get_rtc_time = todc_get_rtc_time;
+
+	ppc_md.nvram_read_val = todc_direct_read_val;
+	ppc_md.nvram_write_val = todc_direct_write_val;
+#endif
+}
--- /dev/null
+#ifndef __DMV182_H
+#define __DMV182_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/* Physical base/size of the on-board device region (FPGA/EPLD/RTC/NVRAM). */
+#define dmv182_board_io_phys	0xe0000000
+#define dmv182_board_io_size	0x00040000
+
+/* In the bootwrapper the region is accessed 1:1; the kernel maps it
+ * virtually at 0xf0000000 (see the board's setup_io_mappings). */
+#ifdef __BOOTER__
+#define dmv182_board_io_virt	((u8 *)dmv182_board_io_phys)
+#else
+#define dmv182_board_io_virt	((u8 *)0xf0000000)
+#endif
+
+#define dmv182_fpga_io	(dmv182_board_io_virt + 0x10000)
+#define dmv182_rtc	(dmv182_board_io_virt + 0x20000)
+#define dmv182_nvram	(dmv182_board_io_virt + 0x30000)
+
+// This has to go above the mv64360 interrupts, as even though
+// the mv64360 code can handle relocating its interrupt range,
+// the device drivers themselves are oblivious to this.
+
+#define DMV182_IRQ_TEMPA	96
+#define DMV182_IRQ_TEMPB	97
+#define DMV182_IRQ_TEMPC	98
+#define DMV182_IRQ_TEMPD	99
+#define DMV182_IRQ_PMC1A	100
+#define DMV182_IRQ_PMC1B	101
+#define DMV182_IRQ_PMC1C	102
+#define DMV182_IRQ_PMC1D	103
+#define DMV182_IRQ_PMC2A	104
+#define DMV182_IRQ_PMC2B	105
+#define DMV182_IRQ_PMC2C	106
+#define DMV182_IRQ_PMC2D	107
+#define DMV182_IRQ_ENET_PHY2	108
+#define DMV182_IRQ_ENET_PHY1	109
+#define DMV182_IRQ_IPM0		110
+#define DMV182_IRQ_IPM1		111
+#define DMV182_IRQ_USB_A	112
+#define DMV182_IRQ_USB_B	113
+#define DMV182_IRQ_USB_C	114
+#define DMV182_IRQ_USB_SMI	115
+#define DMV182_IRQ_RTC		116
+#define DMV182_IRQ_WDOG_CPU0	117
+#define DMV182_IRQ_WDOG_CPU1	118
+/* NOTE(review): 119 is unassigned -- presumably intentional; confirm
+ * against the board's interrupt map. */
+#define DMV182_IRQ_TIMER0_CPU0	120
+#define DMV182_IRQ_TIMER1_CPU0	121
+#define DMV182_IRQ_TIMER2_CPU0	122
+#define DMV182_IRQ_TIMER0_CPU1	123
+#define DMV182_IRQ_TIMER1_CPU1	124
+#define DMV182_IRQ_TIMER2_CPU1	125
+#define DMV182_IRQ_SERIAL_CH1	126
+#define DMV182_IRQ_SERIAL_CH2	127
+#define DMV182_IRQ_VME_CPU0	128
+#define DMV182_IRQ_VME_CPU1	129
+
+// 28 FPGA interrupts starting from here
+#define DMV182_IRQ_FPGA		132
+
+#endif
--- /dev/null
+#ifndef __DMV182_SERIAL_H
+#define __DMV182_SERIAL_H
+
+#include <linux/serial.h>
+#include <platforms/dmv182.h>
+
+/* UART input clock is 36.864MHz with the standard /16 divisor. */
+#define BASE_BAUD (36864000 / 16)
+#define RS_TABLE_SIZE 2
+
+/* Describe one of the two FPGA-attached UART channels; registers are
+ * memory-mapped at fpga_io + 0x18, 8 bytes per channel. */
+#define STD_UART_OP(num) \
+	{ .baud_base = BASE_BAUD, \
+	  .irq = DMV182_IRQ_SERIAL_CH##num, \
+	  .flags = ASYNC_SKIP_TEST | ASYNC_BUGGY_UART, \
+	  .iomem_base = dmv182_fpga_io + 0x18 + 8 * (num - 1), \
+	  .io_type = SERIAL_IO_MEM },
+
+#define SERIAL_PORT_DFNS STD_UART_OP(1) STD_UART_OP(2)
+
+#endif
--- /dev/null
+/*
+ * arch/ppc/kernel/error_log.c
+ *
+ * Copyright (c) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * Error processing of errors found by the RTAS event-scan routine,
+ * which is run with every heartbeat. (chrp_setup.c)
+ */
+
+#include <linux/sched.h>
+
+#include <asm/prom.h>
+
+#include "error_log.h"
+
+/* ****************************************************************** */
+/*
+ * EVENT-SCAN
+ * The whole stuff below here doesn't take any action when it found
+ * an error, it just prints as much information as possible and
+ * then its up to the user to decide what to do.
+ *
+ * Returns 0 if no errors were found
+ * Returns 1 if there may be more errors
+ */
+int ppc_rtas_errorlog_scan(void)
+{
+/* Human-readable descriptions of the severity field; indexed by
+ * error_log.severity (SEVERITY_* in error_log.h). */
+const char *_errlog_severity[] = {
+#ifdef VERBOSE_ERRORS
+	"No Error\n\t\
+Should require no further information",
+	"Event\n\t\
+This is not really an error, it is an event. I use events\n\t\
+to communicate with RTAS back and forth.",
+	"Warning\n\t\
+Indicates a non-state-losing error, either fully recovered\n\t\
+by RTAS or not needing recovery. Ignore it.",
+	"Error sync\n\t\
+May only be fatal to a certain program or thread. Recovery\n\t\
+and continuation is possible, if I only had a handler for\n\t\
+this. Less serious",
+	"Error\n\t\
+Less serious, but still causing a loss of data and state.\n\t\
+I can't tell you exactly what to do, You have to decide\n\t\
+with help from the target and initiator field, what kind\n\t\
+of further actions may take place.",
+	"Fatal\n\t\
+Represent a permanent hardware failure and I believe this\n\t\
+affects my overall performance and behaviour. I would not\n\t\
+attempt to continue normal operation."
+#else
+	"No Error",
+	"Event",
+	"Warning",
+	"Error sync",
+	"Error",
+	"Fatal"
+#endif /* VERBOSE_ERRORS */
+};
+
+#if 0 /* unused?? */
+const char *_errlog_disposition[] = {
+#ifdef VERBOSE_ERRORS
+	"Fully recovered\n\t\
+There was an error, but it is fully recovered by RTAS.",
+	"Limited recovery\n\t\
+RTAS was able to recover the state of the machine, but some\n\t\
+feature of the machine has been disabled or lost (for example\n\t\
+error checking) or performance may suffer.",
+	"Not recovered\n\t\
+Whether RTAS did not try to recover anything or recovery failed:\n\t\
+HOUSTON, WE HAVE A PROBLEM!"
+#else
+	"Fully recovered",
+	"Limited recovery",
+	"Not recovered"
+#endif /* VERBOSE_ERRORS */
+};
+#endif
+
+/* Descriptions of the "extended log present" bit. */
+const char *_errlog_extended[] = {
+#ifdef VERBOSE_ERRORS
+	"Not present\n\t\
+Sad, the RTAS call didn't return an extended error log.",
+	"Present\n\t\
+The extended log is present and hopefully it contains a lot of\n\t\
+useful information, which leads to the solution of the problem."
+#else
+	"Not present",
+	"Present"
+#endif /* VERBOSE_ERRORS */
+};
+
+const char *_errlog_initiator[] = {
+	"Unknown or not applicable",
+	"CPU",
+	"PCI",
+	"ISA",
+	"Memory",
+	"Power management"
+};
+
+const char *_errlog_target[] = {
+	"Unknown or not applicable",
+	"CPU",
+	"PCI",
+	"ISA",
+	"Memory",
+	"Power management"
+};
+	rtas_error_log error_log;
+	char logdata[1024];
+	int error;
+#if 0 /* unused?? */
+	int retries = 0;	/* if HW error, try 10 times */
+#endif
+
+	/* Ask RTAS for any pending internal-error/EPOW events; the
+	 * extended log (if any) lands in logdata. */
+	error = call_rtas ("event-scan", 4, 1, (unsigned long *)&error_log,
+			INTERNAL_ERROR | EPOW_WARNING,
+			0, __pa(logdata), 1024);
+
+	if (error == 1) /* no errors found */
+		return 0;
+
+	if (error == -1) {
+		printk(KERN_ERR "Unable to get errors. Do yourself a favor and throw this box away\n");
+		return 0;
+	}
+	if (error_log.version != 1)
+		printk(KERN_WARNING "Unknown version (%d), please implement me\n",
+				error_log.version);
+
+	switch (error_log.disposition) {
+		case DISP_FULLY_RECOVERED:
+			/* there was an error, but everything is fine now */
+			return 0;
+		case DISP_NOT_RECOVERED:
+			printk("We have a really serious Problem!\n");
+			/* fall through: dump the full classification */
+		case DISP_LIMITED_RECOVERY:
+			printk("Error classification\n");
+			printk("Severity  : %s\n",
+					ppc_rtas_errorlog_check_severity (error_log));
+			printk("Initiator : %s\n",
+					ppc_rtas_errorlog_check_initiator (error_log));
+			printk("Target    : %s\n",
+					ppc_rtas_errorlog_check_target (error_log));
+			printk("Type      : %s\n",
+					ppc_rtas_errorlog_check_type (error_log));
+			printk("Ext. log  : %s\n",
+					ppc_rtas_errorlog_check_extended (error_log));
+			if (error_log.extended)
+				ppc_rtas_errorlog_disect_extended (logdata);
+			return 1;
+		default:
+			/* nothing */
+			break;
+	}
+	return 0;
+}
+/* ****************************************************************** */
+/*
+ * Translate error_log.type into a human-readable string.  EPOW and
+ * power-management events are special-cased; any other value outside
+ * the lookup table (e.g. 0x0b..0x3f) is reported as unknown rather
+ * than indexing past the end of the table.
+ */
+const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log)
+{
+	const char *_errlog_type[] = {
+		"unknown type",
+		"too many tries failed",
+		"TCE error",
+		"RTAS device failed",
+		"target timed out",
+		"parity error on data",		/* 5 */
+		"parity error on address",
+		"parity error on external cache",
+		"access to invalid address",
+		"uncorrectable ECC error",
+		"corrected ECC error"		/* 10 */
+	};
+	if (error_log.type == TYPE_EPOW)
+		return "EPOW";
+	if (error_log.type >= TYPE_PMGM_POWER_SW_ON)
+		return "PowerMGM Event (not handled right now)";
+	/* Guard the table lookup: only 0x00..0x0a are defined. */
+	if (error_log.type >= sizeof(_errlog_type)/sizeof(_errlog_type[0]))
+		return "unknown type";
+	return _errlog_type[error_log.type];
+}
+
--- /dev/null
+#ifndef __ERROR_LOG_H__
+#define __ERROR_LOG_H__
+
+#define VERBOSE_ERRORS 1 /* Maybe I enlarge the kernel too much */
+#undef VERBOSE_ERRORS
+
+/* Event classes */
+/* XXX: Endianness correct? NOW */
+#define INTERNAL_ERROR		0x80000000 /* set bit 0 */
+#define EPOW_WARNING		0x40000000 /* set bit 1 */
+#define POWERMGM_EVENTS		0x20000000 /* set bit 2 */
+
+/* event-scan returns */
+#define SEVERITY_FATAL		0x5
+#define SEVERITY_ERROR		0x4
+#define SEVERITY_ERROR_SYNC	0x3
+#define SEVERITY_WARNING	0x2
+#define SEVERITY_EVENT		0x1
+#define SEVERITY_NO_ERROR	0x0
+#define DISP_FULLY_RECOVERED	0x0
+#define DISP_LIMITED_RECOVERY	0x1
+#define DISP_NOT_RECOVERED	0x2
+#define PART_PRESENT		0x0
+#define PART_NOT_PRESENT	0x1
+#define INITIATOR_UNKNOWN	0x0
+#define INITIATOR_CPU		0x1
+#define INITIATOR_PCI		0x2
+#define INITIATOR_ISA		0x3
+#define INITIATOR_MEMORY	0x4
+#define INITIATOR_POWERMGM	0x5
+#define TARGET_UNKNOWN		0x0
+#define TARGET_CPU		0x1
+#define TARGET_PCI		0x2
+#define TARGET_ISA		0x3
+#define TARGET_MEMORY		0x4
+#define TARGET_POWERMGM		0x5
+#define TYPE_RETRY		0x01
+#define TYPE_TCE_ERR		0x02
+#define TYPE_INTERN_DEV_FAIL	0x03
+#define TYPE_TIMEOUT		0x04
+#define TYPE_DATA_PARITY	0x05
+#define TYPE_ADDR_PARITY	0x06
+#define TYPE_CACHE_PARITY	0x07
+#define TYPE_ADDR_INVALID	0x08
+#define TYPE_ECC_UNCORR		0x09
+#define TYPE_ECC_CORR		0x0a
+#define TYPE_EPOW		0x40
+/* I don't add PowerMGM events right now, this is a different topic */
+#define TYPE_PMGM_POWER_SW_ON	0x60
+#define TYPE_PMGM_POWER_SW_OFF	0x61
+#define TYPE_PMGM_LID_OPEN	0x62
+#define TYPE_PMGM_LID_CLOSE	0x63
+#define TYPE_PMGM_SLEEP_BTN	0x64
+#define TYPE_PMGM_WAKE_BTN	0x65
+#define TYPE_PMGM_BATTERY_WARN	0x66
+#define TYPE_PMGM_BATTERY_CRIT	0x67
+#define TYPE_PMGM_SWITCH_TO_BAT	0x68
+#define TYPE_PMGM_SWITCH_TO_AC	0x69
+#define TYPE_PMGM_KBD_OR_MOUSE	0x6a
+#define TYPE_PMGM_ENCLOS_OPEN	0x6b
+#define TYPE_PMGM_ENCLOS_CLOSED	0x6c
+#define TYPE_PMGM_RING_INDICATE	0x6d
+#define TYPE_PMGM_LAN_ATTENTION	0x6e
+#define TYPE_PMGM_TIME_ALARM	0x6f
+#define TYPE_PMGM_CONFIG_CHANGE	0x70
+#define TYPE_PMGM_SERVICE_PROC	0x71
+
+/* Fixed RTAS error-log header; bitfield layout matches the firmware's
+ * 64-bit log record (version/severity/.../extended_log_length). */
+typedef struct _rtas_error_log {
+	unsigned long version:8;		/* Architectural version */
+	unsigned long severity:3;		/* Severity level of error */
+	unsigned long disposition:2;		/* Degree of recovery */
+	unsigned long extended:1;		/* extended log present? */
+	unsigned long /* reserved */ :2;	/* Reserved for future use */
+	unsigned long initiator:4;		/* Initiator of event */
+	unsigned long target:4;			/* Target of failed operation */
+	unsigned long type:8;			/* General event or error*/
+	unsigned long extended_log_length:32;	/* length in bytes */
+} rtas_error_log;
+
+/* ****************************************************************** */
+/* NOTE: these macros index string tables declared locally inside
+ * ppc_rtas_errorlog_scan(), so they are only usable from there. */
+#define ppc_rtas_errorlog_check_severity(x) \
+	(_errlog_severity[x.severity])
+#define ppc_rtas_errorlog_check_target(x) \
+	(_errlog_target[x.target])
+#define ppc_rtas_errorlog_check_initiator(x) \
+	(_errlog_initiator[x.initiator])
+#define ppc_rtas_errorlog_check_extended(x) \
+	(_errlog_extended[x.extended])
+#define ppc_rtas_errorlog_disect_extended(x) \
+	do { /* implement me */ } while(0)
+extern const char * ppc_rtas_errorlog_check_type (rtas_error_log error_log);
+extern int ppc_rtas_errorlog_scan(void);
+
+
+#endif /* __ERROR_LOG_H__ */
--- /dev/null
+/*
+ * arch/ppc/platforms/ev64260.c
+ *
+ * Board setup routines for the Marvell/Galileo EV-64260-BP Evaluation Board.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+/*
+ * The EV-64260-BP port is the result of hard work from many people from
+ * many companies. In particular, employees of Marvell/Galileo, Mission
+ * Critical Linux, Xyterra, and MontaVista Software were heavily involved.
+ *
+ * Note: I have not been able to get *all* PCI slots to work reliably
+ * at 66 MHz. I recommend setting jumpers J15 & J16 to short pins 1&2
+ * so that 33 MHz is used. --MAG
+ * Note: The 750CXe and 7450 are not stable with a 125MHz or 133MHz TCLK/SYSCLK.
+ * At 100MHz, they are solid.
+ */
+#include <linux/config.h>
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/irq.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#if !defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#endif
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+#include <asm/mv64x60.h>
+#include <asm/ppcboot.h>
+#include <asm/todc.h>
+#include <asm/time.h>
+#include <asm/ocp.h>
+
+#include <platforms/ev64260.h>
+
+#define BOARD_VENDOR "Marvell/Galileo"
+#define BOARD_MACHINE "EV-64260-BP"
+
+/* Set IDE controllers into Native mode? */
+/* XXXX
+#define SET_PCI_IDE_NATIVE
+*/
+
+/* Board state filled in from the ppcboot bd_t, if the bootloader
+ * handed one over (ppcboot_bd_valid says whether ppcboot_bd is usable). */
+ulong	ev64260_mem_size = 0;
+bd_t	ppcboot_bd;
+int	ppcboot_bd_valid=0;
+
+static mv64x60_handle_t	bh;
+
+#if !defined(CONFIG_SERIAL_MPSC_CONSOLE)
+extern void gen550_progress(char *, unsigned short);
+extern void gen550_init(int, struct serial_struct *);
+#endif
+
+/* Bus-to-core clock multiplier tables (x2), indexed by the PLL config
+ * bits read from HID1. */
+static const unsigned int cpu_7xx[16] = { /* 7xx & 74xx (but not 745x) */
+	18, 15, 14, 2, 4, 13, 5, 9, 6, 11, 8, 10, 16, 12, 7, 0
+};
+static const unsigned int cpu_745x[2][16] = { /* PLL_EXT 0 & 1 */
+	{ 1, 15, 14, 2, 4, 13, 5, 9, 6, 11, 8, 10, 16, 12, 7, 0 },
+	{ 0, 30, 0, 2, 0, 26, 0, 18, 0, 22, 20, 24, 28, 32, 0, 0 }
+};
+
+
+TODC_ALLOC();
+
+/*
+ * Bus (SYSCLK) frequency in Hz: use the value reported by ppcboot when
+ * a valid board-info record was passed in, otherwise fall back to the
+ * only speed known to be stable.
+ */
+static int
+ev64260_get_bus_speed(void)
+{
+	if (ppcboot_bd_valid)
+		return ppcboot_bd.bi_busfreq;
+
+	return 100000000;	/* Only 100MHz is stable */
+}
+
+/*
+ * Core clock in Hz: bus speed times the PLL multiplier decoded from
+ * HID1.  745x parts use a different bit field and a second table
+ * selected by PLL_EXT, which cannot be read from software.
+ */
+static int
+ev64260_get_cpu_speed(void)
+{
+	unsigned long	pvr, hid1, pll_ext;
+
+	pvr = PVR_VER(mfspr(PVR));
+
+	if (pvr != PVR_VER(PVR_7450)) {
+		hid1 = mfspr(HID1) >> 28;
+		return ev64260_get_bus_speed() * cpu_7xx[hid1]/2;
+	}
+	else {
+		hid1 = (mfspr(HID1) & 0x0001e000) >> 13;
+		pll_ext = 0; /* No way to read; must get from schematic */
+		return ev64260_get_bus_speed() * cpu_745x[pll_ext][hid1]/2;
+	}
+}
+
+/*
+ * RAM size: trust the ppcboot board info when present; otherwise probe
+ * the bridge's memory controller.
+ */
+unsigned long __init
+ev64260_find_end_of_memory(void)
+{
+	if (ppcboot_bd_valid)
+		return ppcboot_bd.bi_memsize;
+
+	return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
+		MV64x60_TYPE_GT64260A);
+}
+
+#if 0	/* XXXX */
+#ifdef SET_PCI_IDE_NATIVE
+/*
+ * Flip any PCI IDE controller from legacy (prog-if 0x8a) into native
+ * mode (0x8f) and re-run PCI setup for it.  Disabled (#if 0).
+ */
+static void __init
+set_pci_native_mode(void)
+{
+	struct pci_dev *dev;
+
+	/* Better way of doing this ??? */
+	pci_for_each_dev(dev) {
+		int class = dev->class >> 8;
+
+		/* enable pci native mode */
+		if (class == PCI_CLASS_STORAGE_IDE) {
+			u8 reg;
+
+			/* was "&reg;" mangled into an HTML entity */
+			pci_read_config_byte(dev, 0x9, &reg);
+			if (reg == 0x8a) {
+				printk("PCI: Enabling PCI IDE native mode on %s\n", dev->slot_name);
+				pci_write_config_byte(dev, 0x9,  0x8f);
+
+				/* let the pci code set this device up after we change it */
+				pci_setup_device(dev);
+			} else if (reg != 0x8f) {
+				printk("PCI: IDE chip in unknown mode 0x%02x on %s", reg, dev->slot_name);
+			}
+		}
+	}
+}
+#endif
+#endif
+
+/* Post-enumeration PCI fixups; currently only the (normally disabled)
+ * IDE native-mode switch. */
+static void __init
+ev64260_pci_fixups(void)
+{
+#ifdef SET_PCI_IDE_NATIVE
+	set_pci_native_mode();
+#endif
+}
+
+
+/*
+ * Marvell/Galileo EV-64260-BP Evaluation Board PCI interrupt routing.
+ * Note: By playing with J8 and JP1-4, you can get 2 IRQs from the first
+ * PCI bus (in which case, INTPIN B would be EV64260_PCI_1_IRQ).
+ * This is the most IRQs you can get from one bus with this board, though.
+ */
+/*
+ * Route PCI interrupts: each hose serves IDSELs 7-8, and every device
+ * on a bus shares that bus's single interrupt line on INTPIN A.
+ */
+static int __init
+ev64260_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+	struct pci_controller	*hose = pci_bus_to_hose(dev->bus->number);
+
+	if (hose->index == 0) {
+		static char pci_irq_table[][4] =
+		/*
+		 *	PCI IDSEL/INTPIN->INTLINE
+		 * 	   A   B   C   D
+		 */
+		{
+			{EV64260_PCI_0_IRQ,0,0,0}, /* IDSEL 7 - PCI bus 0 */
+			{EV64260_PCI_0_IRQ,0,0,0}, /* IDSEL 8 - PCI bus 0 */
+		};
+
+		const long min_idsel = 7, max_idsel = 8, irqs_per_slot = 4;
+		return PCI_IRQ_TABLE_LOOKUP;
+	}
+	else {
+		static char pci_irq_table[][4] =
+		/*
+		 *	PCI IDSEL/INTPIN->INTLINE
+		 * 	   A   B   C   D
+		 */
+		{
+			{ EV64260_PCI_1_IRQ,0,0,0}, /* IDSEL 7 - PCI bus 1 */
+			{ EV64260_PCI_1_IRQ,0,0,0}, /* IDSEL 8 - PCI bus 1 */
+		};
+
+		const long min_idsel = 7, max_idsel = 8, irqs_per_slot = 4;
+		return PCI_IRQ_TABLE_LOOKUP;
+	}
+}
+
+/*
+ * Program the bridge chip-select windows for the on-board devices,
+ * register the DS1501 RTC, and route/polarize the MPP pins used as
+ * interrupt inputs.  Register write order follows the 64260 manual;
+ * do not reorder.
+ */
+static void __init
+ev64260_setup_peripherals(void)
+{
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
+		EV64260_EMB_FLASH_BASE, EV64260_EMB_FLASH_SIZE, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
+		EV64260_EXT_SRAM_BASE, EV64260_EXT_SRAM_SIZE, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
+		EV64260_TODC_BASE, EV64260_TODC_SIZE, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_2_WIN,
+		EV64260_UART_BASE, EV64260_UART_SIZE, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_3_WIN,
+		EV64260_EXT_FLASH_BASE, EV64260_EXT_FLASH_SIZE, 0);
+
+	TODC_INIT(TODC_TYPE_DS1501, 0, 0,
+			ioremap(EV64260_TODC_BASE, EV64260_TODC_SIZE), 8);
+
+	mv64x60_clr_bits(&bh, MV64x60_CPU_CONFIG, ((1<<28) | (1<<29)));
+	mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1<<27));
+
+	if (ev64260_get_bus_speed() > 100000000) {
+		mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1<<23));
+	}
+
+	mv64x60_set_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL,((1<<0) | (1<<3)));
+	mv64x60_set_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL,((1<<0) | (1<<3)));
+
+	/*
+	 * Enabling of PCI internal-vs-external arbitration
+	 * is a platform- and errata-dependent decision.
+	 */
+	if (bh.type == MV64x60_TYPE_GT64260A )  {
+		mv64x60_set_bits(&bh, MV64x60_PCI0_ARBITER_CNTL, (1<<31));
+		mv64x60_set_bits(&bh, MV64x60_PCI1_ARBITER_CNTL, (1<<31));
+	}
+
+	mv64x60_set_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1<<9)); /* Only 1 cpu */
+
+	/*
+	 * The EV-64260-BP uses several Multi-Purpose Pins (MPP) on the 64260
+	 * bridge as interrupt inputs (via the General Purpose Ports (GPP)
+	 * register).  Need to route the MPP inputs to the GPP and set the
+	 * polarity correctly.
+	 *
+	 * In MPP Control 2 Register
+	 *   MPP 21 -> GPP 21 (DUART channel A intr) bits 20-23 -> 0
+	 *   MPP 22 -> GPP 22 (DUART channel B intr) bits 24-27 -> 0
+	 */
+	mv64x60_clr_bits(&bh, MV64x60_MPP_CNTL_2, (0xf<<20) | (0xf<<24) );
+
+	/*
+	 * In MPP Control 3 Register
+	 *   MPP 26 -> GPP 26 (RTC INT)		bits  8-11 -> 0
+	 *   MPP 27 -> GPP 27 (PCI 0 INTA)	bits 12-15 -> 0
+	 *   MPP 29 -> GPP 29 (PCI 1 INTA)	bits 20-23 -> 0
+	 */
+	mv64x60_clr_bits(&bh, MV64x60_MPP_CNTL_3,
+			(0xf<<8) | (0xf<<12) | (0xf<<20));
+
+#define GPP_EXTERNAL_INTERRUPTS \
+		((1<<21) | (1<<22) | (1<<26) | (1<<27) | (1<<29))
+	/* DUART & PCI interrupts are inputs */
+	mv64x60_clr_bits(&bh, MV64x60_GPP_IO_CNTL, GPP_EXTERNAL_INTERRUPTS);
+	/* DUART & PCI interrupts are active low */
+	mv64x60_set_bits(&bh, MV64x60_GPP_LEVEL_CNTL, GPP_EXTERNAL_INTERRUPTS);
+
+	/* Clear any pending interrupts for these inputs and enable them. */
+	mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~GPP_EXTERNAL_INTERRUPTS);
+	mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, GPP_EXTERNAL_INTERRUPTS);
+
+	/*
+	 * Set MPSC Multiplex RMII
+	 * NOTE: ethernet driver modifies bit 0 and 1
+	 */
+	mv64x60_write(&bh, GT64260_MPP_SERIAL_PORTS_MULTIPLEX, 0x00001102);
+	return;
+}
+
+
+/*
+ * Configure the GT-64260 host bridge: IRQ routing hook, one I/O and
+ * one memory window per PCI bus (windows 1 and 2 disabled), and the
+ * per-window CPU/PCI snoop and access-control options.
+ */
+static void __init
+ev64260_setup_bridge(void)
+{
+	mv64x60_setup_info_t	si;
+	int			i;
+
+	memset(&si, 0, sizeof(si));
+
+	si.phys_reg_base = CONFIG_MV64X60_NEW_BASE;
+	si.map_irq = ev64260_map_irq;
+
+	si.pci_0.enable_bus = 1;
+	si.pci_0.enumerate_bus = 1;
+	si.pci_0.pci_io.cpu_base = 0xa0000000;
+	si.pci_0.pci_io.pci_base_hi = 0;
+	si.pci_0.pci_io.pci_base_lo = 0;
+	si.pci_0.pci_io.size = 0x01000000;
+	si.pci_0.pci_io.swap = 0x01000000; /* XXXX No swapping */
+	si.pci_0.pci_mem[0].cpu_base = 0x80000000;
+	si.pci_0.pci_mem[0].pci_base_hi = 0;
+	si.pci_0.pci_mem[0].pci_base_lo = 0x80000000;
+	si.pci_0.pci_mem[0].size = 0x10000000;
+	si.pci_0.pci_mem[0].swap = 0x01000000; /* XXXX No swapping */
+	si.pci_0.pci_mem[1].cpu_base = 0;
+	si.pci_0.pci_mem[1].pci_base_hi = 0;
+	si.pci_0.pci_mem[1].pci_base_lo = 0;
+	si.pci_0.pci_mem[1].size = 0;	/* Don't use this window */
+	si.pci_0.pci_mem[1].swap = 0;
+	si.pci_0.pci_mem[2].cpu_base = 0;
+	si.pci_0.pci_mem[2].pci_base_hi = 0;
+	si.pci_0.pci_mem[2].pci_base_lo = 0;
+	si.pci_0.pci_mem[2].size = 0;	/* Don't use this window */
+	si.pci_0.pci_mem[2].swap = 0;	/* was [1]: copy-paste fix */
+	si.pci_0.pci_cmd_bits = 0;
+	si.pci_0.latency_timer = 0x8;
+
+	si.pci_1.enable_bus = 1;
+	si.pci_1.enumerate_bus = 1;
+	si.pci_1.pci_io.cpu_base = 0xa1000000;
+	si.pci_1.pci_io.pci_base_hi = 0;
+	si.pci_1.pci_io.pci_base_lo = 0x01000000;
+	si.pci_1.pci_io.size = 0x01000000;
+	si.pci_1.pci_io.swap = 0x01000000; /* XXXX No swapping */
+	si.pci_1.pci_mem[0].cpu_base = 0x90000000;
+	si.pci_1.pci_mem[0].pci_base_hi = 0;
+	si.pci_1.pci_mem[0].pci_base_lo = 0x90000000;
+	si.pci_1.pci_mem[0].size = 0x10000000;
+	si.pci_1.pci_mem[0].swap = 0x01000000; /* XXXX No swapping */
+	si.pci_1.pci_mem[1].cpu_base = 0;
+	si.pci_1.pci_mem[1].pci_base_hi = 0;
+	si.pci_1.pci_mem[1].pci_base_lo = 0;
+	si.pci_1.pci_mem[1].size = 0;	/* Don't use this window */
+	si.pci_1.pci_mem[1].swap = 0;
+	si.pci_1.pci_mem[2].cpu_base = 0;
+	si.pci_1.pci_mem[2].pci_base_hi = 0;
+	si.pci_1.pci_mem[2].pci_base_lo = 0;
+	si.pci_1.pci_mem[2].size = 0;	/* Don't use this window */
+	si.pci_1.pci_mem[2].swap = 0;	/* was [1]: copy-paste fix */
+	si.pci_1.pci_cmd_bits = 0;
+	si.pci_1.latency_timer = 0x8;
+
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		si.cpu_prot_options[i] = 0;
+		si.cpu_snoop_options[i] = GT64260_CPU_SNOOP_WB;
+		si.pci_0.acc_cntl_options[i] =
+			/* Breaks PCI (especially slot 4)
+			GT64260_PCI_ACC_CNTL_PREFETCHEN |
+			*/
+			GT64260_PCI_ACC_CNTL_DREADEN |
+			GT64260_PCI_ACC_CNTL_RDPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDMULPREFETCH |
+			GT64260_PCI_ACC_CNTL_SWAP_NONE |
+			GT64260_PCI_ACC_CNTL_MBURST_32_BTYES;
+		si.pci_0.snoop_options[i] = GT64260_PCI_SNOOP_WB;
+		si.pci_1.acc_cntl_options[i] =
+			/* Breaks PCI (especially slot 4)
+			GT64260_PCI_ACC_CNTL_PREFETCHEN |
+			*/
+			GT64260_PCI_ACC_CNTL_DREADEN |
+			GT64260_PCI_ACC_CNTL_RDPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |
+			GT64260_PCI_ACC_CNTL_RDMULPREFETCH |
+			GT64260_PCI_ACC_CNTL_SWAP_NONE |
+			GT64260_PCI_ACC_CNTL_MBURST_32_BTYES;
+		si.pci_1.snoop_options[i] = GT64260_PCI_SNOOP_WB;
+	}
+
+	/* Lookup PCI host bridges */
+	if (mv64x60_init(&bh, &si)) {
+		printk("Bridge initialization failed.\n");
+	}
+
+	return;
+}
+
+#if defined(CONFIG_SERIAL_8250) && !defined(CONFIG_SERIAL_MPSC_CONSOLE)
+/*
+ * Register the on-board DUART with the 8250 core early enough for a
+ * serial console; guarded so repeated calls only set up once.
+ * Port 1 setup is disabled (#if 0).
+ */
+static void __init
+ev64260_early_serial_map(void)
+{
+	struct uart_port	port;
+	static char		first_time = 1;
+
+	if (first_time) {
+		memset(&port, 0, sizeof(port));
+
+		port.membase = ioremap(EV64260_SERIAL_0, EV64260_UART_SIZE);
+		port.irq = EV64260_UART_0_IRQ;
+		port.uartclk = BASE_BAUD * 16;
+		port.regshift = 2;
+		port.iotype = SERIAL_IO_MEM;
+		port.flags = STD_COM_FLAGS;
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+		gen550_init(0, &port);
+#endif
+
+		if (early_serial_setup(&port) != 0) {
+			printk("Early serial init of port 0 failed\n");
+		}
+
+#if 0	/* XXXX */
+		/* Assume early_serial_setup() doesn't modify port */
+		port.membase = ioremap(EV64260_SERIAL_1, EV64260_UART_SIZE);
+		port.irq = EV64260_UART_1_IRQ;
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+		gen550_init(1, &port);
+#endif
+
+		if (early_serial_setup(&port) != 0) {
+			printk("Early serial init of port 1 failed\n");
+		}
+#endif
+
+		first_time = 0;
+	}
+
+	return;
+}
+#elif defined(CONFIG_SERIAL_MPSC_CONSOLE)
+/*
+ * MPSC-console variant: only needed to bring the KGDB port up early;
+ * a no-op unless CONFIG_KGDB is enabled.
+ */
+static void __init
+ev64260_early_serial_map(void)
+{
+#ifdef	CONFIG_KGDB
+	static char	first_time = 1;
+
+
+#if defined(CONFIG_KGDB_TTYS0)
+#define KGDB_PORT 0
+#elif defined(CONFIG_KGDB_TTYS1)
+#define KGDB_PORT 1
+#else
+#error "Invalid kgdb_tty port"
+#endif
+
+	if (first_time) {
+		gt_early_mpsc_init(KGDB_PORT, B9600|CS8|CREAD|HUPCL|CLOCAL);
+		first_time = 0;
+	}
+
+	return;
+#endif
+}
+#endif
+
+/*
+ * Patch board-specific baud/clock parameters into the OCP device
+ * definitions of the two MPSC serial channels, if present.
+ */
+static void __init
+ev64260_fixup_ocp(void)
+{
+#if defined(CONFIG_SERIAL_MPSC)
+	struct ocp_device	*dev;
+	mv64x60_ocp_mpsc_data_t	*dp;
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 0))
+			!= NULL) {
+		dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+
+		dp->max_idle = 40;	/* XXXX what should this be? */
+		dp->default_baud = EV64260_DEFAULT_BAUD;
+		dp->brg_clk_src = EV64260_MPSC_CLK_SRC;
+		dp->brg_clk_freq = EV64260_MPSC_CLK_FREQ;
+	}
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 1))
+			!= NULL) {
+		dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+
+		dp->max_idle = 40;	/* XXXX what should this be? */
+		dp->default_baud = 9600;	/* XXXX */
+		dp->brg_clk_src = EV64260_MPSC_CLK_SRC;
+		dp->brg_clk_freq = EV64260_MPSC_CLK_FREQ;
+	}
+#endif
+
+	return;
+}
+
+/*
+ * Main board setup: pick the root device, enable L2/L3 caches,
+ * initialize the host bridge and peripherals, fix up OCP serial data,
+ * and bring up the early console.
+ */
+static void __init
+ev64260_setup_arch(void)
+{
+	if ( ppc_md.progress )
+		ppc_md.progress("ev64260_setup_arch: enter", 0);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+#ifdef	CONFIG_ROOT_NFS
+		ROOT_DEV = Root_NFS;
+#else
+		ROOT_DEV = Root_SDA2;
+#endif
+
+	if ( ppc_md.progress )
+		ppc_md.progress("ev64260_setup_arch: Enabling L2 cache", 0);
+
+	/* Enable L2 and L3 caches (if 745x) */
+	_set_L2CR(_get_L2CR() | L2CR_L2E);
+	_set_L3CR(_get_L3CR() | L3CR_L3E);
+
+	if ( ppc_md.progress )
+		ppc_md.progress("ev64260_setup_arch: Initializing bridge", 0);
+
+	ev64260_setup_bridge();		/* set up PCI bridge(s) */
+	ev64260_setup_peripherals();	/* set up chip selects/GPP/MPP etc */
+
+	if ( ppc_md.progress )
+		ppc_md.progress("ev64260_setup_arch: bridge init complete", 0);
+
+	/* Set OCP values to reflect this board's setup */
+	ev64260_fixup_ocp();
+
+#ifdef	CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
+	ev64260_early_serial_map();
+#endif
+
+	printk(BOARD_VENDOR " " BOARD_MACHINE "\n");
+	printk("EV-64260-BP port (C) 2001 MontaVista Software, Inc. (source@mvista.com)\n");
+
+	if ( ppc_md.progress )
+		ppc_md.progress("ev64260_setup_arch: exit", 0);
+
+	return;
+}
+
+/*
+ * Hard-reset path: disable interrupts and caches, restore the bridge's
+ * reset-default window/register layout, then jump to the reset vector
+ * at 'addr' via rfi.  The statement order is critical -- once the
+ * bridge registers are moved back to their default base they may no
+ * longer be accessible through the current mappings.
+ */
+static void
+ev64260_reset_board(void *addr)
+{
+	local_irq_disable();
+
+	/* disable and invalidate the L2 cache */
+	_set_L2CR(0);
+	_set_L2CR(0x200000);
+
+	/* flush and disable L1 I/D cache */
+	__asm__ __volatile__
+	("mfspr   3,1008\n\t"
+	 "ori	5,5,0xcc00\n\t"
+	 "ori	4,3,0xc00\n\t"
+	 "andc	5,3,5\n\t"
+	 "sync\n\t"
+	 "mtspr	1008,4\n\t"
+	 "isync\n\t"
+	 "sync\n\t"
+	 "mtspr	1008,5\n\t"
+	 "isync\n\t"
+	 "sync\n\t");
+
+	/* unmap any other random cs's that might overlap with bootcs */
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN, 0, 0, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN, 0, 0, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_2_WIN, 0, 0, 0);
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_3_WIN, 0, 0, 0);
+
+	/* map bootrom back in to gt @ reset defaults */
+	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
+			0xff800000, 8*1024*1024, 0);
+
+	/* move gt reg base back to default, setup default pci0 swapping
+	 * config... */
+	mv64x60_write(&bh, MV64x60_INTERNAL_SPACE_DECODE,
+			(1<<24) | MV64x60_INTERNAL_SPACE_DEFAULT_ADDR>>20);
+
+	/* NOTE: FROM NOW ON no more GT_REGS accesses.. 0x1 is not mapped
+	 * via BAT or MMU, and MSR IR/DR is ON */
+#if 0
+	/* BROKEN... IR/DR is still on !! won't work!! */
+	/* Set exception prefix high - to the firmware */
+	_nmask_and_or_msr(0, MSR_IP);
+
+	out_8((u_char *)EV64260_BOARD_MODRST_REG, 0x01);
+#else
+	/* SRR0 has system reset vector, SRR1 has default MSR value */
+	/* rfi restores MSR from SRR1 and sets the PC to the SRR0 value */
+	/* NOTE: assumes reset vector is at 0xfff00100 */
+	__asm__ __volatile__
+	("mtspr   26, %0\n\t"
+	 "li      4,(1<<6)\n\t"
+	 "mtspr   27,4\n\t"
+	 "rfi\n\t"
+	 :: "r" (addr):"r4");
+#endif
+	return;
+}
+
+/* Restart via the reset vector; the busy-wait gives the reset time to
+ * take effect before declaring failure. */
+static void
+ev64260_restart(char *cmd)
+{
+	volatile ulong	i = 10000000;
+
+	ev64260_reset_board((void *)0xfff00100);
+
+	while (i-- > 0);
+	panic("restart failed\n");
+}
+
+/* Mask interrupts and spin forever. */
+static void
+ev64260_halt(void)
+{
+	local_irq_disable();
+	for (;;)
+		;
+	/* NOTREACHED */
+}
+
+/* No software power-off on this board; just halt. */
+static void
+ev64260_power_off(void)
+{
+	ev64260_halt();
+	/* NOTREACHED */
+}
+
+/* Append board identity and clock speeds to /proc/cpuinfo. */
+static int
+ev64260_show_cpuinfo(struct seq_file *m)
+{
+	uint	pvid;
+
+	pvid = mfspr(PVR);
+	seq_printf(m, "vendor\t\t: " BOARD_VENDOR "\n");
+	seq_printf(m, "machine\t\t: " BOARD_MACHINE "\n");
+	seq_printf(m, "cpu MHz\t\t: %d\n", ev64260_get_cpu_speed()/1000/1000);
+	seq_printf(m, "bus MHz\t\t: %d\n", ev64260_get_bus_speed()/1000/1000);
+
+	return 0;
+}
+
+/* DS1501 RTC has too much variation to use RTC for calibration */
+/* The decrementer runs at bus-clock/4, so derive the timebase
+ * parameters directly from the bus speed. */
+static void __init
+ev64260_calibrate_decr(void)
+{
+	ulong	freq;
+
+	freq = ev64260_get_bus_speed()/4;
+
+	printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
+	       freq/1000000, freq%1000000);
+
+	tb_ticks_per_jiffy = freq / HZ;
+	tb_to_us = mulhwu_scale_factor(freq, 1000000);
+
+	return;
+}
+
+#if 0	/* XXXX */
+#ifdef CONFIG_USE_PPCBOOT
+/*
+ * Recover the ppcboot handoff state (disabled: #if 0).  r3 points at
+ * the bd_t, r4/r5 bound the initrd, r6/r7 bound the command line;
+ * addresses below KERNELBASE are rebased before use.
+ */
+static void parse_ppcbootinfo(unsigned long r3,
+	unsigned long r4, unsigned long r5,
+	unsigned long r6, unsigned long r7)
+{
+	bd_t *bd=NULL;
+	char *cmdline_start=NULL;
+	int cmdline_len=0;
+
+	if(r3) {
+		if((r3 & 0xf0000000) == 0) r3 += KERNELBASE;
+		if((r3 & 0xf0000000) == KERNELBASE) {
+			bd=(void *)r3;
+
+			/* hack for ppcboot loaders that report freqs in Mhz */
+			if(bd->bi_intfreq<1000000) bd->bi_intfreq*=1000000;
+			if(bd->bi_busfreq<1000000) bd->bi_busfreq*=1000000;
+
+			memcpy(&ppcboot_bd,bd,sizeof(ppcboot_bd));
+			ppcboot_bd_valid=1;
+		}
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if(r4 && r5 && r5>r4) {
+		if((r4 & 0xf0000000) == 0) r4 += KERNELBASE;
+		if((r5 & 0xf0000000) == 0) r5 += KERNELBASE;
+		if((r4 & 0xf0000000) == KERNELBASE) {
+			initrd_start=r4;
+			initrd_end=r5;
+			initrd_below_start_ok = 1;
+		}
+	}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+	if(r6 && r7 && r7>r6) {
+		if((r6 & 0xf0000000) == 0) r6 += KERNELBASE;
+		if((r7 & 0xf0000000) == 0) r7 += KERNELBASE;
+		if((r6 & 0xf0000000) == KERNELBASE) {
+			cmdline_start=(void *)r6;
+			cmdline_len=(r7-r6);
+			strncpy(cmd_line,cmdline_start,cmdline_len);
+		}
+	}
+
+	if(ppcboot_bd_valid) {
+		printk("found bd_t @%p\n", bd);
+		printk("memstart=%08lx\n", bd->bi_memstart);
+		printk("memsize=%08lx\n", bd->bi_memsize);
+		printk("enetaddr=%02x%02x%02x%02x%02x%02x\n",
+			bd->bi_enetaddr[0],
+			bd->bi_enetaddr[1],
+			bd->bi_enetaddr[2],
+			bd->bi_enetaddr[3],
+			bd->bi_enetaddr[4],
+			bd->bi_enetaddr[5]
+			);
+		printk("intfreq=%ld\n", bd->bi_intfreq);
+		printk("busfreq=%ld\n", bd->bi_busfreq);
+		printk("baudrate=%ld\n", bd->bi_baudrate);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if(initrd_start) {
+		printk("found initrd @%lx-%lx\n", initrd_start, initrd_end);
+	}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+	if(cmdline_start && cmdline_len) {
+		printk("found cmdline: '%s'\n", cmd_line);
+	}
+}
+#endif /* USE PPC_BOOT */
+#endif
+
+#if 0 /* XXXX */
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+static int
+ev64260_ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ return check_region(from, extent);
+}
+
+static void
+ev64260_ide_request_region(ide_ioreg_t from,
+ unsigned int extent,
+ const char *name)
+{
+ request_region(from, extent, name);
+ return;
+}
+
+static void
+ev64260_ide_release_region(ide_ioreg_t from,
+ unsigned int extent)
+{
+ release_region(from, extent);
+ return;
+}
+
+static void __init
+ev64260_ide_pci_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
+ ide_ioreg_t ctrl_port, int *irq)
+{
+ struct pci_dev *dev;
+#if 1 /* NTL */
+ int i;
+
+ //printk("regs %d to %d @ 0x%x\n", IDE_DATA_OFFSET, IDE_STATUS_OFFSET, data_port);
+ for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+ hw->io_ports[i] = data_port;
+ data_port++;
+ }
+
+ //printk("ctrl %d @ 0x%x\n", IDE_CONTROL_OFFSET, ctrl_port);
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+#endif
+
+ pci_for_each_dev(dev) {
+ if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
+ ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
+ hw->irq = dev->irq;
+
+ if (irq != NULL) {
+ *irq = dev->irq;
+ }
+ }
+ }
+
+ return;
+}
+#endif
+#endif
+
+#if !defined(CONFIG_USE_PPCBOOT)
+/*
+ * Set BAT 3 to map 0xfb000000 to 0xfc000000 of physical memory space.
+ */
+static __inline__ void
+ev64260_set_bat(void)
+{
+ mb();
+ mtspr(DBAT1U, 0xfb0001fe);
+ mtspr(DBAT1L, 0xfb00002a);
+ mb();
+
+ return;
+}
+#endif
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+static void __init
+ev64260_map_io(void)
+{
+ io_block_mapping(0xfb000000, 0xfb000000, 0x01000000, _PAGE_IO);
+}
+#endif
+
+void __init
+platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ extern int initrd_below_start_ok;
+
+ initrd_start=initrd_end=0;
+ initrd_below_start_ok=0;
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ ppcboot_bd_valid=0;
+ memset(&ppcboot_bd,0,sizeof(ppcboot_bd));
+
+#ifdef CONFIG_USE_PPCBOOT
+ parse_ppcbootinfo(r3, r4, r5, r6, r7);
+#else
+ parse_bootinfo(find_bootinfo());
+#endif
+
+ isa_mem_base = 0;
+ isa_io_base = 0xa0000000; /* XXXX */
+ pci_dram_offset = 0x80000000; /* XXXX */
+
+ loops_per_jiffy = ev64260_get_cpu_speed() / HZ;
+
+ ppc_md.setup_arch = ev64260_setup_arch;
+ ppc_md.show_cpuinfo = ev64260_show_cpuinfo;
+ ppc_md.init_IRQ = gt64260_init_irq;
+ ppc_md.get_irq = gt64260_get_irq;
+
+ ppc_md.pcibios_fixup = ev64260_pci_fixups;
+
+ ppc_md.restart = ev64260_restart;
+ ppc_md.power_off = ev64260_power_off;
+ ppc_md.halt = ev64260_halt;
+
+ ppc_md.find_end_of_memory = ev64260_find_end_of_memory;
+
+ ppc_md.init = NULL;
+
+ ppc_md.time_init = todc_time_init;
+ ppc_md.set_rtc_time = todc_set_rtc_time;
+ ppc_md.get_rtc_time = todc_get_rtc_time;
+
+ ppc_md.nvram_read_val = todc_direct_read_val;
+ ppc_md.nvram_write_val = todc_direct_write_val;
+
+ ppc_md.calibrate_decr = ev64260_calibrate_decr;
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+ ppc_ide_md.ide_init_hwif = ev64260_ide_pci_init_hwif_ports;
+#endif
+
+ bh.p_base = CONFIG_MV64X60_NEW_BASE;
+
+#if !defined(CONFIG_USE_PPCBOOT)
+ ev64260_set_bat();
+#endif
+
+#ifdef CONFIG_SERIAL_8250
+#if defined(CONFIG_SERIAL_TEXT_DEBUG)
+ ppc_md.setup_io_mappings = ev64260_map_io;
+ ppc_md.progress = gen550_progress;
+#endif
+#if defined(CONFIG_KGDB)
+ ppc_md.setup_io_mappings = ev64260_map_io;
+ ppc_md.early_serial_map = ev64260_early_serial_map;
+#endif
+#elif defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#ifdef CONFIG_SERIAL_TEXT_DEBUG
+ ppc_md.setup_io_mappings = ev64260_map_io;
+ ppc_md.progress = gt64260_mpsc_progress;
+#endif /* CONFIG_SERIAL_TEXT_DEBUG */
+#ifdef CONFIG_KGDB
+ ppc_md.setup_io_mappings = ev64260_map_io;
+ ppc_md.early_serial_map = ev64260_early_serial_map;
+#endif /* CONFIG_KGDB */
+
+#endif
+
+ return;
+}
/*
* arch/ppc/platforms/ev64260.h
- *
+ *
* Definitions for Marvell/Galileo EV-64260-BP Evaluation Board.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
+ * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
/*
- * The GT64260 has 2 PCI buses each with 1 window from the CPU bus to
+ * The MV64x60 has 2 PCI buses each with 1 window from the CPU bus to
* PCI I/O space and 4 windows from the CPU bus to PCI MEM space.
* We'll only use one PCI MEM window on each PCI bus.
+ *
+ * This is the CPU physical memory map (windows must be at least 1MB and start
+ * on a boundary that is a multiple of the window size):
+ *
+ * 0xfc000000-0xffffffff - External FLASH on device module
+ * 0xfbf00000-0xfbffffff - Embedded (on board) FLASH
+ * 0xfbe00000-0xfbefffff - GT64260 Registers (preferably)
+ * but really a config option
+ * 0xfbd00000-0xfbdfffff - External SRAM on device module
+ * 0xfbc00000-0xfbcfffff - TODC chip on device module
+ * 0xfbb00000-0xfbbfffff - External UART on device module
+ * 0xa2000000-0xfbafffff - <hole>
+ * 0xa1000000-0xa1ffffff - PCI 1 I/O (defined in gt64260.h)
+ * 0xa0000000-0xa0ffffff - PCI 0 I/O (defined in gt64260.h)
+ * 0x90000000-0x9fffffff - PCI 1 MEM (defined in gt64260.h)
+ * 0x80000000-0x8fffffff - PCI 0 MEM (defined in gt64260.h)
*/
#ifndef __PPC_PLATFORMS_EV64260_H
#define __PPC_PLATFORMS_EV64260_H
-#define EV64260_BRIDGE_REG_BASE 0xf8000000
-#define EV64260_BRIDGE_REG_BASE_TO_TOP 0x08000000U
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+/*
+ * CPU Physical Memory Map setup.
+ */
+#define EV64260_EXT_FLASH_BASE 0xfc000000
+#define EV64260_EMB_FLASH_BASE 0xfbf00000
+#define EV64260_EXT_SRAM_BASE 0xfbd00000
+#define EV64260_TODC_BASE 0xfbc00000
+#define EV64260_UART_BASE 0xfbb00000
-#define EV64260_TODC_BASE 0xfc800000
-#define EV64260_TODC_LEN 0x00800000
-#define EV64260_TODC_END (EV64260_TODC_BASE + \
- EV64260_TODC_LEN - 1)
+#define EV64260_EXT_FLASH_SIZE_ACTUAL 0x04000000 /* <= 64MB Extern FLASH */
+#define EV64260_EMB_FLASH_SIZE_ACTUAL 0x00080000 /* 512KB of Embed FLASH */
+#define EV64260_EXT_SRAM_SIZE_ACTUAL 0x00100000 /* 1MB SDRAM */
+#define EV64260_TODC_SIZE_ACTUAL 0x00000020 /* 32 bytes for TODC */
+#define EV64260_UART_SIZE_ACTUAL 0x00000040 /* 64 bytes for DUART */
+
+#define EV64260_EXT_FLASH_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
+ EV64260_EXT_FLASH_SIZE_ACTUAL)
+#define EV64260_EMB_FLASH_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
+ EV64260_EMB_FLASH_SIZE_ACTUAL)
+#define EV64260_EXT_SRAM_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
+ EV64260_EXT_SRAM_SIZE_ACTUAL)
+#define EV64260_TODC_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
+ EV64260_TODC_SIZE_ACTUAL)
+#if 0 /* XXXX blows up assembler in bootloader */
+#define EV64260_UART_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
+ EV64260_UART_SIZE_ACTUAL)
+#else
+#define EV64260_UART_SIZE GT64260_WINDOW_SIZE_MIN
+#endif
+#define EV64260_UART_END ((EV64260_UART_BASE + \
+ EV64260_UART_SIZE - 1) & 0xfff00000)
+
+/*
+ * Board-specific IRQ info
+ */
+#define EV64260_UART_0_IRQ 85
+#define EV64260_UART_1_IRQ 86
+#define EV64260_PCI_0_IRQ 91
+#define EV64260_PCI_1_IRQ 93
-#define EV64260_UART_BASE 0xfd000000
-#define EV64260_UART_LEN 0x00800000
-#define EV64260_UART_END (EV64260_UART_BASE + \
- EV64260_UART_LEN - 1)
-/* Serial driver setup. */
+/*
+ * Serial port setup.
+ */
+#define EV64260_DEFAULT_BAUD 115200
+
+#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#define SERIAL_PORT_DFNS
+
+#define EV64260_MPSC_CLK_SRC 8 /* TCLK */
+#define EV64260_MPSC_CLK_FREQ 100000000 /* 100MHz clk */
+#else
#define EV64260_SERIAL_0 (EV64260_UART_BASE + 0x20)
#define EV64260_SERIAL_1 EV64260_UART_BASE
-#define BASE_BAUD ( 3686400 / 16 )
+#define BASE_BAUD (EV64260_DEFAULT_BAUD * 2)
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST)
#endif
-#if !defined(CONFIG_GT64260_CONSOLE)
/* Required for bootloader's ns16550.c code */
#define STD_SERIAL_PORT_DFNS \
- { 0, BASE_BAUD, EV64260_SERIAL_0, 85, STD_COM_FLAGS, /* ttyS0 */\
- iomem_base: (u8 *)EV64260_SERIAL_0, \
+ { 0, BASE_BAUD, EV64260_SERIAL_0, EV64260_UART_0_IRQ, STD_COM_FLAGS, \
+ iomem_base: (u8 *)EV64260_SERIAL_0, /* ttyS0 */ \
+ iomem_reg_shift: 2, \
+ io_type: SERIAL_IO_MEM },
+
+#if 0
+ { 1, BASE_BAUD, EV64260_SERIAL_1, EV64260_UART_1_IRQ, STD_COM_FLAGS, \
+ iomem_base: (u8 *)EV64260_SERIAL_1, /* ttyS1 */ \
iomem_reg_shift: 2, \
io_type: SERIAL_IO_MEM },
+#endif
#define SERIAL_PORT_DFNS \
STD_SERIAL_PORT_DFNS
-#else
-#define SERIAL_PORT_DFNS
#endif
-
#endif /* __PPC_PLATFORMS_EV64260_H */
+++ /dev/null
-/*
- * arch/ppc/platforms/ev64260_setup.c
- *
- * Board setup routines for the Marvell/Galileo EV-64260-BP Evaluation Board.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * The EV-64260-BP port is the result of hard work from many people from
- * many companies. In particular, employees of Marvell/Galileo, Mission
- * Critical Linux, Xyterra, and MontaVista Software were heavily involved.
- */
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/initrd.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/ide.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#if !defined(CONFIG_GT64260_CONSOLE)
-#include <linux/serial.h>
-#endif
-
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/time.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/smp.h>
-#include <asm/todc.h>
-#include <asm/bootinfo.h>
-#include <asm/gt64260.h>
-#include <platforms/ev64260.h>
-
-
-extern char cmd_line[];
-unsigned long ev64260_find_end_of_memory(void);
-
-TODC_ALLOC();
-
-/*
- * Marvell/Galileo EV-64260-BP Evaluation Board PCI interrupt routing.
- */
-static int __init
-ev64260_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
-{
- struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-
- if (hose->index == 0) {
- static char pci_irq_table[][4] =
- /*
- * PCI IDSEL/INTPIN->INTLINE
- * A B C D
- */
- {
- { 91, 0, 0, 0 }, /* IDSEL 7 - PCI bus 0 */
- { 91, 0, 0, 0 }, /* IDSEL 8 - PCI bus 0 */
- };
-
- const long min_idsel = 7, max_idsel = 8, irqs_per_slot = 4;
- return PCI_IRQ_TABLE_LOOKUP;
- }
- else {
- static char pci_irq_table[][4] =
- /*
- * PCI IDSEL/INTPIN->INTLINE
- * A B C D
- */
- {
- { 93, 0, 0, 0 }, /* IDSEL 7 - PCI bus 1 */
- { 93, 0, 0, 0 }, /* IDSEL 8 - PCI bus 1 */
- };
-
- const long min_idsel = 7, max_idsel = 8, irqs_per_slot = 4;
- return PCI_IRQ_TABLE_LOOKUP;
- }
-}
-
-static void __init
-ev64260_setup_bridge(void)
-{
- gt64260_bridge_info_t info;
- int window;
-
- GT64260_BRIDGE_INFO_DEFAULT(&info, ev64260_find_end_of_memory());
-
- /* Lookup PCI host bridges */
- if (gt64260_find_bridges(EV64260_BRIDGE_REG_BASE,
- &info,
- ev64260_map_irq)) {
- printk("Bridge initialization failed.\n");
- }
-
- /*
- * Enabling of PCI internal-vs-external arbitration
- * is a platform- and errata-dependent decision.
- */
- if(gt64260_revision == GT64260) {
- /* FEr#35 */
- gt_clr_bits(GT64260_PCI_0_ARBITER_CNTL, (1<<31));
- gt_clr_bits(GT64260_PCI_1_ARBITER_CNTL, (1<<31));
- } else if( gt64260_revision == GT64260A ) {
- gt_set_bits(GT64260_PCI_0_ARBITER_CNTL, (1<<31));
- gt_set_bits(GT64260_PCI_1_ARBITER_CNTL, (1<<31));
- /* Make external GPP interrupts level sensitive */
- gt_set_bits(GT64260_COMM_ARBITER_CNTL, (1<<10));
- /* Doc Change 9: > 100 MHz so must be set */
- gt_set_bits(GT64260_CPU_CONFIG, (1<<23));
- }
-
- gt_set_bits(GT64260_CPU_MASTER_CNTL, (1<<9)); /* Only 1 cpu */
-
- /* SCS windows not disabled above, disable all but SCS 0 */
- for (window=1; window<GT64260_CPU_SCS_DECODE_WINDOWS; window++) {
- gt64260_cpu_scs_set_window(window, 0, 0);
- }
-
- /* Set up windows to RTC/TODC and DUART on device module (CS 1 & 2) */
- gt64260_cpu_cs_set_window(1, EV64260_TODC_BASE, EV64260_TODC_LEN);
- gt64260_cpu_cs_set_window(2, EV64260_UART_BASE, EV64260_UART_LEN);
-
- /*
- * The EV-64260-BP uses several Multi-Purpose Pins (MPP) on the 64260
- * bridge as interrupt inputs (via the General Purpose Ports (GPP)
- * register). Need to route the MPP inputs to the GPP and set the
- * polarity correctly.
- *
- * In MPP Control 2 Register
- * MPP 21 -> GPP 21 (DUART channel A intr)
- * MPP 22 -> GPP 22 (DUART channel B intr)
- *
- * In MPP Control 3 Register
- * MPP 27 -> GPP 27 (PCI 0 INTA)
- * MPP 29 -> GPP 29 (PCI 1 INTA)
- */
- gt_clr_bits(GT64260_MPP_CNTL_2,
- ((1<<20) | (1<<21) | (1<<22) | (1<<23) |
- (1<<24) | (1<<25) | (1<<26) | (1<<27)));
-
- gt_clr_bits(GT64260_MPP_CNTL_3,
- ((1<<12) | (1<<13) | (1<<14) | (1<<15) |
- (1<<20) | (1<<21) | (1<<22) | (1<<23)));
-
- gt_write(GT64260_GPP_LEVEL_CNTL, 0x000002c6);
-
- /* DUART & PCI interrupts are active low */
- gt_set_bits(GT64260_GPP_LEVEL_CNTL,
- ((1<<21) | (1<<22) | (1<<27) | (1<<29)));
-
- /* Clear any pending interrupts for these inputs and enable them. */
- gt_write(GT64260_GPP_INTR_CAUSE,
- ~((1<<21) | (1<<22) | (1<<27) | (1<<29)));
- gt_set_bits(GT64260_GPP_INTR_MASK,
- ((1<<21) | (1<<22)| (1<<27) | (1<<29)));
- gt_set_bits(GT64260_IC_CPU_INTR_MASK_HI, ((1<<26) | (1<<27)));
-
- /* Set MPSC Multiplex RMII */
- /* NOTE: ethernet driver modifies bit 0 and 1 */
- gt_write(GT64260_MPP_SERIAL_PORTS_MULTIPLEX, 0x00001102);
-
- return;
-}
-
-
-static void __init
-ev64260_setup_arch(void)
-{
-#if !defined(CONFIG_GT64260_CONSOLE)
- struct serial_struct serial_req;
-#endif
-
- if ( ppc_md.progress )
- ppc_md.progress("ev64260_setup_arch: enter", 0);
-
- loops_per_jiffy = 50000000 / HZ;
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start)
- ROOT_DEV = Root_RAM0;
- else
-#endif
-#ifdef CONFIG_ROOT_NFS
- ROOT_DEV = Root_NFS;
-#else
- ROOT_DEV = Root_SDA2;
-#endif
-
- if ( ppc_md.progress )
- ppc_md.progress("ev64260_setup_arch: find_bridges", 0);
-
- /*
- * Set up the L2CR register.
- * L2 cache was invalidated by bootloader.
- */
- switch (PVR_VER(mfspr(PVR))) {
- case PVR_VER(PVR_750):
- _set_L2CR(0xfd100000);
- break;
- case PVR_VER(PVR_7400):
- case PVR_VER(PVR_7410):
- _set_L2CR(0xcd100000);
- break;
- /* case PVR_VER(PVR_7450): */
- /* XXXX WHAT VALUE?? FIXME */
- break;
- }
-
- ev64260_setup_bridge();
-
- TODC_INIT(TODC_TYPE_DS1501, 0, 0, ioremap(EV64260_TODC_BASE,0x20), 8);
-
-#if !defined(CONFIG_GT64260_CONSOLE)
- memset(&serial_req, 0, sizeof(serial_req));
- serial_req.line = 0;
- serial_req.baud_base = BASE_BAUD;
- serial_req.port = 0;
- serial_req.irq = 85;
- serial_req.flags = STD_COM_FLAGS;
- serial_req.io_type = SERIAL_IO_MEM;
- serial_req.iomem_base = ioremap(EV64260_SERIAL_0, 0x20);
- serial_req.iomem_reg_shift = 2;
-
- if (early_serial_setup(&serial_req) != 0) {
- printk("Early serial init of port 0 failed\n");
- }
-
- /* Assume early_serial_setup() doesn't modify serial_req */
- serial_req.line = 1;
- serial_req.port = 1;
- serial_req.irq = 86;
- serial_req.iomem_base = ioremap(EV64260_SERIAL_1, 0x20);
-
- if (early_serial_setup(&serial_req) != 0) {
- printk("Early serial init of port 1 failed\n");
- }
-#endif
-
- printk("Marvell/Galileo EV-64260-BP Evaluation Board\n");
- printk("EV-64260-BP port (C) 2001 MontaVista Software, Inc. (source@mvista.com)\n");
-
- if ( ppc_md.progress )
- ppc_md.progress("ev64260_setup_arch: exit", 0);
-
- return;
-}
-
-static void __init
-ev64260_init_irq(void)
-{
- gt64260_init_irq();
-
- if(gt64260_revision != GT64260) {
- /* XXXX Kludge--need to fix gt64260_init_irq() interface */
- /* Mark PCI intrs level sensitive */
- irq_desc[91].status |= IRQ_LEVEL;
- irq_desc[93].status |= IRQ_LEVEL;
- }
-}
-
-unsigned long __init
-ev64260_find_end_of_memory(void)
-{
- return 32*1024*1024; /* XXXX FIXME */
-}
-
-static void
-ev64260_reset_board(void)
-{
- local_irq_disable();
-
- /* Set exception prefix high - to the firmware */
- _nmask_and_or_msr(0, MSR_IP);
-
- /* XXX FIXME */
- printk("XXXX **** trying to reset board ****\n");
- return;
-}
-
-static void
-ev64260_restart(char *cmd)
-{
- volatile ulong i = 10000000;
-
- ev64260_reset_board();
-
- while (i-- > 0);
- panic("restart failed\n");
-}
-
-static void
-ev64260_halt(void)
-{
- local_irq_disable();
- while (1);
- /* NOTREACHED */
-}
-
-static void
-ev64260_power_off(void)
-{
- ev64260_halt();
- /* NOTREACHED */
-}
-
-static int
-ev64260_show_cpuinfo(struct seq_file *m)
-{
- uint pvid;
-
- pvid = mfspr(PVR);
- seq_printf(m, "vendor\t\t: Marvell/Galileo\n");
- seq_printf(m, "machine\t\t: EV-64260-BP\n");
- seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
- pvid, (pvid & (1<<15) ? "IBM" : "Motorola"));
-
- return 0;
-}
-
-/* DS1501 RTC has too much variation to use RTC for calibration */
-static void __init
-ev64260_calibrate_decr(void)
-{
- ulong freq;
-
- freq = 100000000 / 4;
-
- printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
- freq/1000000, freq%1000000);
-
- tb_ticks_per_jiffy = freq / HZ;
- tb_to_us = mulhwu_scale_factor(freq, 1000000);
-
- return;
-}
-
-#if defined(CONFIG_SERIAL_TEXT_DEBUG)
-/*
- * Set BAT 3 to map 0xf0000000 to end of physical memory space.
- */
-static __inline__ void
-ev64260_set_bat(void)
-{
- unsigned long bat3u, bat3l;
- static int mapping_set = 0;
-
- if (!mapping_set) {
-
- __asm__ __volatile__(
- " lis %0,0xf000\n \
- ori %1,%0,0x002a\n \
- ori %0,%0,0x1ffe\n \
- mtspr 0x21e,%0\n \
- mtspr 0x21f,%1\n \
- isync\n \
- sync "
- : "=r" (bat3u), "=r" (bat3l));
-
- mapping_set = 1;
- }
-
- return;
-}
-
-#if !defined(CONFIG_GT64260_CONSOLE)
-#include <linux/serialP.h>
-#include <linux/serial_reg.h>
-#include <asm/serial.h>
-
-static struct serial_state rs_table[RS_TABLE_SIZE] = {
- SERIAL_PORT_DFNS /* Defined in <asm/serial.h> */
-};
-
-static void
-ev64260_16550_progress(char *s, unsigned short hex)
-{
- volatile char c;
- volatile unsigned long com_port;
- u16 shift;
-
- com_port = rs_table[0].port;
- shift = rs_table[0].iomem_reg_shift;
-
- while ((c = *s++) != 0) {
- while ((*((volatile unsigned char *)com_port +
- (UART_LSR << shift)) & UART_LSR_THRE) == 0)
- ;
- *(volatile unsigned char *)com_port = c;
-
- if (c == '\n') {
- while ((*((volatile unsigned char *)com_port +
- (UART_LSR << shift)) & UART_LSR_THRE) == 0)
- ;
- *(volatile unsigned char *)com_port = '\r';
- }
- }
-
- /* Move to next line on */
- while ((*((volatile unsigned char *)com_port +
- (UART_LSR << shift)) & UART_LSR_THRE) == 0)
- ;
- *(volatile unsigned char *)com_port = '\n';
- while ((*((volatile unsigned char *)com_port +
- (UART_LSR << shift)) & UART_LSR_THRE) == 0)
- ;
- *(volatile unsigned char *)com_port = '\r';
-
- return;
-}
-#endif /* !CONFIG_GT64260_CONSOLE */
-#endif /* CONFIG_SERIAL_TEXT_DEBUG */
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- parse_bootinfo(find_bootinfo());
-
- isa_mem_base = 0;
-
- ppc_md.setup_arch = ev64260_setup_arch;
- ppc_md.show_cpuinfo = ev64260_show_cpuinfo;
- ppc_md.irq_canonicalize = NULL;
- ppc_md.init_IRQ = ev64260_init_irq;
- ppc_md.get_irq = gt64260_get_irq;
- ppc_md.init = NULL;
-
- ppc_md.restart = ev64260_restart;
- ppc_md.power_off = ev64260_power_off;
- ppc_md.halt = ev64260_halt;
-
- ppc_md.find_end_of_memory = ev64260_find_end_of_memory;
-
- ppc_md.time_init = todc_time_init;
- ppc_md.set_rtc_time = todc_set_rtc_time;
- ppc_md.get_rtc_time = todc_get_rtc_time;
- ppc_md.calibrate_decr = ev64260_calibrate_decr;
-
- ppc_md.nvram_read_val = todc_direct_read_val;
- ppc_md.nvram_write_val = todc_direct_write_val;
-
- ppc_md.heartbeat = NULL;
- ppc_md.heartbeat_reset = 0;
- ppc_md.heartbeat_count = 0;
-
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- ev64260_set_bat();
-#ifdef CONFIG_GT64260_CONSOLE
- gt64260_base = EV64260_BRIDGE_REG_BASE;
- ppc_md.progress = gt64260_mpsc_progress; /* embedded UART */
-#else
- ppc_md.progress = ev64260_16550_progress; /* Dev module DUART */
-#endif
-#else /* !CONFIG_SERIAL_TEXT_DEBUG */
- ppc_md.progress = NULL;
-#endif /* CONFIG_SERIAL_TEXT_DEBUG */
-
- return;
-}
+++ /dev/null
-/*
- * arch/ppc/platforms/lite5200.c
- *
- * Platform support file for the Freescale LITE5200 based on MPC52xx.
- * A maximum of this file should be moved to syslib/mpc52xx_?????
- * so that new platform based on MPC52xx need a minimal platform file
- * ( avoid code duplication )
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based on the 2.4 code written by Kent Borg,
- * Dale Farnsworth <dale.farnsworth@mvista.com> and
- * Wolfgang Denk <wd@denx.de>
- *
- * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright 2003 Motorola Inc.
- * Copyright 2003 MontaVista Software Inc.
- * Copyright 2003 DENX Software Engineering (wd@denx.de)
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/initrd.h>
-#include <linux/seq_file.h>
-#include <linux/kdev_t.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/ocp.h>
-#include <asm/mpc52xx.h>
-
-
-/* Board data given by U-Boot */
-bd_t __res;
-EXPORT_SYMBOL(__res); /* For modules */
-
-
-/* ======================================================================== */
-/* OCP device definition */
-/* For board/shared resources like PSCs */
-/* ======================================================================== */
-/* Be sure not to load conficting devices : e.g. loading the UART drivers for
- * PSC1 and then also loading a AC97 for this same PSC.
- * For details about how to create an entry, look in the doc of the concerned
- * driver ( eg drivers/serial/mpc52xx_uart.c for the PSC in uart mode )
- */
-
-struct ocp_def board_ocp[] = {
- {
- .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_PSC_UART,
- .index = 0,
- .paddr = MPC52xx_PSC1,
- .irq = MPC52xx_PSC1_IRQ,
- .pm = OCP_CPM_NA,
- },
- { /* Terminating entry */
- .vendor = OCP_VENDOR_INVALID
- }
-};
-
-
-/* ======================================================================== */
-/* Platform specific code */
-/* ======================================================================== */
-
-static int
-icecube_show_cpuinfo(struct seq_file *m)
-{
- seq_printf(m, "machine\t\t: Freescale LITE5200\n");
- return 0;
-}
-
-static void __init
-icecube_setup_arch(void)
-{
-
- /* Add board OCP definitions */
- mpc52xx_add_board_devices(board_ocp);
-}
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* Generic MPC52xx platform initialization */
- /* TODO Create one and move a max of stuff in it.
- Put this init in the syslib */
-
- struct bi_record *bootinfo = find_bootinfo();
-
- if (bootinfo)
- parse_bootinfo(bootinfo);
- else {
- /* Load the bd_t board info structure */
- if (r3)
- memcpy((void*)&__res,(void*)(r3+KERNELBASE),
- sizeof(bd_t));
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /* Load the initrd */
- if (r4) {
- initrd_start = r4 + KERNELBASE;
- initrd_end = r5 + KERNELBASE;
- }
-#endif
-
- /* Load the command line */
- if (r6) {
- *(char *)(r7+KERNELBASE) = 0;
- strcpy(cmd_line, (char *)(r6+KERNELBASE));
- }
- }
-
- /* BAT setup */
- mpc52xx_set_bat();
-
- /* No ISA bus AFAIK */
- isa_io_base = 0;
- isa_mem_base = 0;
-
- /* Setup the ppc_md struct */
- ppc_md.setup_arch = icecube_setup_arch;
- ppc_md.show_cpuinfo = icecube_show_cpuinfo;
- ppc_md.show_percpuinfo = NULL;
- ppc_md.init_IRQ = mpc52xx_init_irq;
- ppc_md.get_irq = mpc52xx_get_irq;
-
- ppc_md.find_end_of_memory = mpc52xx_find_end_of_memory;
- ppc_md.setup_io_mappings = mpc52xx_map_io;
-
- ppc_md.restart = mpc52xx_restart;
- ppc_md.power_off = mpc52xx_power_off;
- ppc_md.halt = mpc52xx_halt;
-
- /* No time keeper on the IceCube */
- ppc_md.time_init = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.set_rtc_time = NULL;
-
- ppc_md.calibrate_decr = mpc52xx_calibrate_decr;
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
- ppc_md.progress = mpc52xx_progress;
-#endif
-}
-
+++ /dev/null
-/*
- * arch/ppc/platforms/lite5200.h
- *
- * Definitions for Freescale LITE5200 : MPC52xx Standard Development
- * Platform board support
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __PLATFORMS_LITE5200_H__
-#define __PLATFORMS_LITE5200_H__
-
-/* Serial port used for low-level debug */
-#define MPC52xx_PF_CONSOLE_PORT 0 /* PSC1 */
-
-
-#endif /* __PLATFORMS_LITE5200_H__ */
+++ /dev/null
-/*
- * arch/ppc/platforms/mpc5200.c
- *
- * OCP Definitions for the boards based on MPC5200 processor. Contains
- * definitions for every common peripherals. (Mostly all but PSCs)
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <asm/ocp.h>
-#include <asm/mpc52xx.h>
-
-/* Here is the core_ocp struct.
- * With all the devices common to all board. Even if port multiplexing is
- * not setup for them (if the user don't want them, just don't select the
- * config option). The potentially conflicting devices (like PSCs) goes in
- * board specific file.
- */
-struct ocp_def core_ocp[] = {
- { /* Terminating entry */
- .vendor = OCP_VENDOR_INVALID
- }
-};
static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
- u32 *reg;
/* OF only reports the high frequency */
hi_freq = cur_freq;
return 1;
}
- reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
+ u32 *reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
voltage_gpio = *reg;
set_speed_proc = dfs_set_cpu_speed;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
* (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC.
*
- * The U3 is the bridge used on G5 machines. It contains an
+ * The U3 is the bridge used on G5 machines. It contains an
* AGP bus which is dealt with the old UniNorth access routines
- * and a HyperTransport bus which uses its own set of access
+ * and a HyperTransport bus which uses its own set of access
* functions.
*/
continue;
if (0x0035 != *prop)
continue;
- prop = (u32 *)get_property(nec, "reg", NULL);
+ prop = (u32 *)get_property(nec, "reg", 0);
if (prop == NULL)
continue;
devfn = (prop[0] >> 8) & 0xff;
* any of the 0xfxxxxxxx "fine" memory regions to /ht.
* We need to fix that sooner or later by either parsing all child "ranges"
* properties or figuring out the U3 address space decoding logic and
- * then read its configuration register (if any).
+ * then read its configuration register (if any).
*/
hose->io_base_phys = 0xf4000000 + 0x00400000;
hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
* default, gmac is not powered up, and so will be absent
* from the kernel initial PCI lookup.
*
- * Should be replaced by 2.4 new PCI mechanisms and really
- * register the device.
+ * Should be replaced by 2.4 new PCI mechanisms and really
+ * register the device.
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-/* When an irq gets requested for the first client, if it's an
- * edge interrupt, we clear any previous one on the controller
- */
-static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
-{
- unsigned long bit = 1UL << (irq_nr & 0x1f);
- int i = irq_nr >> 5;
-
- if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
- out_le32(&pmac_irq_hw[i]->ack, bit);
- set_bit(irq_nr, ppc_cached_irq_mask);
- pmac_set_irq_mask(irq_nr, 0);
-
- return 0;
-}
-
static void __pmac pmac_mask_irq(unsigned int irq_nr)
{
clear_bit(irq_nr, ppc_cached_irq_mask);
struct hw_interrupt_type pmac_pic = {
- .typename = " PMAC-PIC ",
- .startup = pmac_startup_irq,
- .enable = pmac_unmask_irq,
- .disable = pmac_mask_irq,
- .ack = pmac_mask_and_ack_irq,
- .end = pmac_end_irq,
+ " PMAC-PIC ",
+ NULL,
+ NULL,
+ pmac_unmask_irq,
+ pmac_mask_irq,
+ pmac_mask_and_ack_irq,
+ pmac_end_irq,
+ NULL
};
struct hw_interrupt_type gatwick_pic = {
- .typename = " GATWICK ",
- .startup = pmac_startup_irq,
- .enable = pmac_unmask_irq,
- .disable = pmac_mask_irq,
- .ack = pmac_mask_and_ack_irq,
- .end = pmac_end_irq,
+ " GATWICK ",
+ NULL,
+ NULL,
+ pmac_unmask_irq,
+ pmac_mask_irq,
+ pmac_mask_and_ack_irq,
+ pmac_end_irq,
+ NULL
};
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
nmi_irq = pswitch->intrs[0].line;
openpic_init_nmi_irq(nmi_irq);
request_irq(nmi_irq, xmon_irq, 0,
- "NMI - XMON", NULL);
+ "NMI - XMON", 0);
}
}
#endif /* CONFIG_XMON */
for ( i = max_real_irqs ; i < max_irqs ; i++ )
irq_desc[i].handler = &gatwick_pic;
request_irq( irq_cascade, gatwick_action, SA_INTERRUPT,
- "cascade", NULL );
+ "cascade", 0 );
}
printk("System has %d possible interrupts\n", max_irqs);
if (max_irqs != max_real_irqs)
max_real_irqs);
#ifdef CONFIG_XMON
- request_irq(20, xmon_irq, 0, "NMI - XMON", NULL);
+ request_irq(20, xmon_irq, 0, "NMI - XMON", 0);
#endif /* CONFIG_XMON */
}
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", NULL))
+ if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", 0))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
}
/* Check the first PCI device to see if it is a Raven. */
- early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &devid);
+ early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &devid);
switch (devid & 0xffff0000) {
case MPIC_RAVEN_ID:
/* Read the memory base register. */
- early_read_config_dword(NULL, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+ early_read_config_dword(0, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
if (pci_membase == 0) {
OpenPIC_Addr = NULL;
irq_desc[i].handler = &i8259_pic;
/* If we have a Raven PCI bridge or a Hawk PCI bridge / Memory
* controller, we poll (as they have a different int-ack address). */
- early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &pci_viddid);
+ early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &pci_viddid);
pci_did = (pci_viddid & 0xffff0000) >> 16;
if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA)
&& ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN)
--- /dev/null
+/*
+ * arch/ppc/platforms/proc_rtas.c
+ * Copyright (C) 2000 Tilmann Bitterberg
+ * (tilmann@bitterberg.de)
+ *
+ * RTAS (Runtime Abstraction Services) stuff
+ * Intention is to provide a clean user interface
+ * to use the RTAS.
+ *
+ * TODO:
+ * Split off a header file and maybe move it to a different
+ * location. Write Documentation on what the /proc/rtas/ entries
+ * actually do.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h> /* for ppc_md */
+#include <asm/time.h>
+
+/* Token for Sensors */
+#define KEY_SWITCH 0x0001
+#define ENCLOSURE_SWITCH 0x0002
+#define THERMAL_SENSOR 0x0003
+#define LID_STATUS 0x0004
+#define POWER_SOURCE 0x0005
+#define BATTERY_VOLTAGE 0x0006
+#define BATTERY_REMAINING 0x0007
+#define BATTERY_PERCENTAGE 0x0008
+#define EPOW_SENSOR 0x0009
+#define BATTERY_CYCLESTATE 0x000a
+#define BATTERY_CHARGING 0x000b
+
+/* IBM specific sensors */
+#define IBM_SURVEILLANCE 0x2328 /* 9000 */
+#define IBM_FANRPM 0x2329 /* 9001 */
+#define IBM_VOLTAGE 0x232a /* 9002 */
+#define IBM_DRCONNECTOR 0x232b /* 9003 */
+#define IBM_POWERSUPPLY 0x232c /* 9004 */
+#define IBM_INTQUEUE 0x232d /* 9005 */
+
+/* Status return values */
+#define SENSOR_CRITICAL_HIGH 13
+#define SENSOR_WARNING_HIGH 12
+#define SENSOR_NORMAL 11
+#define SENSOR_WARNING_LOW 10
+#define SENSOR_CRITICAL_LOW 9
+#define SENSOR_SUCCESS 0
+#define SENSOR_HW_ERROR -1
+#define SENSOR_BUSY -2
+#define SENSOR_NOT_EXIST -3
+#define SENSOR_DR_ENTITY -9000
+
+/* Location Codes */
+#define LOC_SCSI_DEV_ADDR 'A'
+#define LOC_SCSI_DEV_LOC 'B'
+#define LOC_CPU 'C'
+#define LOC_DISKETTE 'D'
+#define LOC_ETHERNET 'E'
+#define LOC_FAN 'F'
+#define LOC_GRAPHICS 'G'
+/* reserved / not used 'H' */
+#define LOC_IO_ADAPTER 'I'
+/* reserved / not used 'J' */
+#define LOC_KEYBOARD 'K'
+#define LOC_LCD 'L'
+#define LOC_MEMORY 'M'
+#define LOC_NV_MEMORY 'N'
+#define LOC_MOUSE 'O'
+#define LOC_PLANAR 'P'
+#define LOC_OTHER_IO 'Q'
+#define LOC_PARALLEL 'R'
+#define LOC_SERIAL 'S'
+#define LOC_DEAD_RING 'T'
+#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
+#define LOC_VOLTAGE 'V'
+#define LOC_SWITCH_ADAPTER 'W'
+#define LOC_OTHER 'X'
+#define LOC_FIRMWARE 'Y'
+#define LOC_SCSI 'Z'
+
+/* Tokens for indicators */
+#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
+#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
+#define SYSTEM_POWER_STATE 0x0003
+#define WARNING_LIGHT 0x0004
+#define DISK_ACTIVITY_LIGHT 0x0005
+#define HEX_DISPLAY_UNIT 0x0006
+#define BATTERY_WARNING_TIME 0x0007
+#define CONDITION_CYCLE_REQUEST 0x0008
+#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
+#define DR_ACTION 0x2329 /* 9001 */
+#define DR_INDICATOR 0x232a /* 9002 */
+/* 9003 - 9004: Vendor specific */
+#define GLOBAL_INTERRUPT_QUEUE 0x232d /* 9005 */
+/* 9006 - 9999: Vendor specific */
+
+/* other */
+#define MAX_SENSORS 17 /* I only know of 17 sensors */
+#define MAX_LINELENGTH 256
+#define SENSOR_PREFIX "ibm,sensor-"
+#define cel_to_fahr(x) ((x*9/5)+32)
+
+
+/* Globals */
+static struct proc_dir_entry *proc_rtas;
+static struct rtas_sensors sensors;
+static struct device_node *rtas;
+static unsigned long power_on_time = 0; /* Save the time the user set */
+static char progress_led[MAX_LINELENGTH];
+
+static unsigned long rtas_tone_frequency = 1000;
+static unsigned long rtas_tone_volume = 0;
+
+/* ****************STRUCTS******************************************* */
+struct individual_sensor {
+ unsigned int token;
+ unsigned int quant;
+};
+
+struct rtas_sensors {
+ struct individual_sensor sensor[MAX_SENSORS];
+ unsigned int quant;
+};
+
+/* ****************************************************************** */
+/* Declarations */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+
+struct file_operations ppc_rtas_poweron_operations = {
+ .read = ppc_rtas_poweron_read,
+ .write = ppc_rtas_poweron_write
+};
+struct file_operations ppc_rtas_progress_operations = {
+ .read = ppc_rtas_progress_read,
+ .write = ppc_rtas_progress_write
+};
+
+struct file_operations ppc_rtas_clock_operations = {
+ .read = ppc_rtas_clock_read,
+ .write = ppc_rtas_clock_write
+};
+
+struct file_operations ppc_rtas_tone_freq_operations = {
+ .read = ppc_rtas_tone_freq_read,
+ .write = ppc_rtas_tone_freq_write
+};
+struct file_operations ppc_rtas_tone_volume_operations = {
+ .read = ppc_rtas_tone_volume_read,
+ .write = ppc_rtas_tone_volume_write
+};
+
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
+
+/* ****************************************************************** */
+/* MAIN */
+/* ****************************************************************** */
+static int __init proc_rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ rtas = find_devices("rtas");
+ if ((rtas == 0) || (_machine != _MACH_chrp)) {
+ return 1;
+ }
+
+ proc_rtas = proc_mkdir("rtas", 0);
+ if (proc_rtas == 0)
+ return 1;
+
+ /* /proc/rtas entries */
+
+ entry = create_proc_entry("progress", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_progress_operations;
+
+ entry = create_proc_entry("clock", S_IRUGO|S_IWUSR, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_clock_operations;
+
+ entry = create_proc_entry("poweron", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_poweron_operations;
+
+ create_proc_read_entry("sensors", S_IRUGO, proc_rtas,
+ ppc_rtas_sensor_read, NULL);
+
+ entry = create_proc_entry("frequency", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_freq_operations;
+
+ entry = create_proc_entry("volume", S_IWUSR|S_IRUGO, proc_rtas);
+ if (entry) entry->proc_fops = &ppc_rtas_tone_volume_operations;
+
+ return 0;
+}
+__initcall(proc_rtas_init);
+
+/* ****************************************************************** */
+/* POWER-ON-TIME */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
+ power_on_time = nowtime; /* save the time */
+
+ to_tm(nowtime, &tm);
+
+ error = call_rtas("set-time-for-power-on", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting poweron time returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ if (power_on_time == 0)
+ n = sprintf(buf, "Power on time not set\n");
+ else
+ n = sprintf(buf, "%lu\n", power_on_time);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* PROGRESS */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long hex;
+
+ strcpy(progress_led, buf); /* save the string */
+ /* Lets see if the user passed hexdigits */
+ hex = simple_strtoul(buf, NULL, 10);
+
+ ppc_md.progress ((char *)buf, hex);
+ return count;
+
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n = 0;
+ if (progress_led != NULL)
+ n = sprintf (buf, "%s\n", progress_led);
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* CLOCK */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtc_time tm;
+ unsigned long nowtime;
+ char *dest;
+ int error;
+
+ nowtime = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
+
+ to_tm(nowtime, &tm);
+ error = call_rtas("set-time-of-day", 7, 1, NULL,
+ tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long *ret = kmalloc(4*8, GFP_KERNEL);
+ int n, error;
+
+ error = call_rtas("get-time-of-day", 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
+ printk(KERN_WARNING "error: reading the clock returned: %s\n",
+ ppc_rtas_process_error(error));
+ n = sprintf (buf, "0");
+ } else {
+ n = sprintf (buf, "%lu\n", mktime(year, mon, day, hour, min, sec));
+ }
+ kfree(ret);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+
+/* ****************************************************************** */
+/* SENSOR STUFF */
+/* ****************************************************************** */
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i,j,n;
+ unsigned long ret;
+ int state, error;
+ char buffer[MAX_LINELENGTH*MAX_SENSORS]; /* May not be enough */
+
+ if (count < 0)
+ return -EINVAL;
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
+
+ if (ppc_rtas_find_all_sensors() != 0) {
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
+ }
+
+ for (i=0; i<sensors.quant; i++) {
+ j = sensors.sensor[i].quant;
+ /* A sensor may have multiple instances */
+ while (j >= 0) {
+ error = call_rtas("get-sensor-state", 2, 2, &ret,
+ sensors.sensor[i].token, sensors.sensor[i].quant-j);
+ state = (int) ret;
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state, error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ return 0;
+ }
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ return n;
+}
+
+/* ****************************************************************** */
+
+int ppc_rtas_find_all_sensors (void)
+{
+ unsigned long *utmp;
+ int len, i, j;
+
+ utmp = (unsigned long *) get_property(rtas, "rtas-sensors", &len);
+ if (utmp == NULL) {
+ printk (KERN_ERR "error: could not get rtas-sensors\n");
+ return 1;
+ }
+
+ sensors.quant = len / 8; /* int + int */
+
+ for (i=0, j=0; j<sensors.quant; i+=2, j++) {
+ sensors.sensor[j].token = utmp[i];
+ sensors.sensor[j].quant = utmp[i+1];
+ }
+ return 0;
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string of what rtas returned
+ */
+char * ppc_rtas_process_error(int error)
+{
+ switch (error) {
+ case SENSOR_CRITICAL_HIGH:
+ return "(critical high)";
+ case SENSOR_WARNING_HIGH:
+ return "(warning high)";
+ case SENSOR_NORMAL:
+ return "(normal)";
+ case SENSOR_WARNING_LOW:
+ return "(warning low)";
+ case SENSOR_CRITICAL_LOW:
+ return "(critical low)";
+ case SENSOR_SUCCESS:
+ return "(read ok)";
+ case SENSOR_HW_ERROR:
+ return "(hardware error)";
+ case SENSOR_BUSY:
+ return "(busy)";
+ case SENSOR_NOT_EXIST:
+ return "(non existant)";
+ case SENSOR_DR_ENTITY:
+ return "(dr entity removed)";
+ default:
+ return "(UNKNOWN)";
+ }
+}
+
+/* ****************************************************************** */
+/*
+ * Builds a string out of what the sensor said
+ */
+
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
+{
+	/* Defined return values */
+ const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", "Mainenance" };
+ const char * enclosure_switch[] = { "Closed", "Open" };
+ const char * lid_status[] = { " ", "Open", "Closed" };
+ const char * power_source[] = { "AC\t", "Battery", "AC & Battery" };
+ const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
+ const char * epow_sensor[] = {
+ "EPOW Reset", "Cooling warning", "Power warning",
+ "System shutdown", "System halt", "EPOW main enclosure",
+ "EPOW power off" };
+ const char * battery_cyclestate[] = { "None", "In progress", "Requested" };
+ const char * battery_charging[] = { "Charging", "Discharching", "No current flow" };
+ const char * ibm_drconnector[] = { "Empty", "Present" };
+ const char * ibm_intqueue[] = { "Disabled", "Enabled" };
+
+ int have_strings = 0;
+ int temperature = 0;
+ int unknown = 0;
+ int n = 0;
+
+ /* What kind of sensor do we have here? */
+ switch (s.token) {
+ case KEY_SWITCH:
+ n += sprintf(buf+n, "Key switch:\t");
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
+ have_strings = 1;
+ break;
+ case ENCLOSURE_SWITCH:
+ n += sprintf(buf+n, "Enclosure switch:\t");
+ n += sprintf(buf+n, "%s\t", enclosure_switch[state]);
+ have_strings = 1;
+ break;
+ case THERMAL_SENSOR:
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
+ temperature = 1;
+ break;
+ case LID_STATUS:
+ n += sprintf(buf+n, "Lid status:\t");
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
+ have_strings = 1;
+ break;
+ case POWER_SOURCE:
+ n += sprintf(buf+n, "Power source:\t");
+ n += sprintf(buf+n, "%s\t", power_source[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_VOLTAGE:
+ n += sprintf(buf+n, "Battery voltage:\t");
+ break;
+ case BATTERY_REMAINING:
+ n += sprintf(buf+n, "Battery remaining:\t");
+ n += sprintf(buf+n, "%s\t", battery_remaining[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_PERCENTAGE:
+ n += sprintf(buf+n, "Battery percentage:\t");
+ break;
+ case EPOW_SENSOR:
+ n += sprintf(buf+n, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CYCLESTATE:
+ n += sprintf(buf+n, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "%s\t", battery_cyclestate[state]);
+ have_strings = 1;
+ break;
+ case BATTERY_CHARGING:
+ n += sprintf(buf+n, "Battery Charging:\t");
+ n += sprintf(buf+n, "%s\t", battery_charging[state]);
+ have_strings = 1;
+ break;
+ case IBM_SURVEILLANCE:
+ n += sprintf(buf+n, "Surveillance:\t");
+ break;
+ case IBM_FANRPM:
+ n += sprintf(buf+n, "Fan (rpm):\t");
+ break;
+ case IBM_VOLTAGE:
+ n += sprintf(buf+n, "Voltage (mv):\t");
+ break;
+ case IBM_DRCONNECTOR:
+ n += sprintf(buf+n, "DR connector:\t");
+ n += sprintf(buf+n, "%s\t", ibm_drconnector[state]);
+ have_strings = 1;
+ break;
+ case IBM_POWERSUPPLY:
+ n += sprintf(buf+n, "Powersupply:\t");
+ break;
+ case IBM_INTQUEUE:
+ n += sprintf(buf+n, "Interrupt queue:\t");
+ n += sprintf(buf+n, "%s\t", ibm_intqueue[state]);
+ have_strings = 1;
+ break;
+ default:
+ n += sprintf(buf+n, "Unkown sensor (type %d), ignoring it\n",
+ s.token);
+ unknown = 1;
+ have_strings = 1;
+ break;
+ }
+ if (have_strings == 0) {
+ if (temperature) {
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
+ } else
+ n += sprintf(buf+n, "%10d\t", state);
+ }
+ if (unknown == 0) {
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
+ }
+ return n;
+}
+
+/* ****************************************************************** */
+
+int check_location (char *c, int idx, char * buf)
+{
+ int n = 0;
+
+ switch (*(c+idx)) {
+ case LOC_PLANAR:
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
+ break;
+ case LOC_CPU:
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
+ break;
+ case LOC_FAN:
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
+ break;
+ case LOC_RACKMOUNTED:
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
+ break;
+ case LOC_VOLTAGE:
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
+ break;
+ case LOC_LCD:
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
+ break;
+ case '.':
+ n += sprintf ( buf, "- %c", *(c+idx+1));
+ default:
+ n += sprintf ( buf, "Unknown location");
+ break;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+/*
+ * Format:
+ * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
+ * the '.' may be an abbreviation
+ */
+int check_location_string (char *c, char *buf)
+{
+ int n=0,i=0;
+
+ while (c[i]) {
+ if (isalpha(c[i]) || c[i] == '.') {
+ n += check_location(c, i, buf+n);
+ }
+ else if (c[i] == '/' || c[i] == '-')
+ n += sprintf(buf+n, " at ");
+ i++;
+ }
+ return n;
+}
+
+
+/* ****************************************************************** */
+
+int get_location_code(struct individual_sensor s, char * buffer)
+{
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas, rstr, &llen);
+
+ n=0;
+ if (ret[0] == '\0')
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
+ else {
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ sprintf ( t, "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
+ }
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Frequency */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long freq;
+ char *dest;
+ int error;
+ freq = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone freqency\n");
+ return count;
+ }
+ if (freq < 0) freq = 0;
+ rtas_tone_frequency = freq; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_FREQUENCY, 0, freq);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_frequency);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
+/* ****************************************************************** */
+/* INDICATORS - Tone Volume */
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long volume;
+ char *dest;
+ int error;
+ volume = simple_strtoul(buf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
+
+ rtas_tone_volume = volume; /* save it for later */
+ error = call_rtas("set-indicator", 3, 1, NULL,
+ TONE_VOLUME, 0, volume);
+ if (error != 0)
+ printk(KERN_WARNING "error: setting tone volume returned: %s\n",
+ ppc_rtas_process_error(error));
+ return count;
+}
+/* ****************************************************************** */
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ int n;
+ n = sprintf(buf, "%lu\n", rtas_tone_volume);
+
+ if (*ppos >= strlen(buf))
+ return 0;
+ if (n > strlen(buf) - *ppos)
+ n = strlen(buf) - *ppos;
+ if (n > count)
+ n = count;
+ *ppos += n;
+ return n;
+}
!(n--) ) return res->Devices+i;
#undef Dev
}
- return NULL;
+ return 0;
}
PPC_DEVICE __init *residual_find_device_id(unsigned long BusMask,
!(n--) ) return res->Devices+i;
#undef Dev
}
- return NULL;
+ return 0;
}
PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
int n)
{
unsigned mask, masked_tag, size;
- if(!p) return NULL;
+ if(!p) return 0;
if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
masked_tag = packet_tag&mask;
for(; *p != END_TAG; p+=size) {
else
size=tag_small_count(*p)+1;
}
- return NULL; /* not found */
+ return 0; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_small_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return NULL; /* not found */
+ return 0; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_large_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return NULL; /* not found */
+ return 0; /* not found */
}
#ifdef CONFIG_PROC_PREPRESIDUAL
+++ /dev/null
-/*
- * arch/ppc/platforms/rpx8260.c
- *
- * RPC EP8260 platform support
- *
- * Author: Dan Malek <dan@embeddededge.com>
- * Derived from: pq2ads_setup.c by Kumar
- *
- * Copyright 2004 Embedded Edge, LLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/seq_file.h>
-
-#include <asm/mpc8260.h>
-#include <asm/machdep.h>
-
-static void (*callback_setup_arch)(void);
-
-extern unsigned char __res[sizeof(bd_t)];
-
-extern void m8260_init(unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, unsigned long r7);
-
-static int
-ep8260_show_cpuinfo(struct seq_file *m)
-{
- bd_t *binfo = (bd_t *)__res;
-
- seq_printf(m, "vendor\t\t: RPC\n"
- "machine\t\t: EP8260 PPC\n"
- "\n"
- "mem size\t\t: 0x%08x\n"
- "console baud\t\t: %d\n"
- "\n",
- binfo->bi_memsize,
- binfo->bi_baudrate);
- return 0;
-}
-
-static void __init
-ep8260_setup_arch(void)
-{
- printk("RPC EP8260 Port\n");
- callback_setup_arch();
-}
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- /* Generic 8260 platform initialization */
- m8260_init(r3, r4, r5, r6, r7);
-
- /* Anything special for this platform */
- ppc_md.show_cpuinfo = ep8260_show_cpuinfo;
-
- callback_setup_arch = ppc_md.setup_arch;
- ppc_md.setup_arch = ep8260_setup_arch;
-}
* Copyright (c) 2001 Dan Malek <dan@embeddededge.com>
*/
#ifdef __KERNEL__
-#ifndef __ASM_PLATFORMS_RPX8260_H__
-#define __ASM_PLATFORMS_RPX8260_H__
+#ifndef __ASM_PLATFORMS_RPXSUPER_H__
+#define __ASM_PLATFORMS_RPXSUPER_H__
/* A Board Information structure that is given to a program when
* prom starts it up.
#define BCSR4_EN_MII ((u_char)0x40) /* Enable PHY */
#define BCSR4_MII_READ ((u_char)0x04)
#define BCSR4_MII_MDC ((u_char)0x02)
-#define BCSR4_MII_MDIO ((u_char)0x01)
+#define BCSR4_MII_MDIO ((u_char)0x02)
#define BCSR13_FETH_IRQMASK ((u_char)0xf0)
#define BCSR15_FETH_IRQ ((u_char)0x20)
-#define PHY_INTERRUPT SIU_INT_IRQ7
-
-#endif /* __ASM_PLATFORMS_RPX8260_H__ */
+#endif /* __ASM_PLATFORMS_RPXSUPER_H__ */
#endif /* __KERNEL__ */
# Makefile for the linux kernel.
#
+ifdef CONFIG_PPC64BRIDGE
+EXTRA_AFLAGS := -Wa,-mppc64bridge
+endif
+ifdef CONFIG_4xx
+EXTRA_AFLAGS := -Wa,-m405
+endif
+ifdef CONFIG_E500
+EXTRA_AFLAGS := -Wa,-me500
+endif
+
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
obj-$(CONFIG_4xx) += ppc4xx_pic.o
obj-$(CONFIG_40x) += ppc4xx_setup.o
obj-$(CONFIG_GEN_RTC) += todc_time.o
-obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
-obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
+obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
ifeq ($(CONFIG_40x),y)
obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o ppc405_pci.o
obj-$(CONFIG_ADIR) += i8259.o indirect_pci.o pci_auto.o \
todc_time.o
obj-$(CONFIG_EBONY) += indirect_pci.o pci_auto.o todc_time.o
-obj-$(CONFIG_EV64260) += gt64260_common.o gt64260_pic.o \
- indirect_pci.o todc_time.o pci_auto.o
+obj-$(CONFIG_EV64260) += indirect_pci.o todc_time.o pci_auto.o
+obj-$(CONFIG_DMV182) += indirect_pci.o todc_time.o pci_auto.o
obj-$(CONFIG_GEMINI) += open_pic.o indirect_pci.o
+obj-$(CONFIG_GT64260) += gt64260_pic.o
obj-$(CONFIG_K2) += i8259.o indirect_pci.o todc_time.o \
pci_auto.o
obj-$(CONFIG_LOPEC) += i8259.o pci_auto.o todc_time.o
open_pic.o i8259.o hawk_common.o
obj-$(CONFIG_MENF1) += todc_time.o i8259.o mpc10x_common.o \
pci_auto.o indirect_pci.o
+obj-$(CONFIG_MV64360) += mv64360_pic.o
+obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_ocp.o
obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o indirect_pci.o \
i8259.o pci_auto.o hawk_common.o
obj-$(CONFIG_OCOTEA) += indirect_pci.o pci_auto.o todc_time.o
obj-$(CONFIG_SBC82xx) += todc_time.o
obj-$(CONFIG_SPRUCE) += cpc700_pic.o indirect_pci.o pci_auto.o \
todc_time.o
-obj-$(CONFIG_8260) += m8260_setup.o
+obj-$(CONFIG_8260) += m8260_setup.o cpm2_pic.o
obj-$(CONFIG_PCI_8260) += m8260_pci.o indirect_pci.o
obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o
-obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
+obj-$(CONFIG_CPM2) += cpm2_common.o
ifeq ($(CONFIG_PPC_GEN550),y)
obj-$(CONFIG_KGDB) += gen550_kgdb.o gen550_dbg.o
obj-$(CONFIG_SERIAL_TEXT_DEBUG) += gen550_dbg.o
ifeq ($(CONFIG_85xx),y)
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o
endif
-obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o
*/
cpm2_map_t *cpm2_immr;
-#define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserve this amount
- of space for CPM as it is larger
- than on PQ2 */
-
void
cpm2_reset(void)
{
- cpm2_immr = (cpm2_map_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
+ cpm2_immr = (cpm2_map_t *)CPM_MAP_ADDR;
/* Reclaim the DP memory for our use.
*/
* oversampled clock.
*/
void
-cpm_setbrg(uint brg, uint rate)
+cpm2_setbrg(uint brg, uint rate)
{
volatile uint *bp;
static void cpm2_dpinit(void)
{
+ void *dprambase = &((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase;
+
spin_lock_init(&cpm_dpmem_lock);
/* initialize the info header */
* varies with the processor and the microcode patches activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
+ rh_attach_region(&cpm_dpmem_info, dprambase + CPM_DATAONLY_BASE,
CPM_DATAONLY_SIZE);
}
-/* This function returns an index into the DPRAM area.
+/* This function used to return an index into the DPRAM area.
+ * Now it returns the actual physical address of that area.
+ * use cpm2_dpram_offset() to get the index
*/
-uint cpm_dpalloc(uint size, uint align)
+void *cpm2_dpalloc(uint size, uint align)
{
void *start;
unsigned long flags;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc);
+EXPORT_SYMBOL(cpm2_dpalloc);
-int cpm_dpfree(uint offset)
+int cpm2_dpfree(void *addr)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, (void *)offset);
+ ret = rh_free(&cpm_dpmem_info, addr);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(cpm_dpfree);
+EXPORT_SYMBOL(cpm2_dpfree);
/* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+void *cpm2_dpalloc_fixed(void *addr, uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
cpm_dpmem_info.alignment = align;
- start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+ start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return (uint)start;
+ return start;
}
-EXPORT_SYMBOL(cpm_dpalloc_fixed);
+EXPORT_SYMBOL(cpm2_dpalloc_fixed);
-void cpm_dpdump(void)
+void cpm2_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(cpm_dpdump);
+EXPORT_SYMBOL(cpm2_dpdump);
+
+uint cpm2_dpram_offset(void *addr)
+{
+ return (uint)((u_char *)addr -
+ ((uint)((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase));
+}
+EXPORT_SYMBOL(cpm2_dpram_offset);
-void *cpm_dpram_addr(uint offset)
+void *cpm2_dpram_addr(int offset)
{
- return (void *)&cpm2_immr->im_dprambase[offset];
+ return (void *)&((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase[offset];
}
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(cpm2_dpram_addr);
*
* Interrupt controller support for Galileo's GT64260.
*
- * Author: Chris Zankel <chris@mvista.com>
+ * Author: Chris Zankel <source@mvista.com>
* Modified by: Mark A. Greer <mgreer@mvista.com>
*
* Based on sources from Rabeeh Khoury / Galileo Technology
#include <asm/io.h>
#include <asm/system.h>
#include <asm/irq.h>
-#include <asm/gt64260.h>
+#include <asm/ocp.h>
+#include <asm/mv64x60.h>
/* ========================== forward declaration ========================== */
u32 gt64260_irq_base = 0; /* GT64260 handles the next 96 IRQs from here */
+static mv64x60_handle_t base_bh;
+static mv64x60_handle_t ic_bh;
+
/* gt64260_init_irq()
*
* This function initializes the interrupt controller. It assigns
__init void
gt64260_init_irq(void)
{
+ struct ocp_def *def;
int i;
+/* XXXX extract reg base, irq base from ocp */
+/* XXXX rewrite read/write macros to not use 'bh'?? */
+/* XXXX Have to use ocp b/c can pass arg to this routine */
+
if ( ppc_md.progress ) ppc_md.progress("gt64260_init_irq: enter", 0x0);
+ if ((def = ocp_get_one_device(OCP_VENDOR_MARVELL, OCP_FUNC_HB,
+ OCP_ANY_INDEX)) == NULL) {
+ /* XXXX SCREAM */
+ return;
+ }
+ base_bh.v_base = (u32)ioremap(def->paddr, 0x10000); /* XXXX */
+
+ if ((def = ocp_get_one_device(OCP_VENDOR_MARVELL, OCP_FUNC_PIC,
+ OCP_ANY_INDEX)) == NULL) {
+ /* XXXX SCREAM */
+ return;
+ }
+ ic_bh.v_base = (u32)ioremap(def->paddr, 0x1000); /* XXXX */
+
ppc_cached_irq_mask[0] = 0;
ppc_cached_irq_mask[1] = 0x0f000000; /* Enable GPP intrs */
ppc_cached_irq_mask[2] = 0;
/* disable all interrupts and clear current interrupts */
- gt_write(GT64260_GPP_INTR_MASK, ppc_cached_irq_mask[2]);
- gt_write(GT64260_GPP_INTR_CAUSE,0);
- gt_write(GT64260_IC_CPU_INTR_MASK_LO, ppc_cached_irq_mask[0]);
- gt_write(GT64260_IC_CPU_INTR_MASK_HI, ppc_cached_irq_mask[1]);
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK, ppc_cached_irq_mask[2]);
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_CAUSE,0);
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO, ppc_cached_irq_mask[0]);
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI, ppc_cached_irq_mask[1]);
/* use the gt64260 for all (possible) interrupt sources */
for( i = gt64260_irq_base; i < (gt64260_irq_base + 96); i++ ) {
}
-/* gt64260_get_irq()
+/*
+ * gt64260_get_irq()
*
* This function returns the lowest interrupt number of all interrupts that
* are currently asserted.
int irq;
int irq_gpp;
- irq = gt_read(GT64260_IC_MAIN_CAUSE_LO);
+ irq = mv64x60_read(&ic_bh, GT64260_IC_MAIN_CAUSE_LO);
irq = __ilog2((irq & 0x3dfffffe) & ppc_cached_irq_mask[0]);
if (irq == -1) {
- irq = gt_read(GT64260_IC_MAIN_CAUSE_HI);
+ irq = mv64x60_read(&ic_bh, GT64260_IC_MAIN_CAUSE_HI);
irq = __ilog2((irq & 0x0f000db7) & ppc_cached_irq_mask[1]);
if (irq == -1) {
irq = -2; /* bogus interrupt, should never happen */
} else {
if (irq >= 24) {
- irq_gpp = gt_read(GT64260_GPP_INTR_CAUSE);
+ irq_gpp = mv64x60_read(&base_bh, MV64x60_GPP_INTR_CAUSE);
irq_gpp = __ilog2(irq_gpp &
ppc_cached_irq_mask[2]);
irq = -2;
} else {
irq = irq_gpp + 64;
- gt_write(GT64260_GPP_INTR_CAUSE, ~(1<<(irq-64)));
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_CAUSE, ~(1<<(irq-64)));
}
} else {
irq += 32;
static void
gt64260_unmask_irq(unsigned int irq)
{
+ /* XXXX
+ printk("XXXX: *** unmask irq: %d\n", irq);
+ */
irq -= gt64260_irq_base;
if (irq > 31) {
if (irq > 63) {
/* unmask GPP irq */
- gt_write(GT64260_GPP_INTR_MASK,
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
ppc_cached_irq_mask[2] |= (1<<(irq-64)));
} else {
/* mask high interrupt register */
- gt_write(GT64260_IC_CPU_INTR_MASK_HI,
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI,
ppc_cached_irq_mask[1] |= (1<<(irq-32)));
}
} else {
/* mask low interrupt register */
- gt_write(GT64260_IC_CPU_INTR_MASK_LO,
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO,
ppc_cached_irq_mask[0] |= (1<<irq));
}
}
static void
gt64260_mask_irq(unsigned int irq)
{
+ /* XXXX
+ printk("XXXX: *** mask irq: %d\n", irq);
+ */
irq -= gt64260_irq_base;
if (irq > 31) {
if (irq > 63) {
/* mask GPP irq */
- gt_write(GT64260_GPP_INTR_MASK,
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
ppc_cached_irq_mask[2] &= ~(1<<(irq-64)));
} else {
/* mask high interrupt register */
- gt_write(GT64260_IC_CPU_INTR_MASK_HI,
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI,
ppc_cached_irq_mask[1] &= ~(1<<(irq-32)));
}
} else {
/* mask low interrupt register */
- gt_write(GT64260_IC_CPU_INTR_MASK_LO,
+ mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO,
ppc_cached_irq_mask[0] &= ~(1<<irq));
}
volatile cpm2_map_t *immap = cpm2_immr;
/* allocate IDMA dpram */
- dpram_offset = cpm_dpalloc(sizeof(idma_dpram_t), 64);
- idma_dpram = cpm_dpram_addr(dpram_offset);
+ dpram_offset = cpm2_dpalloc(sizeof(idma_dpram_t), 64);
+ idma_dpram =
+ (volatile idma_dpram_t *)&immap->im_dprambase[dpram_offset];
/* initialize the IDMA parameter RAM */
memset((void *)idma_dpram, 0, sizeof(idma_dpram_t));
+++ /dev/null
-/*
- * arch/ppc/syslib/mpc52xx_pic.c
- *
- * Programmable Interrupt Controller functions for the Freescale MPC52xx
- * embedded CPU.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based on (well, mostly copied from) the code from the 2.4 kernel by
- * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/stddef.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/irq.h>
-#include <asm/mpc52xx.h>
-
-
-static struct mpc52xx_intr *intr;
-static struct mpc52xx_sdma *sdma;
-
-static void
-mpc52xx_ic_disable(unsigned int irq)
-{
- u32 val;
-
- if (irq == MPC52xx_IRQ0) {
- val = in_be32(&intr->ctrl);
- val &= ~(1 << 11);
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_IRQ1) {
- BUG();
- }
- else if (irq <= MPC52xx_IRQ3) {
- val = in_be32(&intr->ctrl);
- val &= ~(1 << (10 - (irq - MPC52xx_IRQ1)));
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_SDMA_IRQ_BASE) {
- val = in_be32(&intr->main_mask);
- val |= 1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE));
- out_be32(&intr->main_mask, val);
- }
- else if (irq < MPC52xx_PERP_IRQ_BASE) {
- val = in_be32(&sdma->IntMask);
- val |= 1 << (irq - MPC52xx_SDMA_IRQ_BASE);
- out_be32(&sdma->IntMask, val);
- }
- else {
- val = in_be32(&intr->per_mask);
- val |= 1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE));
- out_be32(&intr->per_mask, val);
- }
-}
-
-static void
-mpc52xx_ic_enable(unsigned int irq)
-{
- u32 val;
-
- if (irq == MPC52xx_IRQ0) {
- val = in_be32(&intr->ctrl);
- val |= 1 << 11;
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_IRQ1) {
- BUG();
- }
- else if (irq <= MPC52xx_IRQ3) {
- val = in_be32(&intr->ctrl);
- val |= 1 << (10 - (irq - MPC52xx_IRQ1));
- out_be32(&intr->ctrl, val);
- }
- else if (irq < MPC52xx_SDMA_IRQ_BASE) {
- val = in_be32(&intr->main_mask);
- val &= ~(1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE)));
- out_be32(&intr->main_mask, val);
- }
- else if (irq < MPC52xx_PERP_IRQ_BASE) {
- val = in_be32(&sdma->IntMask);
- val &= ~(1 << (irq - MPC52xx_SDMA_IRQ_BASE));
- out_be32(&sdma->IntMask, val);
- }
- else {
- val = in_be32(&intr->per_mask);
- val &= ~(1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE)));
- out_be32(&intr->per_mask, val);
- }
-}
-
-static void
-mpc52xx_ic_ack(unsigned int irq)
-{
- u32 val;
-
- /*
- * Only some irqs are reset here, others in interrupting hardware.
- */
-
- switch (irq) {
- case MPC52xx_IRQ0:
- val = in_be32(&intr->ctrl);
- val |= 0x08000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_CCS_IRQ:
- val = in_be32(&intr->enc_status);
- val |= 0x00000400;
- out_be32(&intr->enc_status, val);
- break;
- case MPC52xx_IRQ1:
- val = in_be32(&intr->ctrl);
- val |= 0x04000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_IRQ2:
- val = in_be32(&intr->ctrl);
- val |= 0x02000000;
- out_be32(&intr->ctrl, val);
- break;
- case MPC52xx_IRQ3:
- val = in_be32(&intr->ctrl);
- val |= 0x01000000;
- out_be32(&intr->ctrl, val);
- break;
- default:
- if (irq >= MPC52xx_SDMA_IRQ_BASE
- && irq < (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)) {
- out_be32(&sdma->IntPend,
- 1 << (irq - MPC52xx_SDMA_IRQ_BASE));
- }
- break;
- }
-}
-
-static void
-mpc52xx_ic_disable_and_ack(unsigned int irq)
-{
- mpc52xx_ic_disable(irq);
- mpc52xx_ic_ack(irq);
-}
-
-static void
-mpc52xx_ic_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- mpc52xx_ic_enable(irq);
-}
-
-static struct hw_interrupt_type mpc52xx_ic = {
- "MPC52xx",
- NULL, /* startup(irq) */
- NULL, /* shutdown(irq) */
- mpc52xx_ic_enable, /* enable(irq) */
- mpc52xx_ic_disable, /* disable(irq) */
- mpc52xx_ic_disable_and_ack, /* disable_and_ack(irq) */
- mpc52xx_ic_end, /* end(irq) */
- 0 /* set_affinity(irq, cpumask) SMP. */
-};
-
-void __init
-mpc52xx_init_irq(void)
-{
- int i;
-
- /* Remap the necessary zones */
- intr = (struct mpc52xx_intr *)
- ioremap(MPC52xx_INTR, sizeof(struct mpc52xx_intr));
- sdma = (struct mpc52xx_sdma *)
- ioremap(MPC52xx_SDMA, sizeof(struct mpc52xx_sdma));
-
- if ((intr==NULL) || (sdma==NULL))
- panic("Can't ioremap PIC/SDMA register for init_irq !");
-
- /* Disable all interrupt sources. */
- out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */
- out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */
- out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */
- out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
- out_be32(&intr->ctrl,
- 0x0f000000 | /* clear IRQ 0-3 */
- 0x00c00000 | /* IRQ0: level-sensitive, active low */
- 0x00001000 | /* MEE master external enable */
- 0x00000000 | /* 0 means disable IRQ 0-3 */
- 0x00000001); /* CEb route critical normally */
-
- /* Zero a bunch of the priority settings. */
- out_be32(&intr->per_pri1, 0);
- out_be32(&intr->per_pri2, 0);
- out_be32(&intr->per_pri3, 0);
- out_be32(&intr->main_pri1, 0);
- out_be32(&intr->main_pri2, 0);
-
- /* Initialize irq_desc[i].handler's with mpc52xx_ic. */
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].handler = &mpc52xx_ic;
- irq_desc[i].status = IRQ_LEVEL;
- }
-}
-
-int
-mpc52xx_get_irq(struct pt_regs *regs)
-{
- u32 status;
- int irq = -1;
-
- status = in_be32(&intr->enc_status);
-
- if (status & 0x00000400) { /* critical */
- irq = (status >> 8) & 0x3;
- if (irq == 2) /* high priority peripheral */
- goto peripheral;
- irq += MPC52xx_CRIT_IRQ_BASE;
- }
- else if (status & 0x00200000) { /* main */
- irq = (status >> 16) & 0x1f;
- if (irq == 4) /* low priority peripheral */
- goto peripheral;
- irq += MPC52xx_MAIN_IRQ_BASE;
- }
- else if (status & 0x20000000) { /* peripheral */
-peripheral:
- irq = (status >> 24) & 0x1f;
- if (irq == 0) { /* bestcomm */
- status = in_be32(&sdma->IntPend);
- irq = ffs(status) + MPC52xx_SDMA_IRQ_BASE-1;
- }
- else
- irq += MPC52xx_PERP_IRQ_BASE;
- }
-
- return irq;
-}
-
+++ /dev/null
-/*
- * arch/ppc/syslib/mpc52xx_common.c
- *
- * Common code for the boards based on Freescale MPC52xx embedded CPU.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Support for other bootloaders than UBoot by Dale Farnsworth
- * <dfarnsworth@mvista.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/config.h>
-
-#include <asm/time.h>
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-#include <asm/ocp.h>
-#include <asm/ppcboot.h>
-
-extern bd_t __res;
-
-static int core_mult[] = { /* CPU Frequency multiplier, taken */
- 0, 0, 0, 10, 20, 20, 25, 45, /* from the datasheet used to compute */
- 30, 55, 40, 50, 0, 60, 35, 0, /* CPU frequency from XLB freq and */
- 30, 25, 65, 10, 70, 20, 75, 45, /* external jumper config */
- 0, 55, 40, 50, 80, 60, 35, 0
-};
-
-void
-mpc52xx_restart(char *cmd)
-{
- struct mpc52xx_gpt* gpt0 = (struct mpc52xx_gpt*) MPC52xx_GPTx(0);
-
- local_irq_disable();
-
- /* Turn on the watchdog and wait for it to expire. It effectively
- does a reset */
- if (gpt0 != NULL) {
- out_be32(&gpt0->count, 0x000000ff);
- out_be32(&gpt0->mode, 0x00009004);
- } else
- printk(KERN_ERR "mpc52xx_restart: Unable to ioremap GPT0 registers, -> looping ...");
-
- while (1);
-}
-
-void
-mpc52xx_halt(void)
-{
- local_irq_disable();
-
- while (1);
-}
-
-void
-mpc52xx_power_off(void)
-{
- /* By default we don't have any way of shut down.
- If a specific board wants to, it can set the power down
- code to any hardware implementation dependent code */
- mpc52xx_halt();
-}
-
-
-void __init
-mpc52xx_set_bat(void)
-{
- /* Set BAT 2 to map the 0xf0000000 area */
- /* This mapping is used during mpc52xx_progress,
- * mpc52xx_find_end_of_memory, and UARTs/GPIO access for debug
- */
- mb();
- mtspr(DBAT2U, 0xf0001ffe);
- mtspr(DBAT2L, 0xf000002a);
- mb();
-}
-
-void __init
-mpc52xx_map_io(void)
-{
- /* Here we only map the MBAR */
- io_block_mapping(
- MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO);
-}
-
-
-#ifdef CONFIG_SERIAL_TEXT_DEBUG
-#ifdef MPC52xx_PF_CONSOLE_PORT
-#define MPC52xx_CONSOLE MPC52xx_PSCx(MPC52xx_PF_CONSOLE_PORT)
-#else
-#error "mpc52xx PSC for console not selected"
-#endif
-
-void
-mpc52xx_progress(char *s, unsigned short hex)
-{
- struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE;
- char c;
-
- /* Don't we need to disable serial interrupts ? */
-
- while ((c = *s++) != 0) {
- if (c == '\n') {
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXRDY)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
- }
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXRDY)) ;
- out_8(&psc->mpc52xx_psc_buffer_8, c);
- }
-}
-
-#endif /* CONFIG_SERIAL_TEXT_DEBUG */
-
-
-unsigned long __init
-mpc52xx_find_end_of_memory(void)
-{
- u32 ramsize = __res.bi_memsize;
-
- /*
- * if bootloader passed a memsize, just use it
- * else get size from sdram config registers
- */
- if (ramsize == 0) {
- struct mpc52xx_mmap_ctl *mmap_ctl;
- u32 sdram_config_0, sdram_config_1;
-
- /* Temp BAT2 mapping active when this is called ! */
- mmap_ctl = (struct mpc52xx_mmap_ctl*) MPC52xx_MMAP_CTL;
-
- sdram_config_0 = in_be32(&mmap_ctl->sdram0);
- sdram_config_1 = in_be32(&mmap_ctl->sdram1);
-
- if ((sdram_config_0 & 0x1f) >= 0x13)
- ramsize = 1 << ((sdram_config_0 & 0xf) + 17);
-
- if (((sdram_config_1 & 0x1f) >= 0x13) &&
- ((sdram_config_1 & 0xfff00000) == ramsize))
- ramsize += 1 << ((sdram_config_1 & 0xf) + 17);
-
- iounmap(mmap_ctl);
- }
-
- return ramsize;
-}
-
-void __init
-mpc52xx_calibrate_decr(void)
-{
- int current_time, previous_time;
- int tbl_start, tbl_end;
- unsigned int xlbfreq, cpufreq, ipbfreq, pcifreq, divisor;
-
- xlbfreq = __res.bi_busfreq;
- /* if bootloader didn't pass bus frequencies, calculate them */
- if (xlbfreq == 0) {
- /* Get RTC & Clock manager modules */
- struct mpc52xx_rtc *rtc;
- struct mpc52xx_cdm *cdm;
-
- rtc = (struct mpc52xx_rtc*)
- ioremap(MPC52xx_RTC, sizeof(struct mpc52xx_rtc));
- cdm = (struct mpc52xx_cdm*)
- ioremap(MPC52xx_CDM, sizeof(struct mpc52xx_cdm));
-
- if ((rtc==NULL) || (cdm==NULL))
- panic("Can't ioremap RTC/CDM while computing bus freq");
-
- /* Count bus clock during 1/64 sec */
- out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */
- previous_time = in_be32(&rtc->time);
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_start = get_tbl();
- previous_time = current_time;
- while ((current_time = in_be32(&rtc->time)) == previous_time) ;
- tbl_end = get_tbl();
- out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */
-
- /* Compute all frequency from that & CDM settings */
- xlbfreq = (tbl_end - tbl_start) << 8;
- cpufreq = (xlbfreq * core_mult[in_be32(&cdm->rstcfg)&0x1f])/10;
- ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ?
- xlbfreq / 2 : xlbfreq;
- switch (in_8(&cdm->pci_clk_sel) & 3) {
- case 0:
- pcifreq = ipbfreq;
- break;
- case 1:
- pcifreq = ipbfreq / 2;
- break;
- default:
- pcifreq = xlbfreq / 4;
- break;
- }
- __res.bi_busfreq = xlbfreq;
- __res.bi_intfreq = cpufreq;
- __res.bi_ipbfreq = ipbfreq;
- __res.bi_pcifreq = pcifreq;
-
- /* Release mapping */
- iounmap((void*)rtc);
- iounmap((void*)cdm);
- }
-
- divisor = 4;
-
- tb_ticks_per_jiffy = xlbfreq / HZ / divisor;
- tb_to_us = mulhwu_scale_factor(xlbfreq / divisor, 1000000);
-}
-
-
-void __init
-mpc52xx_add_board_devices(struct ocp_def board_ocp[]) {
- while (board_ocp->vendor != OCP_VENDOR_INVALID)
- if(ocp_add_one_device(board_ocp++))
- printk("mpc5200-ocp: Failed to add board device !\n");
-}
-
--- /dev/null
+/*
+ * arch/ppc/kernel/mv64360_pic.c
+ *
+ * Interrupt controller support for Marvell's MV64360.
+ *
+ * Author: Rabeeh Khoury <rabeeh@galileo.co.il>
+ * Based on MV64360 PIC written by
+ * Chris Zankel <chris@mvista.com>
+ * Mark A. Greer <mgreer@mvista.com>
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * This file contains the specific functions to support the MV64360
+ * interrupt controller.
+ *
+ * The MV64360 has two main interrupt registers (high and low) that
+ * summarize the interrupts generated by the units of the MV64360.
+ * Each bit is assigned to an interrupt number, where the low register
+ * are assigned from IRQ0 to IRQ31 and the high cause register
+ * from IRQ32 to IRQ63
+ * The GPP (General Purpose Pins) interrupts are assigned from IRQ64 (GPP0)
+ * to IRQ95 (GPP31).
+ * get_irq() returns the lowest interrupt number that is currently asserted.
+ *
+ * Note:
+ * - This driver does not initialize the GPP when used as an interrupt
+ * input.
+ */
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/ocp.h>
+#include <asm/mv64x60.h>
+
+#ifdef CONFIG_IRQ_ALL_CPUS
+#error "The mv64360 does not support yet distribution of IRQs on all CPUs"
+#endif
+/* ========================== forward declaration ========================== */
+
+static void mv64360_unmask_irq(unsigned int);
+static void mv64360_mask_irq(unsigned int);
+static irqreturn_t mv64360_cpu_error_int_handler(int, void *, struct pt_regs *);
+static irqreturn_t mv64360_sram_error_int_handler(int, void *, struct pt_regs *);
+static irqreturn_t mv64360_pci_error_int_handler(int, void *, struct pt_regs *);
+
+/* ========================== local declarations =========================== */
+
+struct hw_interrupt_type mv64360_pic = {
+ .typename = " MV64360_PIC ", /* typename */
+ .enable = mv64360_unmask_irq, /* enable */
+ .disable = mv64360_mask_irq, /* disable */
+ .ack = mv64360_mask_irq, /* ack */
+};
+
+#define CPU_INTR_STR "MV64360 CPU interface error"
+#define SRAM_INTR_STR "MV64360 internal sram error"
+#define PCI0_INTR_STR "MV64360 PCI 0 error"
+#define PCI1_INTR_STR "MV64360 PCI 1 error"
+
+static mv64x60_handle_t base_bh;
+
+u32 mv64360_irq_base = 0; /* MV64360 handles the next 96 IRQs from here */
+
+/* mv64360_init_irq()
+ *
+ * This function initializes the interrupt controller. It assigns
+ * all interrupts from IRQ0 to IRQ95 to the mv64360 interrupt controller.
+ *
+ * Input Variable(s):
+ * None.
+ *
+ * Output Variable(s):
+ * None.
+ *
+ * Returns:
+ * void
+ *
+ * Note:
+ * We register all GPP inputs as interrupt source, but disable them.
+ */
+
+__init void
+mv64360_init_irq(void)
+{
+ struct ocp_def *def;
+ int i;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mv64360_init_irq: enter", 0x0);
+
+ if ( ppc_md.progress ) ppc_md.progress("mv64360_init_irq: enter", 0x0);
+
+ if ((def = ocp_get_one_device(OCP_VENDOR_MARVELL, OCP_FUNC_HB,
+ OCP_ANY_INDEX)) == NULL) {
+ /* XXXX SCREAM */
+ return;
+ }
+ base_bh.v_base = (unsigned long)ioremap(def->paddr, 0x1000);
+
+ ppc_cached_irq_mask[0] = 0;
+ ppc_cached_irq_mask[1] = 0x0f000000; /* Enable GPP intrs */
+ ppc_cached_irq_mask[2] = 0;
+
+ /* disable all interrupts and clear current interrupts */
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_CAUSE, 0);
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
+ ppc_cached_irq_mask[2]);
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_LO,
+ ppc_cached_irq_mask[0]);
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_HI,
+ ppc_cached_irq_mask[1]);
+
+ /* use the mv64360 for all (possible) interrupt sources */
+ for (i = mv64360_irq_base; i < (mv64360_irq_base + 96); i++) {
+ /* All interrupts are level interrupts */
+ irq_desc[i].status |= IRQ_LEVEL;
+ irq_desc[i].handler = &mv64360_pic;
+ }
+
+ /* Register CPU interface error interrupt handler */
+ request_irq(MV64x60_IRQ_CPU_ERR, mv64360_cpu_error_int_handler,
+ SA_INTERRUPT, CPU_INTR_STR, 0);
+ mv64x60_write(&base_bh, MV64x60_CPU_ERR_MASK, 0x000000ff);
+
+ /* Register internal SRAM error interrupt handler */
+ request_irq(MV64360_IRQ_SRAM_PAR_ERR, mv64360_sram_error_int_handler,
+ SA_INTERRUPT, SRAM_INTR_STR, 0);
+
+ /* Register PCI 0 error interrupt handler */
+ request_irq(MV64360_IRQ_PCI0, mv64360_pci_error_int_handler,
+ SA_INTERRUPT, PCI0_INTR_STR, (void *) 0);
+ mv64x60_write(&base_bh, MV64x60_PCI0_ERR_MASK, 0x00a50c25);
+
+ /* Register PCI 1 error interrupt handler */
+ request_irq(MV64360_IRQ_PCI1, mv64360_pci_error_int_handler,
+ SA_INTERRUPT, PCI1_INTR_STR, (void *) 1);
+ mv64x60_write(&base_bh, MV64x60_PCI1_ERR_MASK, 0x00a50c25);
+
+ if (ppc_md.progress)
+ ppc_md.progress("mv64360_init_irq: exit", 0x0);
+}
+
+
+/* mv64360_get_irq()
+ *
+ * This function returns the lowest interrupt number of all interrupts that
+ * are currently asserted.
+ *
+ * Input Variable(s):
+ * struct pt_regs* not used
+ *
+ * Output Variable(s):
+ * None.
+ *
+ * Returns:
+ * int <interrupt number> or -2 (bogus interrupt)
+ *
+ */
+int
+mv64360_get_irq(struct pt_regs *regs)
+{
+ int irq;
+ int irq_gpp;
+
+#ifdef CONFIG_SMP
+#define BIT28 (1<<28)
+ /*
+ * Second CPU gets only doorbell (message) interrupts.
+ * The doorbell interrupt is BIT28 in the main interrupt low cause reg.
+ */
+ int cpu_nr = smp_processor_id();
+ if (cpu_nr == 1) {
+ irq = mv64x60_read(&base_bh, MV64360_IC_MAIN_CAUSE_LO);
+ if (!(irq & BIT28))
+ return -1;
+ return 28;
+ }
+#endif
+
+ irq = mv64x60_read(&base_bh, MV64360_IC_MAIN_CAUSE_LO);
+ irq = __ilog2((irq & 0x3dfffffe) & ppc_cached_irq_mask[0]);
+ if (irq == -1) {
+ irq = mv64x60_read(&base_bh, MV64360_IC_MAIN_CAUSE_HI);
+ irq = __ilog2((irq & 0x1f0003f7) & ppc_cached_irq_mask[1]);
+ if (irq == -1) {
+ irq = -2; /* bogus interrupt, should never happen */
+ } else {
+ if ((irq >= 24) && (irq < 28)) {
+ irq_gpp =
+ mv64x60_read(&base_bh,
+ MV64x60_GPP_INTR_CAUSE);
+ irq_gpp =
+ __ilog2(irq_gpp &
+ ppc_cached_irq_mask[2]);
+
+ if (irq_gpp == -1) {
+ irq = -2;
+ } else {
+ irq = irq_gpp + 64;
+ mv64x60_write(&base_bh,
+ MV64x60_GPP_INTR_CAUSE,
+ ~(1 << (irq - 64)));
+ }
+ } else {
+ irq += 32;
+ }
+ }
+ }
+
+ if (irq < 0) {
+ return (irq);
+ } else {
+ return (mv64360_irq_base + irq);
+ }
+}
+
+/* mv64360_unmask_irq()
+ *
+ * This function enables an interrupt.
+ *
+ * Input Variable(s):
+ * unsigned int interrupt number (IRQ0...IRQ95).
+ *
+ * Output Variable(s):
+ * None.
+ *
+ * Returns:
+ * void
+ */
+
+static void
+mv64360_unmask_irq(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+ /* second CPU gets only doorbell interrupts */
+ if ((irq - mv64360_irq_base) == 28) {
+ mv64x60_set_bits(&base_bh, MV64360_IC_CPU1_INTR_MASK_LO, BIT28);
+ return;
+ }
+#endif
+ irq -= mv64360_irq_base;
+ if (irq > 31) {
+ if (irq > 63) {
+ /* unmask GPP irq */
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
+ ppc_cached_irq_mask[2] |= (1 << (irq - 64)));
+ } else {
+ /* mask high interrupt register */
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_HI,
+ ppc_cached_irq_mask[1] |= (1 << (irq - 32)));
+ }
+ } else {
+ /* mask low interrupt register */
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_LO,
+ ppc_cached_irq_mask[0] |= (1 << irq));
+ }
+}
+
+
+/* mv64360_mask_irq()
+ *
+ * This function disables the requested interrupt.
+ *
+ * Input Variable(s):
+ * unsigned int interrupt number (IRQ0...IRQ95).
+ *
+ * Output Variable(s):
+ * None.
+ *
+ * Returns:
+ * void
+ */
+
+static void
+mv64360_mask_irq(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+ if ((irq - mv64360_irq_base) == 28) {
+ mv64x60_clr_bits(&base_bh, MV64360_IC_CPU1_INTR_MASK_LO, BIT28);
+ return;
+ }
+#endif
+ irq -= mv64360_irq_base;
+ if (irq > 31) {
+ if (irq > 63) {
+ /* mask GPP irq */
+ mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
+ ppc_cached_irq_mask[2] &= ~(1 << (irq - 64)));
+ } else {
+ /* mask high interrupt register */
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_HI,
+ ppc_cached_irq_mask[1] &= ~(1 << (irq - 32)));
+ }
+ } else {
+ /* mask low interrupt register */
+ mv64x60_write(&base_bh, MV64360_IC_CPU0_INTR_MASK_LO,
+ ppc_cached_irq_mask[0] &= ~(1 << irq));
+ }
+
+}
+
+static irqreturn_t
+mv64360_cpu_error_int_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 val;
+ val = mv64x60_read(&base_bh, MV64x60_CPU_ERR_CAUSE);
+ printk(KERN_ERR
+ "mv64360_cpu_error_int_handler: Error on CPU interface - Cause regiser 0x%08x\n",
+ val);
+ printk(KERN_ERR "\tCPU error register dump:\n");
+ printk(KERN_ERR "\tAddress low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_CPU_ERR_ADDR_LO));
+ printk(KERN_ERR "\tAddress high 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_CPU_ERR_ADDR_HI));
+ printk(KERN_ERR "\tData low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_CPU_ERR_DATA_LO));
+ printk(KERN_ERR "\tData high 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_CPU_ERR_DATA_HI));
+ printk(KERN_ERR "\tParity 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_CPU_ERR_PARITY));
+ mv64x60_write(&base_bh, MV64x60_CPU_ERR_CAUSE, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+mv64360_sram_error_int_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ printk(KERN_ERR
+ "mv64360_sram_error_int_handler: Error in internal SRAM - Cause register 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_CAUSE));
+ printk(KERN_ERR "\tSRAM error register dump:\n");
+ printk(KERN_ERR "\tAddress Low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_ADDR_LO));
+ printk(KERN_ERR "\tAddress High 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_ADDR_HI));
+ printk(KERN_ERR "\tData Low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_DATA_LO));
+ printk(KERN_ERR "\tData High 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_DATA_HI));
+ printk(KERN_ERR "\tParity 0x%08x\n",
+ mv64x60_read(&base_bh, MV64360_SRAM_ERR_PARITY));
+ mv64x60_write(&base_bh, MV64360_SRAM_ERR_CAUSE, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+mv64360_pci_error_int_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 val;
+ unsigned int pci_bus = (unsigned int) dev_id;
+ if (pci_bus == 0) { /* Error on PCI 0 */
+ val = mv64x60_read(&base_bh, MV64x60_PCI0_ERR_CAUSE);
+ printk(KERN_ERR
+ "mv64360_pci_error_int_handler: Error in PCI %d Interface\n",
+ pci_bus);
+ printk(KERN_ERR "\tPCI %d error register dump:\n", pci_bus);
+ printk(KERN_ERR "\tCause register 0x%08x\n", val);
+ printk(KERN_ERR "\tAddress Low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI0_ERR_ADDR_LO));
+ printk(KERN_ERR "\tAddress High 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI0_ERR_ADDR_HI));
+ printk(KERN_ERR "\tAttribute 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI0_ERR_DATA_LO));
+ printk(KERN_ERR "\tCommand 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI0_ERR_CMD));
+ mv64x60_write(&base_bh, MV64x60_PCI0_ERR_CAUSE, ~val);
+ }
+ if (pci_bus == 1) { /* Error on PCI 1 */
+ val = mv64x60_read(&base_bh, MV64x60_PCI1_ERR_CAUSE);
+ printk(KERN_ERR
+ "mv64360_pci_error_int_handler: Error in PCI %d Interface\n",
+ pci_bus);
+ printk(KERN_ERR "\tPCI %d error register dump:\n", pci_bus);
+ printk(KERN_ERR "\tCause register 0x%08x\n", val);
+ printk(KERN_ERR "\tAddress Low 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI1_ERR_ADDR_LO));
+ printk(KERN_ERR "\tAddress High 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI1_ERR_ADDR_HI));
+ printk(KERN_ERR "\tAttribute 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI1_ERR_DATA_LO));
+ printk(KERN_ERR "\tCommand 0x%08x\n",
+ mv64x60_read(&base_bh, MV64x60_PCI1_ERR_CMD));
+ mv64x60_write(&base_bh, MV64x60_PCI1_ERR_CAUSE, ~val);
+ }
+ return IRQ_HANDLED;
+}
--- /dev/null
+/*
+ * arch/ppc/syslib/mv64x60.c
+ *
+ * Common routines for the Marvell/Galileo Discovery line of host bridges
+ * (e.g, gt64260 and mv64360).
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ * Rabeeh Khoury <rabeeh@galileo.co.il>
+ *
+ * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/mv64x60.h>
+#include <asm/delay.h>
+#include <asm/ocp.h>
+
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif /* DEBUG */
+
+
+static u32 mv64x60_mask(u32 val, u32 num_bits);
+static u32 mv64x60_shift_left(u32 val, u32 num_bits);
+static u32 mv64x60_shift_right(u32 val, u32 num_bits);
+static void mv64x60_early_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si);
+static int mv64x60_get_type(mv64x60_handle_t *bh);
+/*
+ * Forward declarations: generic mv64x60 bridge bring-up helpers.
+ * These identify the bridge chip, read and size the CPU->memory windows,
+ * program the CPU<->PCI address windows, and set up/enumerate the two
+ * PCI hoses.  (Definitions follow later in this file.)
+ */
+static int mv64x60_setup_for_chip(mv64x60_handle_t *bh);
+static void mv64x60_get_mem_windows(mv64x60_handle_t *bh,
+ u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
+static u32 mv64x60_calc_mem_size(mv64x60_handle_t *bh,
+ u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
+static void mv64x60_config_cpu2mem_windows(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si, u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
+static void mv64x60_config_cpu2pci_windows(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static void mv64x60_set_cpu2pci_window(mv64x60_handle_t *bh,
+ mv64x60_pci_info_t *pi, u32 *win_tab, u32 *remap_tab);
+static void mv64x60_config_pci2mem_windows(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si, u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
+static void mv64x60_alloc_hoses(mv64x60_handle_t *bh, mv64x60_setup_info_t *si);
+static void mv64x60_init_hoses(mv64x60_handle_t *bh, mv64x60_setup_info_t *si);
+static void mv64x60_init_resources(struct pci_controller *hose,
+ mv64x60_pci_info_t *pi, u32 io_base);
+static void mv64x60_set_pci_params(struct pci_controller *hose,
+ mv64x60_pci_info_t *pi);
+static void mv64x60_enumerate_buses(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static int mv64x60_pci_exclude_device(u8 bus, u8 devfn);
+static void mv64x60_fixup_ocp(struct ocp_device *, void *arg);
+
+/* GT64260-specific window-manipulation ops (plugged into the chip_info). */
+static u32 gt64260_translate_size(u32 base, u32 size, u32 num_bits);
+static u32 gt64260_untranslate_size(u32 base, u32 size, u32 num_bits);
+static void gt64260_set_pci2mem_window(struct pci_controller *hose,
+ u32 window, u32 base);
+static u32 gt64260_is_enabled_32bit(mv64x60_handle_t *bh, u32 window);
+static void gt64260_enable_window_32bit(mv64x60_handle_t *bh, u32 window);
+static void gt64260_disable_window_32bit(mv64x60_handle_t *bh, u32 window);
+static void gt64260_enable_window_64bit(mv64x60_handle_t *bh, u32 window);
+static void gt64260_disable_window_64bit(mv64x60_handle_t *bh, u32 window);
+static void gt64260_disable_all_windows(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static void gt64260a_chip_specific_init(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static void gt64260b_chip_specific_init(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+
+/* MV64360/MV64460-specific window-manipulation ops. */
+static u32 mv64360_translate_size(u32 base_addr, u32 size, u32 num_bits);
+static u32 mv64360_untranslate_size(u32 base_addr, u32 size, u32 num_bits);
+static void mv64360_set_pci2mem_window(struct pci_controller *hose,
+ u32 window, u32 base);
+static u32 mv64360_is_enabled_32bit(mv64x60_handle_t *bh, u32 window);
+static void mv64360_enable_window_32bit(mv64x60_handle_t *bh, u32 window);
+static void mv64360_disable_window_32bit(mv64x60_handle_t *bh, u32 window);
+static void mv64360_enable_window_64bit(mv64x60_handle_t *bh, u32 window);
+static void mv64360_disable_window_64bit(mv64x60_handle_t *bh, u32 window);
+static void mv64360_disable_all_windows(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static void mv64360_chip_specific_init(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+static void mv64460_chip_specific_init(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+
+
+/*
+ * When TRUE (the default), PCI config cycles that target the bridge's own
+ * PCI device are rejected -- presumably consulted by
+ * mv64x60_pci_exclude_device(); confirm at its definition (not in this hunk).
+ */
+u8 mv64x60_pci_exclude_bridge = TRUE;
+
+/*
+ * NOTE(review): names suggest mv64x60_lock serializes general bridge
+ * register access and mv64x60_rmw_lock serializes read-modify-write
+ * register sequences; confirm at the acquisition sites, which are not
+ * visible in this hunk.
+ */
+spinlock_t mv64x60_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t mv64x60_rmw_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * GT64260 32-bit address window descriptors, indexed by the generic
+ * MV64x60_*_WIN enum.  Each entry names the base/size registers for one
+ * decode window, how many significant bits those registers hold, and the
+ * accessor pair used to convert between a CPU address and the register
+ * field (shift for base/size style registers, mask for remap registers).
+ *
+ * Fix: the PCI->memory slave remap windows 2 and 3 (both buses) were all
+ * programming the window-1 remap register (copy/paste slip), so remap
+ * windows 2/3 silently aliased window 1.  They now use the per-window
+ * ..._MEM_2_REMAP / ..._MEM_3_REMAP registers.
+ */
+static mv64x60_32bit_window_t gt64260_32bit_windows[] __initdata = {
+ /* CPU->MEM Windows */
+ [MV64x60_CPU2MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_0_BASE,
+ .size_reg = MV64x60_CPU2MEM_0_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_1_BASE,
+ .size_reg = MV64x60_CPU2MEM_1_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_2_BASE,
+ .size_reg = MV64x60_CPU2MEM_2_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_3_BASE,
+ .size_reg = MV64x60_CPU2MEM_3_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->Device Windows */
+ [MV64x60_CPU2DEV_0_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_0_BASE,
+ .size_reg = MV64x60_CPU2DEV_0_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2DEV_1_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_1_BASE,
+ .size_reg = MV64x60_CPU2DEV_1_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2DEV_2_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_2_BASE,
+ .size_reg = MV64x60_CPU2DEV_2_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2DEV_3_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_3_BASE,
+ .size_reg = MV64x60_CPU2DEV_3_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->Boot Window */
+ [MV64x60_CPU2BOOT_WIN] = {
+ .base_reg = MV64x60_CPU2BOOT_0_BASE,
+ .size_reg = MV64x60_CPU2BOOT_0_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 0 Windows */
+ [MV64x60_CPU2PCI0_IO_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_IO_BASE,
+ .size_reg = MV64x60_CPU2PCI0_IO_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_0_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_0_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_1_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_1_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_2_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_2_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_3_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_3_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 1 Windows */
+ [MV64x60_CPU2PCI1_IO_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_IO_BASE,
+ .size_reg = MV64x60_CPU2PCI1_IO_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_0_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_0_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_1_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_1_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_2_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_2_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_3_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_3_SIZE,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->SRAM Window (64260 has no integrated SRAM) */
+ /* CPU->PCI 0 Remap I/O Window */
+ [MV64x60_CPU2PCI0_IO_REMAP_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_IO_REMAP,
+ .size_reg = 0,
+ .base_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 1 Remap I/O Window */
+ [MV64x60_CPU2PCI1_IO_REMAP_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_IO_REMAP,
+ .size_reg = 0,
+ .base_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU Memory Protection Windows */
+ [MV64x60_CPU_PROT_0_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_0,
+ .size_reg = MV64x60_CPU_PROT_SIZE_0,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_PROT_1_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_1,
+ .size_reg = MV64x60_CPU_PROT_SIZE_1,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_PROT_2_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_2,
+ .size_reg = MV64x60_CPU_PROT_SIZE_2,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_PROT_3_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_3,
+ .size_reg = MV64x60_CPU_PROT_SIZE_3,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU Snoop Windows */
+ [MV64x60_CPU_SNOOP_0_WIN] = {
+ .base_reg = GT64260_CPU_SNOOP_BASE_0,
+ .size_reg = GT64260_CPU_SNOOP_SIZE_0,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_SNOOP_1_WIN] = {
+ .base_reg = GT64260_CPU_SNOOP_BASE_1,
+ .size_reg = GT64260_CPU_SNOOP_SIZE_1,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_SNOOP_2_WIN] = {
+ .base_reg = GT64260_CPU_SNOOP_BASE_2,
+ .size_reg = GT64260_CPU_SNOOP_SIZE_2,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU_SNOOP_3_WIN] = {
+ .base_reg = GT64260_CPU_SNOOP_BASE_3,
+ .size_reg = GT64260_CPU_SNOOP_SIZE_3,
+ .base_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 0->System Memory Remap Windows */
+ [MV64x60_PCI02MEM_REMAP_0_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_0_REMAP,
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_1_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_1_REMAP,
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_2_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_2_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_3_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_3_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ /* PCI 1->System Memory Remap Windows */
+ [MV64x60_PCI12MEM_REMAP_0_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_0_REMAP,
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_1_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_1_REMAP,
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_2_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_2_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_3_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_3_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 20,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+};
+
+/*
+ * GT64260 64-bit (hi/lo register pair) window descriptors: CPU->PCI MEM
+ * remap, PCI->MEM access control, and PCI->MEM snoop windows for both
+ * PCI buses.  Each entry pairs a base-hi/base-lo register (plus an
+ * optional size register), the number of significant low-address bits,
+ * and the shift accessors used to pack/unpack the register fields.
+ */
+static mv64x60_64bit_window_t gt64260_64bit_windows[] __initdata = {
+ /* CPU->PCI 0 MEM Remap Windows */
+ [MV64x60_CPU2PCI0_MEM_0_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_0_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_0_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_1_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_1_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_1_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_2_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_2_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_2_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_3_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_3_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_3_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 1 MEM Remap Windows */
+ [MV64x60_CPU2PCI1_MEM_0_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_0_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_0_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_1_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_1_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_1_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_2_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_2_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_2_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_3_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_3_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_3_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 12,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 0->MEM Access Control Windows */
+ [MV64x60_PCI02MEM_ACC_CNTL_0_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_0_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_0_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_0_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_1_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_1_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_1_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_1_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_2_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_2_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_2_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_2_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_3_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_3_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_3_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_3_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 1->MEM Access Control Windows */
+ [MV64x60_PCI12MEM_ACC_CNTL_0_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_0_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_0_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_0_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_1_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_1_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_1_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_1_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_2_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_2_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_2_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_2_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_3_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_3_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_3_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_3_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 0->MEM Snoop Windows */
+ [MV64x60_PCI02MEM_SNOOP_0_WIN] = {
+ .base_hi_reg = GT64260_PCI0_SNOOP_0_BASE_HI,
+ .base_lo_reg = GT64260_PCI0_SNOOP_0_BASE_LO,
+ .size_reg = GT64260_PCI0_SNOOP_0_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_SNOOP_1_WIN] = {
+ .base_hi_reg = GT64260_PCI0_SNOOP_1_BASE_HI,
+ .base_lo_reg = GT64260_PCI0_SNOOP_1_BASE_LO,
+ .size_reg = GT64260_PCI0_SNOOP_1_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_SNOOP_2_WIN] = {
+ .base_hi_reg = GT64260_PCI0_SNOOP_2_BASE_HI,
+ .base_lo_reg = GT64260_PCI0_SNOOP_2_BASE_LO,
+ .size_reg = GT64260_PCI0_SNOOP_2_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_SNOOP_3_WIN] = {
+ .base_hi_reg = GT64260_PCI0_SNOOP_3_BASE_HI,
+ .base_lo_reg = GT64260_PCI0_SNOOP_3_BASE_LO,
+ .size_reg = GT64260_PCI0_SNOOP_3_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 1->MEM Snoop Windows */
+ [MV64x60_PCI12MEM_SNOOP_0_WIN] = {
+ .base_hi_reg = GT64260_PCI1_SNOOP_0_BASE_HI,
+ .base_lo_reg = GT64260_PCI1_SNOOP_0_BASE_LO,
+ .size_reg = GT64260_PCI1_SNOOP_0_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_SNOOP_1_WIN] = {
+ .base_hi_reg = GT64260_PCI1_SNOOP_1_BASE_HI,
+ .base_lo_reg = GT64260_PCI1_SNOOP_1_BASE_LO,
+ .size_reg = GT64260_PCI1_SNOOP_1_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_SNOOP_2_WIN] = {
+ .base_hi_reg = GT64260_PCI1_SNOOP_2_BASE_HI,
+ .base_lo_reg = GT64260_PCI1_SNOOP_2_BASE_LO,
+ .size_reg = GT64260_PCI1_SNOOP_2_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_SNOOP_3_WIN] = {
+ .base_hi_reg = GT64260_PCI1_SNOOP_3_BASE_HI,
+ .base_lo_reg = GT64260_PCI1_SNOOP_3_BASE_LO,
+ .size_reg = GT64260_PCI1_SNOOP_3_SIZE,
+ .base_lo_bits = 12,
+ .size_bits = 12,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+};
+
+/*
+ * Chip-info (ops + window tables) for the GT64260A revision.  Identical
+ * to the GT64260B variant below except for chip_specific_init.
+ */
+static mv64x60_chip_info_t gt64260a_ci __initdata = {
+ .translate_size = gt64260_translate_size,
+ .untranslate_size = gt64260_untranslate_size,
+ .set_pci2mem_window = gt64260_set_pci2mem_window,
+ .is_enabled_32bit = gt64260_is_enabled_32bit,
+ .enable_window_32bit = gt64260_enable_window_32bit,
+ .disable_window_32bit = gt64260_disable_window_32bit,
+ .enable_window_64bit = gt64260_enable_window_64bit,
+ .disable_window_64bit = gt64260_disable_window_64bit,
+ .disable_all_windows = gt64260_disable_all_windows,
+ .chip_specific_init = gt64260a_chip_specific_init,
+ .window_tab_32bit = gt64260_32bit_windows,
+ .window_tab_64bit = gt64260_64bit_windows,
+};
+
+/*
+ * Chip-info for the GT64260B revision; shares all ops and window tables
+ * with the A revision except for the revision-specific init hook.
+ */
+static mv64x60_chip_info_t gt64260b_ci __initdata = {
+ .translate_size = gt64260_translate_size,
+ .untranslate_size = gt64260_untranslate_size,
+ .set_pci2mem_window = gt64260_set_pci2mem_window,
+ .is_enabled_32bit = gt64260_is_enabled_32bit,
+ .enable_window_32bit = gt64260_enable_window_32bit,
+ .disable_window_32bit = gt64260_disable_window_32bit,
+ .enable_window_64bit = gt64260_enable_window_64bit,
+ .disable_window_64bit = gt64260_disable_window_64bit,
+ .disable_all_windows = gt64260_disable_all_windows,
+ .chip_specific_init = gt64260b_chip_specific_init,
+ .window_tab_32bit = gt64260_32bit_windows,
+ .window_tab_64bit = gt64260_64bit_windows,
+};
+
+
+/*
+ * MV64360/MV64460 32-bit address window descriptors (16 significant base
+ * bits, vs. 12 on the GT64260).  On this chip, .extra carries the window's
+ * bit number in the base-address-enable register for windows that have one
+ * (NOTE(review): 0x80000000 in .extra appears to flag "has enable bit",
+ * low bits the bit index -- confirm against the enable/disable routines).
+ *
+ * Fix: as in the GT64260 table, the PCI->memory slave remap windows 2 and
+ * 3 on both buses were programming the window-1 remap register
+ * (copy/paste slip); they now use the per-window _2/_3 remap registers.
+ */
+static mv64x60_32bit_window_t mv64360_32bit_windows[] __initdata = {
+ /* CPU->MEM Windows */
+ [MV64x60_CPU2MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_0_BASE,
+ .size_reg = MV64x60_CPU2MEM_0_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_1_BASE,
+ .size_reg = MV64x60_CPU2MEM_1_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 1 },
+ [MV64x60_CPU2MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_2_BASE,
+ .size_reg = MV64x60_CPU2MEM_2_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 2 },
+ [MV64x60_CPU2MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2MEM_3_BASE,
+ .size_reg = MV64x60_CPU2MEM_3_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 3 },
+ /* CPU->Device Windows */
+ [MV64x60_CPU2DEV_0_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_0_BASE,
+ .size_reg = MV64x60_CPU2DEV_0_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 4 },
+ [MV64x60_CPU2DEV_1_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_1_BASE,
+ .size_reg = MV64x60_CPU2DEV_1_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 5 },
+ [MV64x60_CPU2DEV_2_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_2_BASE,
+ .size_reg = MV64x60_CPU2DEV_2_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 6 },
+ [MV64x60_CPU2DEV_3_WIN] = {
+ .base_reg = MV64x60_CPU2DEV_3_BASE,
+ .size_reg = MV64x60_CPU2DEV_3_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 7 },
+ /* CPU->Boot Window */
+ [MV64x60_CPU2BOOT_WIN] = {
+ .base_reg = MV64x60_CPU2BOOT_0_BASE,
+ .size_reg = MV64x60_CPU2BOOT_0_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 8 },
+ /* CPU->PCI 0 Windows */
+ [MV64x60_CPU2PCI0_IO_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_IO_BASE,
+ .size_reg = MV64x60_CPU2PCI0_IO_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 9 },
+ [MV64x60_CPU2PCI0_MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_0_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_0_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 10 },
+ [MV64x60_CPU2PCI0_MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_1_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_1_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 11 },
+ [MV64x60_CPU2PCI0_MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_2_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_2_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 12 },
+ [MV64x60_CPU2PCI0_MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_MEM_3_BASE,
+ .size_reg = MV64x60_CPU2PCI0_MEM_3_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 13 },
+ /* CPU->PCI 1 Windows */
+ [MV64x60_CPU2PCI1_IO_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_IO_BASE,
+ .size_reg = MV64x60_CPU2PCI1_IO_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 14 },
+ [MV64x60_CPU2PCI1_MEM_0_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_0_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_0_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 15 },
+ [MV64x60_CPU2PCI1_MEM_1_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_1_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_1_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 16 },
+ [MV64x60_CPU2PCI1_MEM_2_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_2_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_2_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 17 },
+ [MV64x60_CPU2PCI1_MEM_3_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_MEM_3_BASE,
+ .size_reg = MV64x60_CPU2PCI1_MEM_3_SIZE,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 18 },
+ /* CPU->SRAM Window */
+ [MV64x60_CPU2SRAM_WIN] = {
+ .base_reg = MV64360_CPU2SRAM_BASE,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 19 },
+ /* CPU->PCI 0 Remap I/O Window */
+ [MV64x60_CPU2PCI0_IO_REMAP_WIN] = {
+ .base_reg = MV64x60_CPU2PCI0_IO_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 1 Remap I/O Window */
+ [MV64x60_CPU2PCI1_IO_REMAP_WIN] = {
+ .base_reg = MV64x60_CPU2PCI1_IO_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU Memory Protection Windows */
+ [MV64x60_CPU_PROT_0_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_0,
+ .size_reg = MV64x60_CPU_PROT_SIZE_0,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0x80000000 | 31 },
+ [MV64x60_CPU_PROT_1_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_1,
+ .size_reg = MV64x60_CPU_PROT_SIZE_1,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0x80000000 | 31 },
+ [MV64x60_CPU_PROT_2_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_2,
+ .size_reg = MV64x60_CPU_PROT_SIZE_2,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0x80000000 | 31 },
+ [MV64x60_CPU_PROT_3_WIN] = {
+ .base_reg = MV64x60_CPU_PROT_BASE_3,
+ .size_reg = MV64x60_CPU_PROT_SIZE_3,
+ .base_bits = 16,
+ .size_bits = 16,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0x80000000 | 31 },
+ /* CPU Snoop Windows -- don't exist on 64360 */
+ /* PCI 0->System Memory Remap Windows */
+ [MV64x60_PCI02MEM_REMAP_0_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_0_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_1_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_1_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_2_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_2_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI02MEM_REMAP_3_WIN] = {
+ .base_reg = MV64x60_PCI0_SLAVE_MEM_3_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ /* PCI 1->System Memory Remap Windows */
+ [MV64x60_PCI12MEM_REMAP_0_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_0_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_1_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_1_REMAP,
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_2_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_2_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+ [MV64x60_PCI12MEM_REMAP_3_WIN] = {
+ .base_reg = MV64x60_PCI1_SLAVE_MEM_3_REMAP, /* was _1 (copy/paste) */
+ .size_reg = 0,
+ .base_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0 },
+};
+
+/*
+ * MV64360/MV64460 64-bit (hi/lo register pair) window descriptors:
+ * CPU->PCI MEM remap and PCI->MEM access-control windows for both buses.
+ * Unlike the GT64260, this chip has no PCI->MEM snoop windows, and the
+ * access-control entries use 20-bit masked fields rather than shifts.
+ * NOTE(review): 0x80000000 in .extra appears to flag a window that has
+ * an enable bit (low bits = bit index) -- confirm against the
+ * mv64360_enable/disable_window_64bit routines.
+ */
+static mv64x60_64bit_window_t mv64360_64bit_windows[MV64x60_64BIT_WIN_COUNT]
+ __initdata = {
+ /* CPU->PCI 0 MEM Remap Windows */
+ [MV64x60_CPU2PCI0_MEM_0_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_0_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_0_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_1_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_1_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_1_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_2_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_2_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_2_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI0_MEM_3_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI0_MEM_3_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI0_MEM_3_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* CPU->PCI 1 MEM Remap Windows */
+ [MV64x60_CPU2PCI1_MEM_0_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_0_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_0_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_1_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_1_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_1_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_2_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_2_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_2_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ [MV64x60_CPU2PCI1_MEM_3_REMAP_WIN] = {
+ .base_hi_reg = MV64x60_CPU2PCI1_MEM_3_REMAP_HI,
+ .base_lo_reg = MV64x60_CPU2PCI1_MEM_3_REMAP_LO,
+ .size_reg = 0,
+ .base_lo_bits = 16,
+ .size_bits = 0,
+ .get_from_field = mv64x60_shift_left,
+ .map_to_field = mv64x60_shift_right,
+ .extra = 0 },
+ /* PCI 0->MEM Access Control Windows */
+ [MV64x60_PCI02MEM_ACC_CNTL_0_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_0_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_0_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_0_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_1_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_1_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_1_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_1_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_2_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_2_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_2_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_2_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI02MEM_ACC_CNTL_3_WIN] = {
+ .base_hi_reg = MV64x60_PCI0_ACC_CNTL_3_BASE_HI,
+ .base_lo_reg = MV64x60_PCI0_ACC_CNTL_3_BASE_LO,
+ .size_reg = MV64x60_PCI0_ACC_CNTL_3_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ /* PCI 1->MEM Access Control Windows */
+ [MV64x60_PCI12MEM_ACC_CNTL_0_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_0_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_0_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_0_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_1_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_1_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_1_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_1_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_2_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_2_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_2_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_2_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ [MV64x60_PCI12MEM_ACC_CNTL_3_WIN] = {
+ .base_hi_reg = MV64x60_PCI1_ACC_CNTL_3_BASE_HI,
+ .base_lo_reg = MV64x60_PCI1_ACC_CNTL_3_BASE_LO,
+ .size_reg = MV64x60_PCI1_ACC_CNTL_3_SIZE,
+ .base_lo_bits = 20,
+ .size_bits = 20,
+ .get_from_field = mv64x60_mask,
+ .map_to_field = mv64x60_mask,
+ .extra = 0x80000000 | 0 },
+ /* PCI 0->MEM Snoop Windows -- don't exist on 64360 */
+ /* PCI 1->MEM Snoop Windows -- don't exist on 64360 */
+};
+
+/*
+ * Chip-ops table for the MV64360 bridge (init-time only).  Identical to
+ * mv64460_ci below except for .chip_specific_init.
+ */
+static mv64x60_chip_info_t mv64360_ci __initdata = {
+	.translate_size = mv64360_translate_size,
+	.untranslate_size = mv64360_untranslate_size,
+	.set_pci2mem_window = mv64360_set_pci2mem_window,
+	.is_enabled_32bit = mv64360_is_enabled_32bit,
+	.enable_window_32bit = mv64360_enable_window_32bit,
+	.disable_window_32bit = mv64360_disable_window_32bit,
+	.enable_window_64bit = mv64360_enable_window_64bit,
+	.disable_window_64bit = mv64360_disable_window_64bit,
+	.disable_all_windows = mv64360_disable_all_windows,
+	.chip_specific_init = mv64360_chip_specific_init,
+	.window_tab_32bit = mv64360_32bit_windows,
+	.window_tab_64bit = mv64360_64bit_windows,
+};
+
+/*
+ * Chip-ops table for the MV64460 bridge (init-time only).  Shares all
+ * routines and window tables with the MV64360 except .chip_specific_init.
+ */
+static mv64x60_chip_info_t mv64460_ci __initdata = {
+	.translate_size = mv64360_translate_size,
+	.untranslate_size = mv64360_untranslate_size,
+	.set_pci2mem_window = mv64360_set_pci2mem_window,
+	.is_enabled_32bit = mv64360_is_enabled_32bit,
+	.enable_window_32bit = mv64360_enable_window_32bit,
+	.disable_window_32bit = mv64360_disable_window_32bit,
+	.enable_window_64bit = mv64360_enable_window_64bit,
+	.disable_window_64bit = mv64360_disable_window_64bit,
+	.disable_all_windows = mv64360_disable_all_windows,
+	.chip_specific_init = mv64460_chip_specific_init,
+	.window_tab_32bit = mv64360_32bit_windows,
+	.window_tab_64bit = mv64360_64bit_windows,
+};
+
+
+/*
+ *****************************************************************************
+ *
+ * Bridge Initialization Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_init()
+ *
+ * Initialize the bridge based on settings passed in via 'si'.  The bridge
+ * handle, 'bh', will be set so that it can be used to make subsequent
+ * calls to routines in this file.  Returns 0 on success, -1 on failure
+ * (unknown/unsupported bridge chip).
+ */
+int __init
+mv64x60_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
+	int rc = 0;
+
+	if (ppc_md.progress)
+		ppc_md.progress("mv64x60_init: Enter", 0x0);
+
+	mv64x60_early_init(bh, si);	/* Maps bridge regs into bh->v_base */
+	mv64x60_alloc_hoses(bh, si);	/* Allocate pci hose structures */
+
+	if (mv64x60_get_type(bh) != 0) {
+		/*
+		 * Unmap the register mapping created by mv64x60_early_init();
+		 * this error path previously leaked it (the setup_for_chip
+		 * failure path below already unmapped).
+		 */
+		iounmap((void *)bh->v_base);
+
+		if (ppc_md.progress)
+			ppc_md.progress("mv64x60_init: Exit--error", 0x0);
+		return -1;
+	}
+
+	if (mv64x60_setup_for_chip(bh) != 0) {
+		iounmap((void *)bh->v_base);
+
+		if (ppc_md.progress)
+			ppc_md.progress("mv64x60_init: Exit--error", 0x0);
+		return -1;
+	}
+
+	bh->ci->disable_all_windows(bh, si); /* Disable windows except mem ctlr */
+	mv64x60_config_cpu2pci_windows(bh, si);	/* Init CPU->PCI windows */
+	mv64x60_get_mem_windows(bh, mem_windows); /* Read mem ctlr regs */
+	mv64x60_config_cpu2mem_windows(bh, si, mem_windows); /* CPU->MEM setup*/
+	mv64x60_config_pci2mem_windows(bh, si, mem_windows); /* PCI->Sys MEM */
+	mv64x60_init_hoses(bh, si);	/* Init hose structs & PCI params */
+	bh->ci->chip_specific_init(bh, si);
+	mv64x60_enumerate_buses(bh, si); /* Enumerate PCI buses */
+	ocp_for_each_device(mv64x60_fixup_ocp, (void *)bh);
+
+	if (ppc_md.progress)
+		ppc_md.progress("mv64x60_init: Exit", 0x0);
+
+	return rc;
+} /* mv64x60_init() */
+
+/*
+ *****************************************************************************
+ *
+ * Pre-Bridge-Init Routines (Externally Visible)
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_get_mem_size()
+ *
+ * Calculate the amount of memory that the memory controller is set up for.
+ * This should only be used by board-specific code if there is no other
+ * way to determine the amount of memory in the system.
+ *
+ * Builds a zeroed, stack-local bridge handle using 'bridge_base' for both
+ * the physical and virtual base (so the caller must pass an address that is
+ * directly usable for register accesses), then sums the enabled CPU->MEM
+ * window sizes.
+ */
+u32 __init
+mv64x60_get_mem_size(u32 bridge_base, u32 chip_type)
+{
+	mv64x60_handle_t bh;
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
+
+	memset(&bh, 0, sizeof(bh));
+
+	bh.type = chip_type;
+	bh.p_base = bridge_base;
+	bh.v_base = bridge_base;	/* registers assumed reachable 1:1 here */
+
+	/* NOTE(review): return value ignored -- an unsupported chip_type
+	 * would leave bh.ci unset; callers are trusted to pass a valid type */
+	(void)mv64x60_setup_for_chip(&bh);
+	mv64x60_get_mem_windows(&bh, mem_windows);
+	return mv64x60_calc_mem_size(&bh, mem_windows);
+}
+
+/*
+ *****************************************************************************
+ *
+ * Window Config Routines (Externally Visible)
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_get_32bit_window()
+ *
+ * Determine the base address and size of a 32-bit window on the bridge.
+ * 'window' indexes the chip's 32-bit window table; *base and *size are
+ * always written (zeroed when the window has no base register, size zeroed
+ * when it has no size register).
+ */
+void __init
+mv64x60_get_32bit_window(mv64x60_handle_t *bh, u32 window, u32 *base, u32 *size)
+{
+	u32 val, base_reg, size_reg, base_bits, size_bits;
+	u32 (*get_from_field)(u32 val, u32 num_bits);
+
+	base_reg = bh->ci->window_tab_32bit[window].base_reg;
+
+	if (base_reg != 0) {
+		size_reg = bh->ci->window_tab_32bit[window].size_reg;
+		base_bits = bh->ci->window_tab_32bit[window].base_bits;
+		size_bits = bh->ci->window_tab_32bit[window].size_bits;
+		get_from_field= bh->ci->window_tab_32bit[window].get_from_field;
+
+		val = mv64x60_read(bh, base_reg);
+		*base = get_from_field(val, base_bits);
+
+		if (size_reg != 0) {
+			/* Raw size field -> actual byte size via chip hook */
+			val = mv64x60_read(bh, size_reg);
+			val = get_from_field(val, size_bits);
+			*size = bh->ci->untranslate_size(*base, val, size_bits);
+		}
+		else {
+			*size = 0;
+		}
+	}
+	else {
+		*base = 0;
+		*size = 0;
+	}
+
+	DBG("get 32bit window: %d, base: 0x%x, size: 0x%x\n",
+		window, *base, *size);
+
+	return;
+}
+
<br>
+/*
+ * mv64x60_set_32bit_window()
+ *
+ * Set the base address and size of a 32-bit window on the bridge.
+ * 'other_bits' is OR'd into the base register (attribute/swap bits).
+ * A window with no base register entry is silently ignored.
+ */
+void __init
+mv64x60_set_32bit_window(mv64x60_handle_t *bh, u32 window, u32 base, u32 size,
+	u32 other_bits)
+{
+	u32 val, base_reg, size_reg, base_bits, size_bits;
+	u32 (*map_to_field)(u32 val, u32 num_bits);
+
+	DBG("set 32bit window: %d, base: 0x%x, size: 0x%x, other: 0x%x\n",
+		window, base, size, other_bits);
+
+	base_reg = bh->ci->window_tab_32bit[window].base_reg;
+
+	if (base_reg != 0) {
+		size_reg = bh->ci->window_tab_32bit[window].size_reg;
+		base_bits = bh->ci->window_tab_32bit[window].base_bits;
+		size_bits = bh->ci->window_tab_32bit[window].size_bits;
+		map_to_field = bh->ci->window_tab_32bit[window].map_to_field;
+
+		val = map_to_field(base, base_bits) | other_bits;
+		mv64x60_write(bh, base_reg, val);
+
+		if (size_reg != 0) {
+			/* Byte size -> chip-specific size-field encoding */
+			val = bh->ci->translate_size(base, size, size_bits);
+			val = map_to_field(val, size_bits);
+			mv64x60_write(bh, size_reg, val);
+		}
+		(void)mv64x60_read(bh, base_reg); /* Flush FIFO */
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_get_64bit_window()
+ *
+ * Determine the base address and size of a 64-bit window on the bridge.
+ * Outputs are always written; everything is zeroed when the window has no
+ * low-base register, and *size is zeroed when there is no size register.
+ */
+void __init
+mv64x60_get_64bit_window(mv64x60_handle_t *bh, u32 window, u32 *base_hi,
+	u32 *base_lo, u32 *size)
+{
+	u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
+	u32 (*get_from_field)(u32 val, u32 num_bits);
+
+	base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
+
+	if (base_lo_reg != 0) {
+		size_reg = bh->ci->window_tab_64bit[window].size_reg;
+		base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
+		size_bits = bh->ci->window_tab_64bit[window].size_bits;
+		get_from_field= bh->ci->window_tab_64bit[window].get_from_field;
+
+		/* High half of the base is stored raw (no field extraction) */
+		*base_hi = mv64x60_read(bh,
+			bh->ci->window_tab_64bit[window].base_hi_reg);
+
+		val = mv64x60_read(bh, base_lo_reg);
+		*base_lo = get_from_field(val, base_lo_bits);
+
+		if (size_reg != 0) {
+			val = mv64x60_read(bh, size_reg);
+			val = get_from_field(val, size_bits);
+			*size = bh->ci->untranslate_size(*base_lo, val,
+								size_bits);
+		}
+		else {
+			*size = 0;
+		}
+	}
+	else {
+		*base_hi = 0;
+		*base_lo = 0;
+		*size = 0;
+	}
+
+	DBG("get 64bit window: %d, base hi: 0x%x, base lo: 0x%x, size: 0x%x\n",
+		window, *base_hi, *base_lo, *size);
+
+	return;
+}
+
+/*
+ * mv64x60_set_64bit_window()
+ *
+ * Set the base address and size of a 64-bit window on the bridge.
+ * 'other_bits' is OR'd into the low-base register.  Windows with no
+ * low-base register entry are silently ignored.
+ */
+void __init
+mv64x60_set_64bit_window(mv64x60_handle_t *bh, u32 window,
+	u32 base_hi, u32 base_lo, u32 size, u32 other_bits)
+{
+	u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
+	u32 (*map_to_field)(u32 val, u32 num_bits);
+
+	DBG("set 64bit window: %d, base hi: 0x%x, base lo: 0x%x, " \
+		"size: 0x%x, other: 0x%x\n",
+		window, base_hi, base_lo, size, other_bits);
+
+	base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
+
+	if (base_lo_reg != 0) {
+		size_reg = bh->ci->window_tab_64bit[window].size_reg;
+		base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
+		size_bits = bh->ci->window_tab_64bit[window].size_bits;
+		map_to_field = bh->ci->window_tab_64bit[window].map_to_field;
+
+		/* High half written raw; only the low half is field-mapped */
+		mv64x60_write(bh, bh->ci->window_tab_64bit[window].base_hi_reg,
+			base_hi);
+
+		val = map_to_field(base_lo, base_lo_bits) | other_bits;
+		mv64x60_write(bh, base_lo_reg, val);
+
+		if (size_reg != 0) {
+			val = bh->ci->translate_size(base_lo, size, size_bits);
+			val = map_to_field(val, size_bits);
+			mv64x60_write(bh, size_reg, val);
+		}
+
+		(void)mv64x60_read(bh, base_lo_reg); /* Flush FIFO */
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_mask()
+ *
+ * Take the high-order 'num_bits' of 'val' & mask off low bits.
+ * NOTE(review): 'num_bits' must be 1..31 -- num_bits == 0 shifts by 32,
+ * which is undefined behavior in C; all callers in this file pass >= 12.
+ */
+static u32 __init
+mv64x60_mask(u32 val, u32 num_bits)
+{
+	DBG("mask val: 0x%x, num_bits: %d == 0x%x\n", val,
+		num_bits, val & (0xffffffff << (32 - num_bits)));
+
+	return val & (0xffffffff << (32 - num_bits));
+}
+
+/*
+ * mv64x60_shift_left()
+ *
+ * (Header previously mis-named this "mv64x60_mask_shift_left".)
+ * Take the low-order 'num_bits' of 'val', shift left to align at bit 31 (MSB).
+ * NOTE(review): num_bits must be 1..31 (shift by 32 would be UB).
+ */
+static u32 __init
+mv64x60_shift_left(u32 val, u32 num_bits)
+{
+	DBG("shift left val: 0x%x, num_bits: %d == 0x%x\n", val,
+		num_bits, val << (32 - num_bits));
+
+	return val << (32 - num_bits);
+}
+
+/*
+ * mv64x60_shift_right()
+ *
+ * Take the high-order 'num_bits' of 'val', shift right to align at bit 0 (LSB).
+ * NOTE(review): num_bits must be 1..31 (shift by 32 would be UB).
+ */
+static u32 __init
+mv64x60_shift_right(u32 val, u32 num_bits)
+{
+	DBG("shift right val: 0x%x, num_bits: %d == 0x%x\n", val, num_bits,
+		val >> (32 - num_bits));
+
+	return val >> (32 - num_bits);
+}
+
+/*
+ *****************************************************************************
+ *
+ * Early Init Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_early_init()
+ *
+ * Do some bridge work that must take place before we start messing with
+ * the bridge for real: zero the handle, ioremap the bridge's internal
+ * register space, tweak CPU_CONFIG, and stop the general-purpose
+ * timer/counters.
+ */
+static void __init
+mv64x60_early_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	memset(bh, 0, sizeof(*bh));
+
+	bh->p_base = si->phys_reg_base;
+	bh->v_base = (u32)ioremap(bh->p_base, MV64x60_INTERNAL_SPACE_SIZE);
+	bh->base_irq = si->base_irq;
+
+	/* Bit 12 MUST be 0; set bit 27--don't auto-update cpu remap regs */
+	mv64x60_clr_bits(bh, MV64x60_CPU_CONFIG, (1<<12));
+	mv64x60_set_bits(bh, MV64x60_CPU_CONFIG, (1<<27));
+
+	/*
+	 * Turn off timer/counters.  Not turning off watchdog timer because
+	 * can't read its reg on the 64260A so don't know if we'll be enabling
+	 * or disabling.
+	 */
+	mv64x60_clr_bits(bh, MV64x60_TIMR_CNTR_0_3_CNTL,
+			((1<<0) | (1<<8) | (1<<16) | (1<<24)));
+
+#ifdef	CONFIG_GT64260
+	/* GT64260 has four extra timer/counters -- stop those too */
+	mv64x60_clr_bits(bh, GT64260_TIMR_CNTR_4_7_CNTL,
+			((1<<0) | (1<<8) | (1<<16) | (1<<24)));
+#endif
+
+#if 0
+XXXX Put in PCI_x_RETRY adjustment XXXX
+#endif
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * Chip Identification Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_get_type()
+ *
+ * Determine the type of bridge chip we have by reading the bridge's own
+ * PCI vendor/device/revision IDs via early config cycles on hose A.
+ * Returns 0 on success (bh->type set), -1 on an unknown vendor or device.
+ */
+static int __init mv64x60_get_type(struct mv64x60_handle *bh)
+{
+	struct pci_controller *hose = bh->hose_a;
+	int pcidev;
+	int devfn;
+	u16 val;
+	u8 save_exclude;
+
+	/* The bridge's own device number comes from P2P_CONFIG bits 27:24 */
+	pcidev = (mv64x60_read(bh, MV64x60_PCI0_P2P_CONFIG) >> 24) & 0xf;
+	devfn = PCI_DEVFN(pcidev, 0);
+
+	/* Temporarily allow config access to the bridge itself */
+	save_exclude = mv64x60_pci_exclude_bridge;
+	mv64x60_pci_exclude_bridge = FALSE;
+
+	/* Sanity check of bridge's Vendor ID */
+	early_read_config_word(hose, 0, devfn, PCI_VENDOR_ID, &val);
+
+	if (val != PCI_VENDOR_ID_MARVELL) {
+		/* Restore the exclude flag on this error path too (was
+		 * previously left forced to FALSE). */
+		mv64x60_pci_exclude_bridge = save_exclude;
+		return -1;
+	}
+
+	/* Figure out the type of Marvell bridge it is */
+	early_read_config_word(hose, 0, devfn, PCI_DEVICE_ID, &val);
+
+	switch (val) {
+	case PCI_DEVICE_ID_MARVELL_GT64260:
+		/* Revision byte distinguishes the 64260A from the 64260B */
+		early_read_config_word(hose, 0, devfn,
+			PCI_CLASS_REVISION, &val);
+
+		switch (val & 0xff) {
+		case GT64260_REV_A:
+			bh->type = MV64x60_TYPE_GT64260A;
+			break;
+		case GT64260_REV_B:
+			bh->type = MV64x60_TYPE_GT64260B;
+			break;
+		}
+		break;
+
+	case PCI_DEVICE_ID_MARVELL_MV64360:
+		/* Marvell won't tell me how to distinguish a 64361 & 64362 */
+		bh->type = MV64x60_TYPE_MV64360;
+		break;
+
+	case PCI_DEVICE_ID_MARVELL_MV64460:
+		bh->type = MV64x60_TYPE_MV64460;
+		break;
+
+	default:
+		printk(KERN_CRIT "Unknown Marvell bridge type %04x\n", val);
+		/* Restore the exclude flag before bailing out (was
+		 * previously left forced to FALSE). */
+		mv64x60_pci_exclude_bridge = save_exclude;
+		return -1;
+	}
+
+	mv64x60_pci_exclude_bridge = save_exclude;
+	return 0;
+}
+
+/*
+ * mv64x60_setup_for_chip()
+ *
+ * Set 'bh' to use the proper set of routines for the bridge chip that we
+ * have (bh->type must already be set, e.g. by mv64x60_get_type()).
+ * Returns 0 on success, -1 for an unsupported/invalid bridge type.
+ */
+static int __init
+mv64x60_setup_for_chip(mv64x60_handle_t *bh)
+{
+	int	rc = 0;
+
+	/* Set up chip-specific info based on the chip/bridge type */
+	switch(bh->type) {
+	case MV64x60_TYPE_GT64260A:
+		/* Fixed: '&' of '&gt64260a_ci' had been lost to HTML-entity
+		 * mangling ('&gt' -> '>'), leaving invalid C. */
+		bh->ci = &gt64260a_ci;
+		break;
+
+	case MV64x60_TYPE_GT64260B:
+		bh->ci = &gt64260b_ci;
+		break;
+
+	case MV64x60_TYPE_MV64360:
+		bh->ci = &mv64360_ci;
+		break;
+
+#if 0 /* Marvell won't tell me how to distinguish--MAG */
+	case MV64x60_TYPE_MV64361:
+	case MV64x60_TYPE_MV64362:
+#endif
+	case MV64x60_TYPE_MV64460:
+		bh->ci = &mv64460_ci;
+		break;
+
+	case MV64x60_TYPE_INVALID:
+	default:
+		if (ppc_md.progress)
+			ppc_md.progress("mv64x60: Unsupported bridge",
+				0x0);
+		printk("mv64x60: Unsupported bridge\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/*
+ *****************************************************************************
+ *
+ * System Memory Window Related Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_get_mem_windows()
+ *
+ * Get the values in the memory controller & return in the 'mem_windows'
+ * array: [i][0] = base, [i][1] = size, zeroed for disabled windows.
+ */
+static void __init
+mv64x60_get_mem_windows(mv64x60_handle_t *bh,
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
+{
+	u32	i;
+	u32	windows[] = { MV64x60_CPU2MEM_0_WIN, MV64x60_CPU2MEM_1_WIN,
+				MV64x60_CPU2MEM_2_WIN, MV64x60_CPU2MEM_3_WIN };
+
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		if (bh->ci->is_enabled_32bit(bh, i)) {
+			mv64x60_get_32bit_window(bh, windows[i],
+				&mem_windows[i][0], &mem_windows[i][1]);
+		}
+		else {
+			/* Disabled window reports base 0 / size 0 */
+			mem_windows[i][0] = 0;
+			mem_windows[i][1] = 0;
+		}
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_calc_mem_size()
+ *
+ * Using the memory controller register values in 'mem_windows', determine
+ * how much memory it is set up for by summing the window sizes.
+ * 'bh' is unused here; kept for signature symmetry with related routines.
+ */
+static u32 __init
+mv64x60_calc_mem_size(mv64x60_handle_t *bh,
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
+{
+	u32	i, total = 0;
+
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		total += mem_windows[i][1];
+	}
+
+	return total;
+}
+
+/*
+ *****************************************************************************
+ *
+ * CPU->System MEM Config Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_config_cpu2mem_windows()
+ *
+ * Configure CPU->Memory windows on the bridge: for each enabled memory
+ * window, program the matching CPU protection window (and snoop window,
+ * when the chip's table defines one) to cover the same base/size.
+ */
+static void __init
+mv64x60_config_cpu2mem_windows(mv64x60_handle_t *bh, mv64x60_setup_info_t *si,
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
+{
+	u32	i;
+	u32	prot_windows[] = {
+			MV64x60_CPU_PROT_0_WIN, MV64x60_CPU_PROT_1_WIN,
+			MV64x60_CPU_PROT_2_WIN, MV64x60_CPU_PROT_3_WIN };
+	u32	cpu_snoop_windows[] = {
+			MV64x60_CPU_SNOOP_0_WIN, MV64x60_CPU_SNOOP_1_WIN,
+			MV64x60_CPU_SNOOP_2_WIN, MV64x60_CPU_SNOOP_3_WIN };
+
+	/* Set CPU protection & snoop windows */
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		if (bh->ci->is_enabled_32bit(bh, i)) {
+			mv64x60_set_32bit_window(bh, prot_windows[i],
+				mem_windows[i][0], mem_windows[i][1],
+				si->cpu_prot_options[i]);
+			bh->ci->enable_window_32bit(bh, prot_windows[i]);
+
+			/* Snoop windows don't exist on every chip; a zero
+			 * base_reg in the table means "not present" */
+			if (bh->ci->window_tab_32bit[cpu_snoop_windows[i]].
+							base_reg != 0) {
+				mv64x60_set_32bit_window(bh,
+					cpu_snoop_windows[i], mem_windows[i][0],
+					mem_windows[i][1],
+					si->cpu_snoop_options[i]);
+				bh->ci->enable_window_32bit(bh,
+					cpu_snoop_windows[i]);
+			}
+
+		}
+	}
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * CPU->PCI Config Routines
+ *
+ *****************************************************************************
+ */
+
+/*
+ * mv64x60_config_cpu2pci_windows()
+ *
+ * Configure the CPU->PCI windows on the bridge for each enabled PCI bus
+ * (one I/O window and three MEM windows per bus, plus their remap windows).
+ */
+static void __init
+mv64x60_config_cpu2pci_windows(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	if (ppc_md.progress)
+		ppc_md.progress("mv64x60_config_bridge: Enter", 0x0);
+
+	/*
+	 * Set up various parts of the bridge including CPU->PCI windows.
+	 * Depending on the board, there may be only one hose that needs to
+	 * be set up.
+	 */
+	if (si->pci_0.enable_bus) {
+		u32	win_tab[] = { MV64x60_CPU2PCI0_IO_WIN,
+				MV64x60_CPU2PCI0_MEM_0_WIN,
+				MV64x60_CPU2PCI0_MEM_1_WIN,
+				MV64x60_CPU2PCI0_MEM_2_WIN };
+		u32	remap_tab[] = { MV64x60_CPU2PCI0_IO_REMAP_WIN,
+				MV64x60_CPU2PCI0_MEM_0_REMAP_WIN,
+				MV64x60_CPU2PCI0_MEM_1_REMAP_WIN,
+				MV64x60_CPU2PCI0_MEM_2_REMAP_WIN };
+
+		mv64x60_set_cpu2pci_window(bh, &si->pci_0, win_tab, remap_tab);
+	}
+
+	if (si->pci_1.enable_bus) {
+		u32	win_tab[] = { MV64x60_CPU2PCI1_IO_WIN,
+				MV64x60_CPU2PCI1_MEM_0_WIN,
+				MV64x60_CPU2PCI1_MEM_1_WIN,
+				MV64x60_CPU2PCI1_MEM_2_WIN };
+		u32	remap_tab[] = { MV64x60_CPU2PCI1_IO_REMAP_WIN,
+				MV64x60_CPU2PCI1_MEM_0_REMAP_WIN,
+				MV64x60_CPU2PCI1_MEM_1_REMAP_WIN,
+				MV64x60_CPU2PCI1_MEM_2_REMAP_WIN };
+
+		mv64x60_set_cpu2pci_window(bh, &si->pci_1, win_tab, remap_tab);
+	}
+
+	return;
+} /* mv64x60_config_bridge() */
+
+/*
+ * mv64x60_set_cpu2pci_window()
+ *
+ * Configure the CPU->PCI windows for one of the PCI buses.
+ * win_tab[0]/remap_tab[0] are the I/O window; [1..3] are MEM windows 0..2.
+ * Zero-sized windows are (re)disabled.
+ */
+static void __init
+mv64x60_set_cpu2pci_window(mv64x60_handle_t *bh, mv64x60_pci_info_t *pi,
+	u32 *win_tab, u32 *remap_tab)
+{
+	int	i;
+
+	if (pi->pci_io.size > 0) {
+		mv64x60_set_32bit_window(bh, win_tab[0], pi->pci_io.cpu_base,
+			pi->pci_io.size, pi->pci_io.swap);
+		/* I/O remap is a 32-bit window (MEM remaps below are 64-bit) */
+		mv64x60_set_32bit_window(bh, remap_tab[0],
+			pi->pci_io.pci_base_lo, 0, 0);
+		bh->ci->enable_window_32bit(bh, win_tab[0]);
+	}
+	else { /* Actually, the window should already be disabled */
+		bh->ci->disable_window_32bit(bh, win_tab[0]);
+	}
+
+	for (i=0; i<3; i++) {
+		if (pi->pci_mem[i].size > 0) {
+			mv64x60_set_32bit_window(bh, win_tab[i+1],
+				pi->pci_mem[i].cpu_base, pi->pci_mem[i].size,
+				pi->pci_mem[i].swap);
+			mv64x60_set_64bit_window(bh, remap_tab[i+1],
+				pi->pci_mem[i].pci_base_hi,
+				pi->pci_mem[i].pci_base_lo, 0, 0);
+			bh->ci->enable_window_32bit(bh, win_tab[i+1]);
+		}
+		else { /* Actually, the window should already be disabled */
+			bh->ci->disable_window_32bit(bh, win_tab[i+1]);
+		}
+	}
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * PCI->System MEM Config Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_config_pci2mem_windows()
+ *
+ * Configure the PCI->Memory windows on the bridge.  For each enabled
+ * CPU->MEM window and each enabled PCI bus, program the matching access
+ * control window, optional snoop window, PCI BAR (via the chip hook) and
+ * BAR size so PCI masters see system memory at the same addresses the
+ * CPU does.
+ */
+static void __init
+mv64x60_config_pci2mem_windows(mv64x60_handle_t *bh, mv64x60_setup_info_t *si,
+	u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
+{
+	u32	i;
+	u32	pci_0_acc_windows[] = {
+			MV64x60_PCI02MEM_ACC_CNTL_0_WIN,
+			MV64x60_PCI02MEM_ACC_CNTL_1_WIN,
+			MV64x60_PCI02MEM_ACC_CNTL_2_WIN,
+			MV64x60_PCI02MEM_ACC_CNTL_3_WIN };
+	u32	pci_1_acc_windows[] = {
+			MV64x60_PCI12MEM_ACC_CNTL_0_WIN,
+			MV64x60_PCI12MEM_ACC_CNTL_1_WIN,
+			MV64x60_PCI12MEM_ACC_CNTL_2_WIN,
+			MV64x60_PCI12MEM_ACC_CNTL_3_WIN };
+	u32	pci_0_snoop_windows[] = {
+			MV64x60_PCI02MEM_SNOOP_0_WIN,
+			MV64x60_PCI02MEM_SNOOP_1_WIN,
+			MV64x60_PCI02MEM_SNOOP_2_WIN,
+			MV64x60_PCI02MEM_SNOOP_3_WIN };
+	u32	pci_1_snoop_windows[] = {
+			MV64x60_PCI12MEM_SNOOP_0_WIN,
+			MV64x60_PCI12MEM_SNOOP_1_WIN,
+			MV64x60_PCI12MEM_SNOOP_2_WIN,
+			MV64x60_PCI12MEM_SNOOP_3_WIN };
+	u32	pci_0_size[] = {
+			MV64x60_PCI0_MEM_0_SIZE, MV64x60_PCI0_MEM_1_SIZE,
+			MV64x60_PCI0_MEM_2_SIZE, MV64x60_PCI0_MEM_3_SIZE };
+	u32	pci_1_size[] = {
+			MV64x60_PCI1_MEM_0_SIZE, MV64x60_PCI1_MEM_1_SIZE,
+			MV64x60_PCI1_MEM_2_SIZE, MV64x60_PCI1_MEM_3_SIZE };
+
+	/* Clear bit 0 of PCI addr decode control so PCI->CPU remap 1:1 */
+	mv64x60_clr_bits(bh, MV64x60_PCI0_PCI_DECODE_CNTL, 0x00000001);
+	mv64x60_clr_bits(bh, MV64x60_PCI1_PCI_DECODE_CNTL, 0x00000001);
+
+	/*
+	 * Set the access control, snoop, BAR size, and window base addresses.
+	 * PCI->MEM windows base addresses will match exactly what the
+	 * CPU->MEM windows are.
+	 */
+	for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++) {
+		if (bh->ci->is_enabled_32bit(bh, i)) {
+			if (si->pci_0.enable_bus) {
+				mv64x60_set_64bit_window(bh,
+					pci_0_acc_windows[i], 0,
+					mem_windows[i][0], mem_windows[i][1],
+					si->pci_0.acc_cntl_options[i]);
+				bh->ci->enable_window_64bit(bh,
+					pci_0_acc_windows[i]);
+
+				/* Snoop windows absent on some chips (see
+				 * window tables): zero base_lo_reg == none */
+				if (bh->ci->window_tab_64bit[
+					pci_0_snoop_windows[i]].base_lo_reg
+									!= 0) {
+					mv64x60_set_64bit_window(bh,
+						pci_0_snoop_windows[i], 0,
+						mem_windows[i][0],
+						mem_windows[i][1],
+						si->pci_0.snoop_options[i]);
+					bh->ci->enable_window_64bit(bh,
+						pci_0_snoop_windows[i]);
+				}
+
+				bh->ci->set_pci2mem_window(bh->hose_a, i,
+					mem_windows[i][0]);
+				mv64x60_write(bh, pci_0_size[i],
+					mv64x60_mask(mem_windows[i][1] -1, 20));
+
+				/* Enable the window -- NOTE(review): bits in
+				 * BAR_ENABLE appear active-low (clearing bit
+				 * i enables BAR i); confirm against spec */
+				mv64x60_clr_bits(bh, MV64x60_PCI0_BAR_ENABLE,
+					1 << i);
+			}
+			if (si->pci_1.enable_bus) {
+				mv64x60_set_64bit_window(bh,
+					pci_1_acc_windows[i], 0,
+					mem_windows[i][0], mem_windows[i][1],
+					si->pci_1.acc_cntl_options[i]);
+				bh->ci->enable_window_64bit(bh,
+					pci_1_acc_windows[i]);
+
+				if (bh->ci->window_tab_64bit[
+					pci_1_snoop_windows[i]].base_lo_reg
+									!= 0) {
+					mv64x60_set_64bit_window(bh,
+						pci_1_snoop_windows[i], 0,
+						mem_windows[i][0],
+						mem_windows[i][1],
+						si->pci_1.snoop_options[i]);
+					bh->ci->enable_window_64bit(bh,
+						pci_1_snoop_windows[i]);
+				}
+
+				bh->ci->set_pci2mem_window(bh->hose_b, i,
+					mem_windows[i][0]);
+				mv64x60_write(bh, pci_1_size[i],
+					mv64x60_mask(mem_windows[i][1] -1, 20));
+
+				/* Enable the window (see note above) */
+				mv64x60_clr_bits(bh, MV64x60_PCI1_BAR_ENABLE,
+					1 << i);
+			}
+		}
+	}
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * Hose & Resource Alloc/Init Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_alloc_hoses()
+ *
+ * Allocate the PCI hose structures for the bridge's PCI buses and hook up
+ * indirect config-space access through the bridge's CONFIG_ADDR/DATA regs.
+ */
+static void __init
+mv64x60_alloc_hoses(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	/*
+	 * Alloc first hose struct even when its not to be configured b/c the
+	 * chip identification routines need to use it.
+	 */
+	bh->hose_a = pcibios_alloc_controller();
+	setup_indirect_pci(bh->hose_a,
+		bh->p_base + MV64x60_PCI0_CONFIG_ADDR,
+		bh->p_base + MV64x60_PCI0_CONFIG_DATA);
+
+	/* Second hose only when bus 1 is in use; bh->hose_b else stays NULL */
+	if (si->pci_1.enable_bus) {
+		bh->hose_b = pcibios_alloc_controller();
+		setup_indirect_pci(bh->hose_b,
+			bh->p_base + MV64x60_PCI1_CONFIG_ADDR,
+			bh->p_base + MV64x60_PCI1_CONFIG_DATA);
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_init_hoses()
+ *
+ * Initialize the PCI hose structures for the bridge's PCI hoses:
+ * map each bus's I/O space and set up resources & PCI params.
+ * NOTE(review): when both buses are enabled, isa_io_base ends up pointing
+ * at bus 0's I/O mapping (the pci_0 assignment overwrites pci_1's).
+ */
+static void __init
+mv64x60_init_hoses(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	if (si->pci_1.enable_bus) {
+		bh->io_base_b = (u32)ioremap(si->pci_1.pci_io.cpu_base,
+			si->pci_1.pci_io.size);
+		isa_io_base = bh->io_base_b;
+	}
+
+	if (si->pci_0.enable_bus) {
+		bh->io_base_a = (u32)ioremap(si->pci_0.pci_io.cpu_base,
+			si->pci_0.pci_io.size);
+		isa_io_base = bh->io_base_a;
+
+		mv64x60_init_resources(bh->hose_a, &si->pci_0, bh->io_base_a);
+		mv64x60_set_pci_params(bh->hose_a, &si->pci_0);
+	}
+
+	/* Must do here so proper isa_io_base is used in calculations */
+	if (si->pci_1.enable_bus) {
+		mv64x60_init_resources(bh->hose_b, &si->pci_1, bh->io_base_b);
+		mv64x60_set_pci_params(bh->hose_b, &si->pci_1);
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_init_resources()
+ *
+ * Calculate the offsets, etc. for the hose structures to reflect all of
+ * the address remapping that happens as you go from CPU->PCI and PCI->MEM.
+ */
+static void __init
+mv64x60_init_resources(struct pci_controller *hose, mv64x60_pci_info_t *pi,
+	u32 io_base)
+{
+	int	i;
+	/* 2 hoses; 4 resources/hose; sting <= 64 bytes; not work if > 1 chip */
+	/* (static so the resource-name strings outlive this __init routine) */
+	static char	s[2][4][64];
+
+	if (pi->pci_io.size != 0) {
+		sprintf(s[hose->index][0], "PCI hose %d I/O Space",
+			hose->index);
+		pci_init_resource(&hose->io_resource, io_base - isa_io_base,
+			io_base - isa_io_base + pi->pci_io.size - 1,
+			IORESOURCE_IO, s[hose->index][0]);
+		hose->io_space.start = pi->pci_io.pci_base_lo;
+		hose->io_space.end = pi->pci_io.pci_base_lo + pi->pci_io.size-1;
+		hose->io_base_virt = (void *)isa_io_base;
+	}
+
+	for (i=0; i<3; i++) {
+		if (pi->pci_mem[i].size != 0) {
+			sprintf(s[hose->index][i+1], "PCI hose %d MEM Space %d",
+				hose->index, i);
+			pci_init_resource(&hose->mem_resources[i],
+				pi->pci_mem[i].cpu_base,
+				pi->pci_mem[i].cpu_base + pi->pci_mem[i].size-1,
+				IORESOURCE_MEM, s[hose->index][i+1]);
+		}
+	}
+
+	/* mem_space / pci_mem_offset derived from MEM window 0 only */
+	hose->mem_space.end = pi->pci_mem[0].pci_base_lo +
+		pi->pci_mem[0].size - 1;
+	hose->pci_mem_offset = pi->pci_mem[0].cpu_base -
+		pi->pci_mem[0].pci_base_lo;
+
+	return;
+} /* mv64x60_init_resources() */
+
+/*
+ * mv64x60_set_pci_params()
+ *
+ * Configure a hose's PCI config space parameters: class code, command
+ * register bits, latency timer and cache line size of the bridge itself
+ * (device 0 function 0 on the hose).
+ */
+static void __init
+mv64x60_set_pci_params(struct pci_controller *hose, mv64x60_pci_info_t *pi)
+{
+	u32	devfn;
+	u16	u16_val;
+	u8	save_exclude;
+
+	devfn = PCI_DEVFN(0,0);
+
+	/* Temporarily allow config access to the bridge itself */
+	save_exclude = mv64x60_pci_exclude_bridge;
+	mv64x60_pci_exclude_bridge = FALSE;
+
+	/* Set class code to indicate host bridge */
+	u16_val = PCI_CLASS_BRIDGE_HOST; /* 0x0600 (host bridge) */
+	early_write_config_word(hose, 0, devfn, PCI_CLASS_DEVICE, u16_val);
+
+	/* Enable 64260 to be PCI master & respond to PCI MEM cycles */
+	early_read_config_word(hose, 0, devfn, PCI_COMMAND, &u16_val);
+	u16_val &= ~(PCI_COMMAND_IO | PCI_COMMAND_INVALIDATE |
+		PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
+	u16_val |= pi->pci_cmd_bits | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+	early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
+
+	/* Set latency timer, cache line size, clear BIST */
+	u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2);
+	early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
+
+	mv64x60_pci_exclude_bridge = save_exclude;
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * PCI Related Routine
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_enumerate_buses()
+ *
+ * If requested, enumerate the PCI buses and set the appropriate
+ * info in the hose structures.  When enumeration is not requested for a
+ * bus, the bus-number fields are read back from the bridge's P2P_CONFIG
+ * register instead, assuming firmware/someone else configured it.
+ */
+static void __init
+mv64x60_enumerate_buses(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	u32	val;
+
+	pci_dram_offset = 0; /* System mem at same addr on PCI & cpu bus */
+
+	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;
+	ppc_md.pci_swizzle = common_swizzle;
+	ppc_md.pci_map_irq = si->map_irq;
+
+	/* Now that the bridge is set up, its safe to scan the PCI buses */
+	if (si->pci_0.enable_bus) {
+		if (si->pci_0.enumerate_bus) {
+			/* Set bus number for PCI 0 to 0 */
+			val = mv64x60_read(bh, MV64x60_PCI0_P2P_CONFIG);
+			val &= 0xe0000000;	/* keep only top cntl bits */
+			val |= 0x000000ff;	/* bus 0, subordinate 0xff */
+			mv64x60_write(bh, MV64x60_PCI0_P2P_CONFIG, val);
+			/* Flush FIFO*/
+			(void)mv64x60_read(bh, MV64x60_PCI0_P2P_CONFIG);
+
+#if 0
+XXXX Different if in PCI-X mode (look at mv64360_find_bridges()) XXXX
+#endif
+
+			bh->hose_a->first_busno = 0;
+			bh->hose_a->last_busno = 0xff;
+
+			bh->hose_a->last_busno = pciauto_bus_scan(bh->hose_a,
+						bh->hose_a->first_busno);
+		}
+		else {
+			/* Assume bridge set up correctly by someone else */
+			val = mv64x60_read(bh, MV64x60_PCI0_P2P_CONFIG);
+			bh->hose_a->first_busno = (val & 0x00ff0000) >> 16;
+		}
+	}
+
+	if (si->pci_1.enable_bus) {
+		if (si->pci_1.enumerate_bus) {
+			if (si->pci_0.enable_bus) {
+				/* Bus 1 numbering continues after bus 0's */
+				bh->hose_b->first_busno =
+					bh->hose_a->last_busno + 1;
+
+				/* Set bus number for PCI 1 hose */
+				val = mv64x60_read(bh, MV64x60_PCI1_P2P_CONFIG);
+				val &= 0xe0000000;
+				val |= (bh->hose_b->first_busno << 16) | 0xff;
+				mv64x60_write(bh, MV64x60_PCI1_P2P_CONFIG, val);
+				/* Flush FIFO */
+				(void)mv64x60_read(bh, MV64x60_PCI1_P2P_CONFIG);
+			}
+			else {
+				bh->hose_b->first_busno = 0;
+			}
+
+			bh->hose_b->last_busno = 0xff;
+			bh->hose_b->last_busno = pciauto_bus_scan(bh->hose_b,
+						bh->hose_b->first_busno);
+		}
+		else {
+			/* Assume bridge set up correctly by someone else */
+			val = mv64x60_read(bh, MV64x60_PCI1_P2P_CONFIG);
+			bh->hose_b->first_busno = (val & 0x00ff0000) >> 16;
+			bh->hose_b->last_busno = 0xff; /* No way to know */
+		}
+	}
+
+	/* Fix up hose A's last_busno when it wasn't scanned above */
+	if (si->pci_0.enable_bus && !si->pci_0.enumerate_bus) {
+		if (si->pci_1.enable_bus) {
+			bh->hose_a->last_busno = bh->hose_b->first_busno - 1;
+		}
+		else {
+			bh->hose_a->last_busno = 0xff; /* No way to know */
+		}
+	}
+
+	return;
+}
+
+/*
+ * mv64x60_pci_exclude_device()
+ *
+ * This routine is used to make the bridge not appear when the
+ * PCI subsystem is accessing PCI devices (in PCI config space).
+ * Returns PCIBIOS_DEVICE_NOT_FOUND for slot 0 on a hose's first bus while
+ * the global mv64x60_pci_exclude_bridge flag is TRUE, else SUCCESSFUL.
+ */
+static int
+mv64x60_pci_exclude_device(u8 bus, u8 devfn)
+{
+	struct pci_controller	*hose;
+
+	hose = pci_bus_to_hose(bus);
+
+	/* Skip slot 0 on both hoses */
+	if ((mv64x60_pci_exclude_bridge == TRUE) &&
+		(PCI_SLOT(devfn) == 0) &&
+		(hose->first_busno == bus)) {
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	else {
+		return PCIBIOS_SUCCESSFUL;
+	}
+} /* mv64x60_pci_exclude_device() */
+
+/*
+ *****************************************************************************
+ *
+ * OCP Fixup Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64x60_fixup_ocp()
+ *
+ * Adjust the 'paddr' field in the bridge's OCP entries to reflect where they
+ * really are in the physical address space: the table holds offsets relative
+ * to the bridge's register base, so add bh->p_base to each Marvell device.
+ * Called via ocp_for_each_device() with 'arg' = the bridge handle.
+ */
+static void __init
+mv64x60_fixup_ocp(struct ocp_device *dev, void *arg)
+{
+	mv64x60_handle_t	*bh = (mv64x60_handle_t *)arg;
+
+	if (dev->def->vendor == OCP_VENDOR_MARVELL) {
+		dev->def->paddr += bh->p_base;
+	}
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * GT64260-Specific Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * gt64260_translate_size()
+ *
+ * On the GT64260, the size register is really the "top" address of the
+ * window, so translate a (base, size) pair into that top address.
+ */
+static u32 __init
+gt64260_translate_size(u32 base, u32 size, u32 num_bits)
+{
+	return base + mv64x60_mask(size - 1, num_bits);
+}
+
+/*
+ * gt64260_untranslate_size()
+ *
+ * Translate the top address of a window into a window size.
+ * Here 'size' is actually the top address as read from the register; a top
+ * below the base means the window is disabled and yields size 0.
+ */
+static u32 __init
+gt64260_untranslate_size(u32 base, u32 size, u32 num_bits)
+{
+	if (size >= base) {
+		/* top - base + one granule (granule = 1 << (32 - num_bits)) */
+		size = size - base + (1 << (32 - num_bits));
+	}
+	else {
+		size = 0;
+	}
+
+	return size;
+}
+
+/*
+ * gt64260_set_pci2mem_window()
+ *
+ * The PCI->MEM window registers are actually in PCI config space so need
+ * to set them by setting the correct config space BARs.
+ *
+ * NOTE(review): 'window' indexes reg_addrs[] directly and must be 0-3;
+ * callers are trusted not to pass a larger value.
+ */
+static void __init
+gt64260_set_pci2mem_window(struct pci_controller *hose, u32 window, u32 base)
+{
+	u32	reg_addrs[] = { 0x10, 0x14, 0x18, 0x1c };
+
+	DBG("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
+		hose->index, base);
+
+	/* 0x8 marks the BAR as prefetchable memory */
+	early_write_config_dword(hose, hose->first_busno,
+		PCI_DEVFN(0, 0), reg_addrs[window],
+		mv64x60_mask(base, 20) | 0x8);
+	return;
+}
+
+/*
+ * gt64260_is_enabled_32bit()
+ *
+ * On a GT64260, a window is enabled iff its top address is >= to its base
+ * address.
+ *
+ * Returns 1 when the window is enabled, 0 otherwise (including when the
+ * window has no base/size registers defined in gt64260_32bit_windows[]).
+ */
+static u32 __init
+gt64260_is_enabled_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	u32	rc = 0;
+
+	if ((gt64260_32bit_windows[window].base_reg != 0) &&
+	    (gt64260_32bit_windows[window].size_reg != 0) &&
+	    ((mv64x60_read(bh, gt64260_32bit_windows[window].size_reg) &
+	      ((1 << gt64260_32bit_windows[window].size_bits) - 1)) >=
+	     (mv64x60_read(bh, gt64260_32bit_windows[window].base_reg) &
+	      ((1 << gt64260_32bit_windows[window].base_bits) - 1)))){
+
+		rc = 1;
+	}
+
+	if (rc) {
+		DBG("32bit window %d is enabled\n", window);
+	}
+	else {
+		DBG("32bit window %d is disabled\n", window);
+	}
+
+	return rc;
+}
+
+/*
+ * gt64260_enable_window_32bit()
+ *
+ * On the GT64260, a window is enabled iff the top address is >= to the base
+ * address of the window.  Since the window has already been configured by
+ * the time this routine is called, we have nothing to do here.
+ */
+static void __init
+gt64260_enable_window_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	/* Intentionally empty: configuring top >= base already enabled it */
+	DBG("enable 32bit window: %d\n", window);
+	return;
+}
+
+/*
+ * gt64260_disable_window_32bit()
+ *
+ * On a GT64260, you disable a window by setting its top address to be less
+ * than its base address.
+ *
+ * Windows with no base/size register defined are silently ignored.
+ */
+static void __init
+gt64260_disable_window_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
+		window, gt64260_32bit_windows[window].base_reg,
+		gt64260_32bit_windows[window].size_reg);
+
+	if ((gt64260_32bit_windows[window].base_reg != 0) &&
+	    (gt64260_32bit_windows[window].size_reg != 0)) {
+
+		/* To disable, make bottom reg higher than top reg
+		 * (base = 0xfff > top = 0) */
+		mv64x60_write(bh, gt64260_32bit_windows[window].base_reg,0xfff);
+		mv64x60_write(bh, gt64260_32bit_windows[window].size_reg, 0);
+	}
+
+	return;
+}
+
+/*
+ * gt64260_enable_window_64bit()
+ *
+ * On the GT64260, a window is enabled iff the top address is >= to the base
+ * address of the window.  Since the window has already been configured by
+ * the time this routine is called, we have nothing to do here.
+ */
+static void __init
+gt64260_enable_window_64bit(mv64x60_handle_t *bh, u32 window)
+{
+	/* Intentionally empty, see comment above */
+	DBG("enable 64bit window: %d\n", window);
+	return;	/* Enabled when window configured (i.e., when top >= base) */
+}
+
+/*
+ * gt64260_disable_window_64bit()
+ *
+ * On a GT64260, you disable a window by setting its top address to be less
+ * than its base address.  The high half of the base is also cleared.
+ */
+static void __init
+gt64260_disable_window_64bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
+		window, gt64260_64bit_windows[window].base_lo_reg,
+		gt64260_64bit_windows[window].size_reg);
+
+	if ((gt64260_64bit_windows[window].base_lo_reg != 0) &&
+	    (gt64260_64bit_windows[window].size_reg != 0)) {
+
+		/* To disable, make bottom reg higher than top reg */
+		mv64x60_write(bh, gt64260_64bit_windows[window].base_lo_reg,
+								0xfff);
+		mv64x60_write(bh, gt64260_64bit_windows[window].base_hi_reg, 0);
+		mv64x60_write(bh, gt64260_64bit_windows[window].size_reg, 0);
+	}
+
+	return;
+}
+
+/*
+ * gt64260_disable_all_windows()
+ *
+ * The GT64260 has several windows that aren't represented in the table of
+ * windows at the top of this file.  This routine turns all of them off
+ * except for the memory controller windows, of course.
+ *
+ * Windows flagged in si->window_preserve_mask_32/_64 are left untouched
+ * so board setup code can keep firmware-configured windows alive.
+ */
+static void __init
+gt64260_disable_all_windows(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	u32	i;
+
+	/* Disable 32bit windows (don't disable cpu->mem windows) */
+	for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
+		if (!(si->window_preserve_mask_32 & (1<<i)))
+			gt64260_disable_window_32bit(bh, i);
+	}
+
+	/* Disable 64bit windows */
+	for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++) {
+		if (!(si->window_preserve_mask_64 & (1<<i)))
+			gt64260_disable_window_64bit(bh, i);
+	}
+
+	/* Turn off cpu protection windows not in gt64260_32bit_windows[] */
+	mv64x60_write(bh, GT64260_CPU_PROT_BASE_4, 0xfff);
+	mv64x60_write(bh, GT64260_CPU_PROT_SIZE_4, 0);
+	mv64x60_write(bh, GT64260_CPU_PROT_BASE_5, 0xfff);
+	mv64x60_write(bh, GT64260_CPU_PROT_SIZE_5, 0);
+	mv64x60_write(bh, GT64260_CPU_PROT_BASE_6, 0xfff);
+	mv64x60_write(bh, GT64260_CPU_PROT_SIZE_6, 0);
+	mv64x60_write(bh, GT64260_CPU_PROT_BASE_7, 0xfff);
+	mv64x60_write(bh, GT64260_CPU_PROT_SIZE_7, 0);
+
+	/* Turn off PCI->MEM access cntl wins not in gt64260_64bit_windows[] */
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0xfff);
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_HI, 0);
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_SIZE, 0);
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0xfff);
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_HI, 0);
+	mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_SIZE, 0);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_LO, 0xfff);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_HI, 0);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_SIZE, 0);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_LO, 0xfff);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_HI, 0);
+	mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_SIZE, 0);
+
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0xfff);
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_HI, 0);
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_SIZE, 0);
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0xfff);
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_HI, 0);
+	mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_SIZE, 0);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_LO, 0xfff);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_HI, 0);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_SIZE, 0);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_LO, 0xfff);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_HI, 0);
+	mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_SIZE, 0);
+
+	/* Disable all PCI-><whatever> windows */
+	mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x07ffffff);
+	mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x07ffffff);
+
+	return;
+}
+
+/*
+ * gt64260a_chip_specific_init()
+ *
+ * Implement errata work arounds for the GT64260A.
+ *
+ * Also forces PCI_COMMAND_INVALIDATE on in both hoses' bridge config
+ * space (needed for cache coherency per the comment below) and marks both
+ * MPSC ports as needing register mirroring and cache management.
+ */
+static void
+gt64260a_chip_specific_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	struct ocp_device	*dev;
+	mv64x60_ocp_mpsc_data_t	*mpsc_dp;
+	u8	save_exclude;
+	u32	val;
+
+	/* R#18 */
+	/* cpu read buffer to buffer 1 (reg 0x0448) */
+	mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
+
+	/* No longer errata so turn on */
+	/* Enable pci read/write combine, master write trigger,
+	 * disable slave sync barrier
+	 * readmultiple (reg 0x0c00 and 0x0c80)
+	 */
+	if (si->pci_0.enable_bus) {
+		mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
+			((1<<4) | (1<<5) | (1<<9) | (1<<13)));
+	}
+
+	if (si->pci_1.enable_bus) {
+		mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
+			((1<<4) | (1<<5) | (1<<9) | (1<<13)));
+	}
+
+#if	1 /* XXXX */
+	/*
+	 * Dave Wilhardt found that bit 4 in the PCI Command registers must
+	 * be set if you are using cache coherency.
+	 *
+	 * Note: he also said that bit 4 must be on in all PCI devices but
+	 *       that has not been implemented yet.
+	 */
+	/* Temporarily un-hide the bridge so we can read/write its own
+	 * config space; restored below. */
+	save_exclude = mv64x60_pci_exclude_bridge;
+	mv64x60_pci_exclude_bridge = FALSE;
+
+	early_read_config_dword(bh->hose_a,
+			bh->hose_a->first_busno,
+			PCI_DEVFN(0,0),
+			PCI_COMMAND,
+			&val);
+	val |= PCI_COMMAND_INVALIDATE;
+	early_write_config_dword(bh->hose_a,
+			bh->hose_a->first_busno,
+			PCI_DEVFN(0,0),
+			PCI_COMMAND,
+			val);
+
+	early_read_config_dword(bh->hose_b,
+			bh->hose_b->first_busno,
+			PCI_DEVFN(0,0),
+			PCI_COMMAND,
+			&val);
+	val |= PCI_COMMAND_INVALIDATE;
+	early_write_config_dword(bh->hose_b,
+			bh->hose_b->first_busno,
+			PCI_DEVFN(0,0),
+			PCI_COMMAND,
+			val);
+
+	mv64x60_pci_exclude_bridge = save_exclude;
+#endif
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 0))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->mirror_regs = 1;
+		mpsc_dp->cache_mgmt = 1;
+	}
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 1))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->mirror_regs = 1;
+		mpsc_dp->cache_mgmt = 1;
+	}
+
+	return;
+}
+
+/*
+ * gt64260b_chip_specific_init()
+ *
+ * Implement errata work arounds for the GT64260B.
+ *
+ * Unlike the 'A' rev, register mirroring is not needed, but the MPSC
+ * ports still need cache management (see comment below).
+ */
+static void
+gt64260b_chip_specific_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	struct ocp_device	*dev;
+	mv64x60_ocp_mpsc_data_t	*mpsc_dp;
+
+	/* R#18 */
+	/* cpu read buffer to buffer 1 (reg 0x0448) */
+	mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
+
+	/* No longer errata so turn on */
+	/* Enable pci read/write combine, master write trigger,
+	 * disable slave sync barrier
+	 * readmultiple (reg 0x0c00 and 0x0c80)
+	 */
+	if (si->pci_0.enable_bus) {
+		mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
+			((1<<4) | (1<<5) | (1<<9) | (1<<13)));
+	}
+
+	if (si->pci_1.enable_bus) {
+		mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
+			((1<<4) | (1<<5) | (1<<9) | (1<<13)));
+	}
+
+	mv64x60_set_bits(bh, GT64260_CPU_WB_PRIORITY_BUFFER_DEPTH, 0xf);
+
+	/*
+	 * The 64260B is not supposed to have the bug where the MPSC & ENET
+	 * can't access cache coherent regions.  However, testing has shown
+	 * that the MPSC, at least, still has this bug.
+	 */
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 0))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->cache_mgmt = 1;
+	}
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 1))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->cache_mgmt = 1;
+	}
+
+	return;
+}
+
+/*
+ *****************************************************************************
+ *
+ * MV64360-Specific Routines
+ *
+ *****************************************************************************
+ */
+/*
+ * mv64360_translate_size()
+ *
+ * On the MV64360, the size register is set similar to the size you get
+ * from a pci config space BAR register.  That is, programmed from LSB to MSB
+ * as a sequence of 1's followed by a sequence of 0's. IOW, "size -1" with the
+ * assumption that the size is a power of 2.
+ *
+ * 'base_addr' is unused here but kept so this matches the common
+ * translate-size function signature shared with the GT64260 variant.
+ */
+static u32 __init
+mv64360_translate_size(u32 base_addr, u32 size, u32 num_bits)
+{
+	return mv64x60_mask(size - 1, num_bits);
+}
+
+/*
+ * mv64360_untranslate_size()
+ *
+ * Translate the size register value of a window into a window size.
+ *
+ * Inverse of mv64360_translate_size(): shift down to the significant bits,
+ * undo the "size - 1" encoding, and shift back up.  A register value of 0
+ * yields a size of 0.
+ */
+static u32 __init
+mv64360_untranslate_size(u32 base_addr, u32 size, u32 num_bits)
+{
+	if (size > 0) {
+		size >>= (32 - num_bits);
+		size++;
+		size <<= (32 - num_bits);
+	}
+
+	return size;
+}
+
+/*
+ * mv64360_set_pci2mem_window()
+ *
+ * The PCI->MEM window registers are actually in PCI config space so need
+ * to set them by setting the correct config space BARs.
+ *
+ * Windows 0/1 live on function 0, windows 2/3 on function 1; each is a
+ * 64-bit BAR pair so the high half is zeroed before the low half is set.
+ * NOTE(review): 'window' must be 0-3 (indexes reg_addrs[] directly).
+ */
+static void __init
+mv64360_set_pci2mem_window(struct pci_controller *hose, u32 window, u32 base)
+{
+	struct {
+		u32	fcn;
+		u32	base_hi_bar;
+		u32	base_lo_bar;
+	} reg_addrs[] = {{ 0, 0x14, 0x10 }, { 0, 0x1c, 0x18 },
+		{ 1, 0x14, 0x10 }, { 1, 0x1c, 0x18 }};
+
+	DBG("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
+		hose->index, base);
+
+	early_write_config_dword(hose, hose->first_busno,
+		PCI_DEVFN(0, reg_addrs[window].fcn),
+		reg_addrs[window].base_hi_bar, 0);
+	early_write_config_dword(hose, hose->first_busno,
+		PCI_DEVFN(0, reg_addrs[window].fcn),
+		reg_addrs[window].base_lo_bar,
+		mv64x60_mask(base, 20) | 0xc);
+	return;
+}
+
+/*
+ * mv64360_is_enabled_32bit()
+ *
+ * On a MV64360, a window is enabled by either clearing a bit in the
+ * CPU BAR Enable reg or setting a bit in the window's base reg.
+ * Note that this doesn't work for windows on the PCI slave side but we don't
+ * check those so its okay.
+ *
+ * The 'extra' field selects the scheme: bit 31 set means the enable bit
+ * lives in the window's own base reg (bit number in the low byte);
+ * otherwise 'extra' is the bit number in the CPU BAR Enable reg, where a
+ * CLEAR bit means enabled.  Returns 1 if enabled, 0 otherwise.
+ */
+static u32 __init
+mv64360_is_enabled_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	u32	rc = 0;
+
+	if ((mv64360_32bit_windows[window].base_reg != 0) &&
+	    (mv64360_32bit_windows[window].size_reg != 0)) {
+
+		if (mv64360_32bit_windows[window].extra & 0x80000000) {
+			rc = (mv64x60_read(bh,
+				mv64360_32bit_windows[window].base_reg) &
+				(1 << (mv64360_32bit_windows[window].extra &
+								0xff))) != 0;
+		}
+		else {
+			rc = (mv64x60_read(bh, MV64360_CPU_BAR_ENABLE) &
+				(1 << mv64360_32bit_windows[window].extra)) ==0;
+		}
+	}
+
+	if (rc) {
+		DBG("32bit window %d is enabled\n", window);
+	}
+	else {
+		DBG("32bit window %d is disabled\n", window);
+	}
+
+	return rc;
+}
+
+/*
+ * mv64360_enable_window_32bit()
+ *
+ * On a MV64360, a window is enabled by either clearing a bit in the
+ * CPU BAR Enable reg or setting a bit in the window's base reg.
+ *
+ * See mv64360_is_enabled_32bit() for how the 'extra' field encodes which
+ * scheme and which bit a window uses.
+ */
+static void __init
+mv64360_enable_window_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("enable 32bit window: %d\n", window);
+
+	if ((mv64360_32bit_windows[window].base_reg != 0) &&
+	    (mv64360_32bit_windows[window].size_reg != 0)) {
+
+		if (mv64360_32bit_windows[window].extra & 0x80000000) {
+			mv64x60_set_bits(bh,
+				mv64360_32bit_windows[window].base_reg,
+				(1 << (mv64360_32bit_windows[window].extra &
+								0xff)));
+		}
+		else {
+			mv64x60_clr_bits(bh, MV64360_CPU_BAR_ENABLE,
+				(1 << mv64360_32bit_windows[window].extra));
+		}
+	}
+
+	return;
+}
+
+/*
+ * mv64360_disable_window_32bit()
+ *
+ * On a MV64360, a window is disabled by either setting a bit in the
+ * CPU BAR Enable reg or clearing a bit in the window's base reg.
+ *
+ * Exact mirror of mv64360_enable_window_32bit(): set-bits and clr-bits
+ * calls are swapped.
+ */
+static void __init
+mv64360_disable_window_32bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
+		window, mv64360_32bit_windows[window].base_reg,
+		mv64360_32bit_windows[window].size_reg);
+
+	if ((mv64360_32bit_windows[window].base_reg != 0) &&
+	    (mv64360_32bit_windows[window].size_reg != 0)) {
+
+		if (mv64360_32bit_windows[window].extra & 0x80000000) {
+			mv64x60_clr_bits(bh,
+				mv64360_32bit_windows[window].base_reg,
+				(1 << (mv64360_32bit_windows[window].extra &
+								0xff)));
+		}
+		else {
+			mv64x60_set_bits(bh, MV64360_CPU_BAR_ENABLE,
+				(1 << mv64360_32bit_windows[window].extra));
+		}
+	}
+
+	return;
+}
+
+/*
+ * mv64360_enable_window_64bit()
+ *
+ * On the MV64360, a 64-bit window is enabled by setting a bit in the window's
+ * base reg.
+ */
+static void __init
+mv64360_enable_window_64bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("enable 64bit window: %d\n", window);
+
+	/* For 64360, 'extra' field holds bit that enables the window */
+	if ((mv64360_64bit_windows[window].base_lo_reg!= 0) &&
+	    (mv64360_64bit_windows[window].size_reg != 0)) {
+
+		if (mv64360_64bit_windows[window].extra & 0x80000000) {
+			mv64x60_set_bits(bh,
+				mv64360_64bit_windows[window].base_lo_reg,
+				(1 << (mv64360_64bit_windows[window].extra &
+								0xff)));
+		} /* Should be no 'else' ones */
+	}
+
+	return;
+}
+
+/*
+ * mv64360_disable_window_64bit()
+ *
+ * On a MV64360, a 64-bit window is disabled by clearing a bit in the window's
+ * base reg.  Mirror of mv64360_enable_window_64bit().
+ */
+static void __init
+mv64360_disable_window_64bit(mv64x60_handle_t *bh, u32 window)
+{
+	DBG("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
+		window, mv64360_64bit_windows[window].base_lo_reg,
+		mv64360_64bit_windows[window].size_reg);
+
+	if ((mv64360_64bit_windows[window].base_lo_reg != 0) &&
+	    (mv64360_64bit_windows[window].size_reg != 0)) {
+
+		if (mv64360_64bit_windows[window].extra & 0x80000000) {
+			mv64x60_clr_bits(bh,
+				mv64360_64bit_windows[window].base_lo_reg,
+				(1 << (mv64360_64bit_windows[window].extra &
+								0xff)));
+		} /* Should be no 'else' ones */
+	}
+
+	return;
+}
+
+/*
+ * mv64360_disable_all_windows()
+ *
+ * The MV64360 has a few windows that aren't represented in the table of
+ * windows at the top of this file.  This routine turns all of them off
+ * except for the memory controller windows, of course.
+ *
+ * Windows flagged in si->window_preserve_mask_32/_64 are left untouched.
+ */
+static void __init
+mv64360_disable_all_windows(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	u32	i;
+
+	/* Disable 32bit windows (don't disable cpu->mem windows) */
+	for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
+		if (!(si->window_preserve_mask_32 & (1<<i)))
+			mv64360_disable_window_32bit(bh, i);
+	}
+
+	/* Disable 64bit windows */
+	for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++) {
+		if (!(si->window_preserve_mask_64 & (1<<i)))
+			mv64360_disable_window_64bit(bh, i);
+	}
+
+	/* Turn off PCI->MEM access cntl wins not in mv64360_64bit_windows[] */
+	/* NOTE(review): a clear-mask of 0 makes these four calls no-ops --
+	 * confirm whether an enable bit was meant to be cleared here. */
+	mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0);
+	mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0);
+	mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0);
+	mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0);
+
+	/* Disable all PCI-><whatever> windows */
+	mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x0000f9ff);
+	mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x0000f9ff);
+
+	return;
+}
+
+/*
+ * mv64360_chip_specific_init()
+ *
+ * No errata work arounds for the MV64360 implemented at this point.
+ * Just flags both MPSC ports' baud-rate generators as tunable.
+ */
+static void
+mv64360_chip_specific_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	struct ocp_device	*dev;
+	mv64x60_ocp_mpsc_data_t	*mpsc_dp;
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 0))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->brg_can_tune = 1;
+	}
+
+	if ((dev = ocp_find_device(OCP_VENDOR_MARVELL, OCP_FUNC_MPSC, 1))
+								!= NULL) {
+		mpsc_dp = (mv64x60_ocp_mpsc_data_t *)dev->def->additions;
+		mpsc_dp->brg_can_tune = 1;
+	}
+
+	return;
+}
+
+/*
+ * mv64460_chip_specific_init()
+ *
+ * No errata work arounds for the MV64460 implemented at this point.
+ * Currently just defers to the MV64360 init.
+ */
+static void
+mv64460_chip_specific_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si)
+{
+	mv64360_chip_specific_init(bh, si); /* XXXX check errata */
+	return;
+}
--- /dev/null
+/*
+ * arch/ppc/syslib/mv64x60_ocp.c
+ *
+ * Common OCP definitions for the Marvell GT64260/MV64360/MV64460/...
+ * line of host bridges.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/mv64x60.h>
+#include <asm/ocp.h>
+
+/* Default platform data for MPSC (serial) port 0: 9600 8N1, no flow
+ * control, SDMA 0 interrupt, BRG clocked from TCLK at 100 MHz. */
+static mv64x60_ocp_mpsc_data_t mv64x60_ocp_mpsc0_def = {
+	.mirror_regs		= 0,
+	.cache_mgmt		= 0,
+	.max_idle		= 0,
+	.default_baud		= 9600,
+	.default_bits		= 8,
+	.default_parity		= 'n',
+	.default_flow		= 'n',
+	.chr_1_val		= 0x00000000,
+	.chr_2_val		= 0x00000000,
+	.chr_10_val		= 0x00000003,
+	.mpcr_val		= 0,
+	.mrr_val		= 0x3ffffe38,
+	.rcrr_val		= 0,
+	.tcrr_val		= 0,
+	.intr_mask_val		= 0,
+	.bcr_val		= 0,
+	.sdma_irq		= MV64x60_IRQ_SDMA_0,
+	.brg_can_tune		= 0,
+	.brg_clk_src		= 8,		/* Default to TCLK */
+	.brg_clk_freq		= 100000000,	/* Default to 100 MHz */
+};
+/* Default platform data for MPSC (serial) port 1; identical to port 0
+ * except for the SDMA interrupt line. */
+static mv64x60_ocp_mpsc_data_t mv64x60_ocp_mpsc1_def = {
+	.mirror_regs		= 0,
+	.cache_mgmt		= 0,
+	.max_idle		= 0,
+	.default_baud		= 9600,
+	.default_bits		= 8,
+	.default_parity		= 'n',
+	.default_flow		= 'n',
+	.chr_1_val		= 0x00000000,
+	.chr_2_val		= 0x00000000,
+	.chr_10_val		= 0x00000003,
+	.mpcr_val		= 0,
+	.mrr_val		= 0x3ffffe38,
+	.rcrr_val		= 0,
+	.tcrr_val		= 0,
+	.intr_mask_val		= 0,
+	.bcr_val		= 0,
+	.sdma_irq		= MV64x60_IRQ_SDMA_1,
+	.brg_can_tune		= 0,
+	.brg_clk_src		= 8,		/* Default to TCLK */
+	.brg_clk_freq		= 100000000,	/* Default to 100 MHz */
+};
+MV64x60_OCP_SYSFS_MPSC_DATA()
+
+/* Table of the bridge's on-chip peripherals exported via OCP; terminated
+ * by an OCP_VENDOR_INVALID sentinel entry. */
+struct ocp_def core_ocp[] = {
+	/* Base address for the block of bridge's regs */
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x00 */
+	  .function	= OCP_FUNC_HB,
+	  .index	= 0,
+	  .paddr	= 0,
+	  .pm		= OCP_CPM_NA,
+	},
+	/* 10/100 Ethernet controller */
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x01 */
+	  .function	= OCP_FUNC_EMAC,
+	  .index	= 0,
+	  .paddr	= GT64260_ENET_0_OFFSET,
+	  .irq		= MV64x60_IRQ_ETH_0,
+	  .pm		= OCP_CPM_NA,
+	},
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x02 */
+	  .function	= OCP_FUNC_EMAC,
+	  .index	= 1,
+	  .paddr	= GT64260_ENET_1_OFFSET,
+	  .irq		= MV64x60_IRQ_ETH_1,
+	  .pm		= OCP_CPM_NA,
+	},
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x03 */
+	  .function	= OCP_FUNC_EMAC,
+	  .index	= 2,
+	  .paddr	= GT64260_ENET_2_OFFSET,
+	  .irq		= MV64x60_IRQ_ETH_2,
+	  .pm		= OCP_CPM_NA,
+	},
+	/* Multi-Protocol Serial Controller (MPSC) */
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x04 */
+	  .function	= OCP_FUNC_MPSC,
+	  .index	= 0,
+	  .paddr	= MV64x60_MPSC_0_OFFSET,
+	  .irq		= MV64x60_IRQ_MPSC_0,
+	  .pm		= OCP_CPM_NA,
+	  .additions	= &mv64x60_ocp_mpsc0_def,
+	  .show		= &mv64x60_ocp_show_mpsc
+	},
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x05 */
+	  .function	= OCP_FUNC_MPSC,
+	  .index	= 1,
+	  .paddr	= MV64x60_MPSC_1_OFFSET,
+	  .irq		= MV64x60_IRQ_MPSC_1,
+	  .pm		= OCP_CPM_NA,
+	  .additions	= &mv64x60_ocp_mpsc1_def,
+	  .show		= &mv64x60_ocp_show_mpsc
+	},
+	/* Inter-Integrated Circuit Controller */
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x06 */
+	  .function	= OCP_FUNC_I2C,
+	  .index	= 0,
+	  .paddr	= GT64260_I2C_OFFSET,
+	  .irq		= MV64x60_IRQ_I2C,
+	  .pm		= OCP_CPM_NA,
+	},
+	/* Programmable Interrupt Controller */
+	{ .vendor	= OCP_VENDOR_MARVELL,	/* 0x07 */
+	  .function	= OCP_FUNC_PIC,
+	  .index	= 0,
+	  .paddr	= GT64260_IC_OFFSET,
+	  .pm		= OCP_CPM_NA,
+	},
+	{ .vendor	= OCP_VENDOR_INVALID
+	}
+};
* Externally called, however, it takes an IPI number (0...OPENPIC_NUM_IPI)
* and not a system-wide interrupt number
*/
-void openpic_cause_IPI(u_int ipi, cpumask_t cpumask)
+void openpic_cause_IPI(u_int ipi, u_int cpumask)
{
- cpumask_t phys;
DECL_THIS_CPU;
CHECK_THIS_CPU;
check_arg_ipi(ipi);
- phys = physmask(cpumask);
openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
- cpus_addr(physmask(cpumask))[0]);
+ physmask(cpumask));
}
void openpic_request_IPIs(void)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset,
openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", NULL);
+ "IPI0 (call function)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+1,
openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", NULL);
+ "IPI1 (reschedule)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+2,
openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (invalidate tlb)", NULL);
+ "IPI2 (invalidate tlb)", 0);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+3,
openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (xmon break)", NULL);
+ "IPI3 (xmon break)", 0);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(OPENPIC_VEC_IPI+open_pic_irq_offset+i);
spin_lock(&openpic_setup_lock);
#ifdef CONFIG_IRQ_ALL_CPUS
- cpu_set(smp_hw_index[smp_processor_id()], msk);
+ cpu_set(smp_hw_index[smp_processor_id()], mask);
/* let the openpic know we want intrs. default affinity
* is 0xffffffff until changed via /proc
void
smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
{
- cpumask_t mask = CPU_MASK_ALL;
/* make sure we're sending something that translates to an IPI */
if (msg > 0x3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
}
switch (target) {
case MSG_ALL:
- openpic_cause_IPI(msg, mask);
+ openpic_cause_IPI(msg, 0xffffffff);
break;
case MSG_ALL_BUT_SELF:
- cpu_clear(smp_processor_id(), mask);
- openpic_cause_IPI(msg, mask);
+ openpic_cause_IPI(msg,
+ 0xffffffff & ~(1 << smp_processor_id()));
break;
default:
- openpic_cause_IPI(msg, cpumask_of_cpu(target));
+ openpic_cause_IPI(msg, 1<<target);
break;
}
}
/*
- * arch/ppc/kernel/ppc4xx_dma.c
+ * Author: Pete Popov <ppopov@mvista.com> or source@mvista.com
*
- * IBM PPC4xx DMA engine core library
+ * arch/ppc/kernel/ppc405_dma.c
*
- * Copyright 2000-2004 MontaVista Software Inc.
+ * 2000 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
*
- * Cleaned up and converted to new DCR access
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
+ * IBM 405 DMA Controller Functions
*/
#include <linux/config.h>
#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/io.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ppc4xx_dma.h>
-
-ppc_dma_ch_t dma_channels[MAX_PPC4xx_DMA_CHANNELS];
-
-int
-ppc4xx_get_dma_status(void)
-{
- return (mfdcr(DCRN_DMASR));
-}
-
-void
-ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_src_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA64BIT
- mtdcr(DCRN_DMASAH0 + dmanr*2, (u32)(src_addr >> 32));
-#else
- mtdcr(DCRN_DMASA0 + dmanr*2, (u32)src_addr);
-#endif
-}
-
-void
-ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_dst_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA64BIT
- mtdcr(DCRN_DMADAH0 + dmanr*2, (u32)(dst_addr >> 32));
-#else
- mtdcr(DCRN_DMADA0 + dmanr*2, (u32)dst_addr);
-#endif
-}
-
-void
-ppc4xx_enable_dma(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
- DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
- DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
- DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
-
- if (p_dma_ch->in_use) {
- printk("enable_dma: channel %d in use\n", dmanr);
- return;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("enable_dma: bad channel: %d\n", dmanr);
- return;
- }
-
- if (p_dma_ch->mode == DMA_MODE_READ) {
- /* peripheral to memory */
- ppc4xx_set_src_addr(dmanr, 0);
- ppc4xx_set_dst_addr(dmanr, p_dma_ch->addr);
- } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
- /* memory to peripheral */
- ppc4xx_set_src_addr(dmanr, p_dma_ch->addr);
- ppc4xx_set_dst_addr(dmanr, 0);
- }
-
- /* for other xfer modes, the addresses are already set */
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
-
- control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
- if (p_dma_ch->mode == DMA_MODE_MM) {
- /* software initiated memory to memory */
- control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
- }
-
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- /*
- * Clear the CS, TS, RI bits for the channel from DMASR. This
- * has been observed to happen correctly only after the mode and
- * ETD/DCE bits in DMACRx are set above. Must do this before
- * enabling the channel.
- */
-
- mtdcr(DCRN_DMASR, status_bits[dmanr]);
-
- /*
- * For device-paced transfers, Terminal Count Enable apparently
- * must be on, and this must be turned on after the mode, etc.
- * bits are cleared above (at least on Redwood-6).
- */
-
- if ((p_dma_ch->mode == DMA_MODE_MM_DEVATDST) ||
- (p_dma_ch->mode == DMA_MODE_MM_DEVATSRC))
- control |= DMA_TCE_ENABLE;
-
- /*
- * Now enable the channel.
- */
-
- control |= (p_dma_ch->mode | DMA_CE_ENABLE);
-
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- p_dma_ch->in_use = 1;
-}
-
-void
-ppc4xx_disable_dma(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (!p_dma_ch->in_use) {
- printk("disable_dma: channel %d not in use\n", dmanr);
- return;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("disable_dma: bad channel: %d\n", dmanr);
- return;
- }
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control &= ~DMA_CE_ENABLE;
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- p_dma_ch->in_use = 0;
-}
-
-/*
- * Sets the dma mode for single DMA transfers only.
- * For scatter/gather transfers, the mode is passed to the
- * alloc_dma_handle() function as one of the parameters.
- *
- * The mode is simply saved and used later. This allows
- * the driver to call set_dma_mode() and set_dma_addr() in
- * any order.
- *
- * Valid mode values are:
- *
- * DMA_MODE_READ peripheral to memory
- * DMA_MODE_WRITE memory to peripheral
- * DMA_MODE_MM memory to memory
- * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
- * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
- */
-int
-ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("set_dma_mode: bad channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->mode = mode;
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Sets the DMA Count register. Note that 'count' is in bytes.
- * However, the DMA Count register counts the number of "transfers",
- * where each transfer is equal to the bus width. Thus, count
- * MUST be a multiple of the bus width.
- */
-void
-ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (count & 0x1)
- error = 1;
- break;
- case PW_32:
- if (count & 0x3)
- error = 1;
- break;
- case PW_64:
- if (count & 0x7)
- error = 1;
- break;
- default:
- printk("set_dma_count: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk
- ("Warning: set_dma_count count 0x%x bus width %d\n",
- count, p_dma_ch->pwidth);
- }
-#endif
+#include <asm/ppc405_dma.h>
- count = count >> p_dma_ch->shift;
-
- mtdcr(DCRN_DMACT0 + (dmanr * 0x8), count);
-}
/*
- * Returns the number of bytes left to be transfered.
- * After a DMA transfer, this should return zero.
- * Reading this while a DMA transfer is still in progress will return
- * unpredictable results.
+ * Function prototypes
*/
-int
-ppc4xx_get_dma_residue(unsigned int dmanr)
-{
- unsigned int count;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_dma_residue: bad channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+int hw_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+int init_dma_channel(unsigned int);
+int get_channel_config(unsigned int, ppc_dma_ch_t *);
+int set_channel_priority(unsigned int, unsigned int);
+unsigned int get_peripheral_width(unsigned int);
+int alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+void free_dma_handle(sgl_handle_t);
- count = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
- return (count << p_dma_ch->shift);
-}
-
-/*
- * Sets the DMA address for a memory to peripheral or peripheral
- * to memory transfer. The address is just saved in the channel
- * structure for now and used later in enable_dma().
- */
-void
-ppc4xx_set_dma_addr(unsigned int dmanr, phys_addr_t addr)
-{
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_dma_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if ((unsigned) addr & 0x1)
- error = 1;
- break;
- case PW_32:
- if ((unsigned) addr & 0x3)
- error = 1;
- break;
- case PW_64:
- if ((unsigned) addr & 0x7)
- error = 1;
- break;
- default:
- printk("ppc4xx_set_dma_addr: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk("Warning: ppc4xx_set_dma_addr addr 0x%x bus width %d\n",
- addr, p_dma_ch->pwidth);
- }
-#endif
-
- /* save dma address and program it later after we know the xfer mode */
- p_dma_ch->addr = addr;
-}
-
-/*
- * Sets both DMA addresses for a memory to memory transfer.
- * For memory to peripheral or peripheral to memory transfers
- * the function set_dma_addr() should be used instead.
- */
-void
-ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
- phys_addr_t dst_dma_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_dma_addr2: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef DEBUG_4xxDMA
- {
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- int error = 0;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (((unsigned) src_dma_addr & 0x1) ||
- ((unsigned) dst_dma_addr & 0x1)
- )
- error = 1;
- break;
- case PW_32:
- if (((unsigned) src_dma_addr & 0x3) ||
- ((unsigned) dst_dma_addr & 0x3)
- )
- error = 1;
- break;
- case PW_64:
- if (((unsigned) src_dma_addr & 0x7) ||
- ((unsigned) dst_dma_addr & 0x7)
- )
- error = 1;
- break;
- default:
- printk("ppc4xx_set_dma_addr2: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return;
- }
- if (error)
- printk
- ("Warning: ppc4xx_set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
- src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
- }
-#endif
-
- ppc4xx_set_src_addr(dmanr, src_dma_addr);
- ppc4xx_set_dst_addr(dmanr, dst_dma_addr);
-}
-
-/*
- * Enables the channel interrupt.
- *
- * If performing a scatter/gatter transfer, this function
- * MUST be called before calling alloc_dma_handle() and building
- * the sgl list. Otherwise, interrupts will not be enabled, if
- * they were previously disabled.
- */
-int
-ppc4xx_enable_dma_interrupt(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_enable_dma_interrupt: bad channel: %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->int_enable = 1;
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Disables the channel interrupt.
- *
- * If performing a scatter/gatter transfer, this function
- * MUST be called before calling alloc_dma_handle() and building
- * the sgl list. Otherwise, interrupts will not be disabled, if
- * they were previously enabled.
- */
-int
-ppc4xx_disable_dma_interrupt(unsigned int dmanr)
-{
- unsigned int control;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_disable_dma_interrupt: bad channel: %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->int_enable = 0;
-
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- return DMA_STATUS_GOOD;
-}
+ppc_dma_ch_t dma_channels[MAX_405GP_DMA_CHANNELS];
/*
* Configures a DMA channel, including the peripheral bus width, if a
* called from platform specific init code. The driver should not need to
* call this function.
*/
-int
-ppc4xx_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t * p_init)
+int hw_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t *p_init)
{
- unsigned int polarity;
- uint32_t control = 0;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
- DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */
- DMA_MODE_WRITE = 0; /* Memory to Peripheral */
-
- if (!p_init) {
- printk("ppc4xx_init_dma_channel: NULL p_init\n");
- return DMA_STATUS_NULL_POINTER;
- }
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_init_dma_channel: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int polarity;
+ uint32_t control = 0;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+ if (!p_init) {
+ printk("hw_init_dma_channel: NULL p_init\n");
+ return DMA_STATUS_NULL_POINTER;
+ }
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("hw_init_dma_channel: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- /* Setup the control register based on the values passed to
- * us in p_init. Then, over-write the control register with this
- * new value.
- */
- control |= SET_DMA_CONTROL;
-
- /* clear all polarity signals and then "or" in new signal levels */
- polarity &= ~GET_DMA_POLARITY(dmanr);
- polarity |= p_dma_ch->polarity;
+ /* Setup the control register based on the values passed to
+ * us in p_init. Then, over-write the control register with this
+ * new value.
+ */
+
+ control |= (
+ SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */
+ SET_DMA_PL(p_init->pl) | /* peripheral location */
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */
+ );
+
+ switch (dmanr) {
+ case 0:
+ /* clear all polarity signals and then "or" in new signal levels */
+ polarity &= ~(DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
+ polarity |= p_dma_ch->polarity;
#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
+ mtdcr(DCRN_POL, polarity);
#endif
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
-
- /* save these values in our dma channel structure */
- memcpy(p_dma_ch, p_init, sizeof (ppc_dma_ch_t));
-
- /*
- * The peripheral width values written in the control register are:
- * PW_8 0
- * PW_16 1
- * PW_32 2
- * PW_64 3
- *
- * Since the DMA count register takes the number of "transfers",
- * we need to divide the count sent to us in certain
- * functions by the appropriate number. It so happens that our
- * right shift value is equal to the peripheral width value.
- */
- p_dma_ch->shift = p_init->pwidth;
-
- /*
- * Save the control word for easy access.
- */
- p_dma_ch->control = control;
-
- mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
- return DMA_STATUS_GOOD;
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ polarity &= ~(DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ polarity &= ~(DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ polarity &= ~(DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
+ polarity |= p_dma_ch->polarity;
+#if DCRN_POL > 0
+ mtdcr(DCRN_POL, polarity);
+#endif
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ /* save these values in our dma channel structure */
+ memcpy(p_dma_ch, p_init, sizeof(ppc_dma_ch_t));
+
+ /*
+ * The peripheral width values written in the control register are:
+ * PW_8 0
+ * PW_16 1
+ * PW_32 2
+ * PW_64 3
+ *
+ * Since the DMA count register takes the number of "transfers",
+ * we need to divide the count sent to us in certain
+ * functions by the appropriate number. It so happens that our
+ * right shift value is equal to the peripheral width value.
+ */
+ p_dma_ch->shift = p_init->pwidth;
+
+ /*
+ * Save the control word for easy access.
+ */
+ p_dma_ch->control = control;
+
+ mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
+ return DMA_STATUS_GOOD;
}
+
+
+
/*
* This function returns the channel configuration.
*/
-int
-ppc4xx_get_channel_config(unsigned int dmanr, ppc_dma_ch_t * p_dma_ch)
+int get_channel_config(unsigned int dmanr, ppc_dma_ch_t *p_dma_ch)
{
- unsigned int polarity;
- unsigned int control;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_channel_config: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int polarity;
+ unsigned int control;
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- p_dma_ch->polarity = polarity & GET_DMA_POLARITY(dmanr);
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
-
- p_dma_ch->cp = GET_DMA_PRIORITY(control);
- p_dma_ch->pwidth = GET_DMA_PW(control);
- p_dma_ch->psc = GET_DMA_PSC(control);
- p_dma_ch->pwc = GET_DMA_PWC(control);
- p_dma_ch->phc = GET_DMA_PHC(control);
- p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
- p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
- p_dma_ch->shift = GET_DMA_PW(control);
-
-#ifdef CONFIG_PPC4xx_EDMA
- p_dma_ch->pf = GET_DMA_PREFETCH(control);
-#else
- p_dma_ch->ch_enable = GET_DMA_CH(control);
- p_dma_ch->ece_enable = GET_DMA_ECE(control);
- p_dma_ch->tcd_disable = GET_DMA_TCD(control);
-#endif
- return DMA_STATUS_GOOD;
+ switch (dmanr) {
+ case 0:
+ p_dma_ch->polarity =
+ polarity & (DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
+ control = mfdcr(DCRN_DMACR0);
+ break;
+ case 1:
+ p_dma_ch->polarity =
+ polarity & (DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
+ control = mfdcr(DCRN_DMACR1);
+ break;
+ case 2:
+ p_dma_ch->polarity =
+ polarity & (DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
+ control = mfdcr(DCRN_DMACR2);
+ break;
+ case 3:
+ p_dma_ch->polarity =
+ polarity & (DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
+ control = mfdcr(DCRN_DMACR3);
+ break;
+ default:
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ p_dma_ch->cp = GET_DMA_PRIORITY(control);
+ p_dma_ch->pwidth = GET_DMA_PW(control);
+ p_dma_ch->psc = GET_DMA_PSC(control);
+ p_dma_ch->pwc = GET_DMA_PWC(control);
+ p_dma_ch->phc = GET_DMA_PHC(control);
+ p_dma_ch->pf = GET_DMA_PREFETCH(control);
+ p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+ p_dma_ch->shift = GET_DMA_PW(control);
+
+ return DMA_STATUS_GOOD;
}
/*
* PRIORITY_HIGH
*
*/
-int
-ppc4xx_set_channel_priority(unsigned int dmanr, unsigned int priority)
+int set_channel_priority(unsigned int dmanr, unsigned int priority)
{
- unsigned int control;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_channel_priority: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
+ unsigned int control;
+
+#ifdef DEBUG_405DMA
+ if ( (priority != PRIORITY_LOW) &&
+ (priority != PRIORITY_MID_LOW) &&
+ (priority != PRIORITY_MID_HIGH) &&
+ (priority != PRIORITY_HIGH)) {
+ printk("set_channel_priority: bad priority: 0x%x\n", priority);
+ }
+#endif
- if ((priority != PRIORITY_LOW) &&
- (priority != PRIORITY_MID_LOW) &&
- (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
- printk("ppc4xx_set_channel_priority: bad priority: 0x%x\n", priority);
- }
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR0, control);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR1, control);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR2, control);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ control|= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR3, control);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("set_channel_priority: bad channel: %d\n", dmanr);
+#endif
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ return DMA_STATUS_GOOD;
+}
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- control |= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
- return DMA_STATUS_GOOD;
-}
/*
* Returns the width of the peripheral attached to this channel. This assumes
*
* The function returns 0 on error.
*/
-unsigned int
-ppc4xx_get_peripheral_width(unsigned int dmanr)
+unsigned int get_peripheral_width(unsigned int dmanr)
{
- unsigned int control;
+ unsigned int control;
+
+ switch (dmanr) {
+ case 0:
+ control = mfdcr(DCRN_DMACR0);
+ break;
+ case 1:
+ control = mfdcr(DCRN_DMACR1);
+ break;
+ case 2:
+ control = mfdcr(DCRN_DMACR2);
+ break;
+ case 3:
+ control = mfdcr(DCRN_DMACR3);
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("get_peripheral_width: bad channel: %d\n", dmanr);
+#endif
+ return 0;
+ }
+ return(GET_DMA_PW(control));
+}
+
+
+
+
+/*
+ * Create a scatter/gather list handle. This is simply a structure which
+ * describes a scatter/gather list.
+ *
+ * A handle is returned in "handle" which the driver should save in order to
+ * be able to access this list later. A chunk of memory will be allocated
+ * to be used by the API for internal management purposes, including managing
+ * the sg list and allocating memory for the sgl descriptors. One page should
+ * be more than enough for that purpose. Perhaps it's a bit wasteful to use
+ * a whole page for a single sg list, but most likely there will be only one
+ * sg list per channel.
+ *
+ * Interrupt notes:
+ * Each sgl descriptor has a copy of the DMA control word which the DMA engine
+ * loads in the control register. The control word has a "global" interrupt
+ * enable bit for that channel. Interrupts are further qualified by a few bits
+ * in the sgl descriptor count register. In order to setup an sgl, we have to
+ * know ahead of time whether or not interrupts will be enabled at the completion
+ * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
+ * be called before calling alloc_dma_handle(). If the interrupt mode will never
+ * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
+ * do not have to be called -- interrupts will be enabled or disabled based
+ * on how the channel was configured after powerup by the hw_init_dma_channel()
+ * function. Each sgl descriptor will be setup to interrupt if an error occurs;
+ * however, only the last descriptor will be setup to interrupt. Thus, an
+ * interrupt will occur (if interrupts are enabled) only after the complete
+ * sgl transfer is done.
+ */
+int alloc_dma_handle(sgl_handle_t *phandle, unsigned int mode, unsigned int dmanr)
+{
+ sgl_list_info_t *psgl;
+ dma_addr_t dma_addr;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+ uint32_t sg_command;
+ void *ret;
+
+#ifdef DEBUG_405DMA
+ if (!phandle) {
+ printk("alloc_dma_handle: null handle pointer\n");
+ return DMA_STATUS_NULL_POINTER;
+ }
+ switch (mode) {
+ case DMA_MODE_READ:
+ case DMA_MODE_WRITE:
+ case DMA_MODE_MM:
+ case DMA_MODE_MM_DEVATSRC:
+ case DMA_MODE_MM_DEVATDST:
+ break;
+ default:
+ printk("alloc_dma_handle: bad mode 0x%x\n", mode);
+ return DMA_STATUS_BAD_MODE;
+ }
+ if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+ printk("alloc_dma_handle: invalid channel 0x%x\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+#endif
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_get_peripheral_width: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
+ /* Get a page of memory, which is zeroed out by pci_alloc_consistent() */
+
+/* wrong not a pci device - armin */
+ /* psgl = (sgl_list_info_t *) pci_alloc_consistent(NULL, SGL_LIST_SIZE, &dma_addr);
+*/
+
+ ret = consistent_alloc(GFP_ATOMIC |GFP_DMA, SGL_LIST_SIZE, &dma_addr);
+ if (ret != NULL) {
+ memset(ret, 0,SGL_LIST_SIZE );
+ psgl = (sgl_list_info_t *) ret;
}
- control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- return (GET_DMA_PW(control));
+ if (psgl == NULL) {
+ *phandle = (sgl_handle_t)NULL;
+ return DMA_STATUS_OUT_OF_MEMORY;
+ }
+
+ psgl->dma_addr = dma_addr;
+ psgl->dmanr = dmanr;
+
+ /*
+	 * Modify and save the control word. These words will get written to each sgl
+ * descriptor. The DMA engine then loads this control word into the control
+ * register every time it reads a new descriptor.
+ */
+ psgl->control = p_dma_ch->control;
+ psgl->control &= ~(DMA_TM_MASK | DMA_TD); /* clear all "mode" bits first */
+ psgl->control |= (mode | DMA_CH_ENABLE); /* save the control word along with the mode */
+
+ if (p_dma_ch->int_enable) {
+ psgl->control |= DMA_CIE_ENABLE; /* channel interrupt enabled */
+ }
+ else {
+ psgl->control &= ~DMA_CIE_ENABLE;
+ }
+
+#if DCRN_ASGC > 0
+ sg_command = mfdcr(DCRN_ASGC);
+ switch (dmanr) {
+ case 0:
+ sg_command |= SSG0_MASK_ENABLE;
+ break;
+ case 1:
+ sg_command |= SSG1_MASK_ENABLE;
+ break;
+ case 2:
+ sg_command |= SSG2_MASK_ENABLE;
+ break;
+ case 3:
+ sg_command |= SSG3_MASK_ENABLE;
+ break;
+ default:
+#ifdef DEBUG_405DMA
+ printk("alloc_dma_handle: bad channel: %d\n", dmanr);
+#endif
+ free_dma_handle((sgl_handle_t)psgl);
+ *phandle = (sgl_handle_t)NULL;
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ mtdcr(DCRN_ASGC, sg_command); /* enable writing to this channel's sgl control bits */
+#else
+ (void)sg_command;
+#endif
+ psgl->sgl_control = SG_ERI_ENABLE | SG_LINK; /* sgl descriptor control bits */
+
+ if (p_dma_ch->int_enable) {
+ if (p_dma_ch->tce_enable)
+ psgl->sgl_control |= SG_TCI_ENABLE;
+ else
+ psgl->sgl_control |= SG_ETI_ENABLE;
+ }
+
+ *phandle = (sgl_handle_t)psgl;
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
+ * The list must be empty (contain no elements).
+ */
+void free_dma_handle(sgl_handle_t handle)
+{
+ sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+
+ if (!handle) {
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: got NULL\n");
+#endif
+ return;
+ }
+ else if (psgl->phead) {
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: list not empty\n");
+#endif
+ return;
+ }
+ else if (!psgl->dma_addr) { /* should never happen */
+#ifdef DEBUG_405DMA
+ printk("free_dma_handle: no dma address\n");
+#endif
+ return;
+ }
+
+ /* wrong not a PCI device -armin */
+ /* pci_free_consistent(NULL, SGL_LIST_SIZE, (void *)psgl, psgl->dma_addr); */
+ // free_pages((unsigned long)psgl, get_order(SGL_LIST_SIZE));
+ consistent_free((void *)psgl);
+
+
}
-EXPORT_SYMBOL(ppc4xx_init_dma_channel);
-EXPORT_SYMBOL(ppc4xx_get_channel_config);
-EXPORT_SYMBOL(ppc4xx_set_channel_priority);
-EXPORT_SYMBOL(ppc4xx_get_peripheral_width);
+EXPORT_SYMBOL(hw_init_dma_channel);
+EXPORT_SYMBOL(get_channel_config);
+EXPORT_SYMBOL(set_channel_priority);
+EXPORT_SYMBOL(get_peripheral_width);
+EXPORT_SYMBOL(alloc_dma_handle);
+EXPORT_SYMBOL(free_dma_handle);
EXPORT_SYMBOL(dma_channels);
-EXPORT_SYMBOL(ppc4xx_set_src_addr);
-EXPORT_SYMBOL(ppc4xx_set_dst_addr);
-EXPORT_SYMBOL(ppc4xx_set_dma_addr);
-EXPORT_SYMBOL(ppc4xx_set_dma_addr2);
-EXPORT_SYMBOL(ppc4xx_enable_dma);
-EXPORT_SYMBOL(ppc4xx_disable_dma);
-EXPORT_SYMBOL(ppc4xx_set_dma_mode);
-EXPORT_SYMBOL(ppc4xx_set_dma_count);
-EXPORT_SYMBOL(ppc4xx_get_dma_residue);
-EXPORT_SYMBOL(ppc4xx_enable_dma_interrupt);
-EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
-EXPORT_SYMBOL(ppc4xx_get_dma_status);
+++ /dev/null
-/*
- * arch/ppc/kernel/ppc4xx_sgdma.c
- *
- * IBM PPC4xx DMA engine scatter/gather library
- *
- * Copyright 2002-2003 MontaVista Software Inc.
- *
- * Cleaned up and converted to new DCR access
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/ppc4xx_dma.h>
-
-void
-ppc4xx_set_sg_addr(int dmanr, phys_addr_t sg_addr)
-{
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_set_sg_addr: bad channel: %d\n", dmanr);
- return;
- }
-
-#ifdef PPC4xx_DMA_64BIT
- mtdcr(DCRN_ASGH0 + (dmanr * 0x8), (u32)(sg_addr >> 32));
-#endif
- mtdcr(DCRN_ASG0 + (dmanr * 0x8), (u32)sg_addr);
-}
-
-/*
- * Add a new sgl descriptor to the end of a scatter/gather list
- * which was created by alloc_dma_handle().
- *
- * For a memory to memory transfer, both dma addresses must be
- * valid. For a peripheral to memory transfer, one of the addresses
- * must be set to NULL, depending on the direction of the transfer:
- * memory to peripheral: set dst_addr to NULL,
- * peripheral to memory: set src_addr to NULL.
- */
-int
-ppc4xx_add_dma_sgl(sgl_handle_t handle, phys_addr_t src_addr, phys_addr_t dst_addr,
- unsigned int count)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
-
- if (!handle) {
- printk("ppc4xx_add_dma_sgl: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- }
-
- if (psgl->dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_add_dma_sgl: bad channel: %d\n", psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch = &dma_channels[psgl->dmanr];
-
-#ifdef DEBUG_4xxDMA
- {
- int error = 0;
- unsigned int aligned =
- (unsigned) src_addr | (unsigned) dst_addr | count;
- switch (p_dma_ch->pwidth) {
- case PW_8:
- break;
- case PW_16:
- if (aligned & 0x1)
- error = 1;
- break;
- case PW_32:
- if (aligned & 0x3)
- error = 1;
- break;
- case PW_64:
- if (aligned & 0x7)
- error = 1;
- break;
- default:
- printk("ppc4xx_add_dma_sgl: invalid bus width: 0x%x\n",
- p_dma_ch->pwidth);
- return DMA_STATUS_GENERAL_ERROR;
- }
- if (error)
- printk
- ("Alignment warning: ppc4xx_add_dma_sgl src 0x%x dst 0x%x count 0x%x bus width var %d\n",
- src_addr, dst_addr, count, p_dma_ch->pwidth);
-
- }
-#endif
-
- if ((unsigned) (psgl->ptail + 1) >= ((unsigned) psgl + SGL_LIST_SIZE)) {
- printk("sgl handle out of memory \n");
- return DMA_STATUS_OUT_OF_MEMORY;
- }
-
- if (!psgl->ptail) {
- psgl->phead = (ppc_sgl_t *)
- ((unsigned) psgl + sizeof (sgl_list_info_t));
- psgl->phead_dma = psgl->dma_addr + sizeof(sgl_list_info_t);
- psgl->ptail = psgl->phead;
- psgl->ptail_dma = psgl->phead_dma;
- } else {
- psgl->ptail->next = psgl->ptail_dma + sizeof(ppc_sgl_t);
- psgl->ptail++;
- psgl->ptail_dma += sizeof(ppc_sgl_t);
- }
-
- psgl->ptail->control = psgl->control;
- psgl->ptail->src_addr = src_addr;
- psgl->ptail->dst_addr = dst_addr;
- psgl->ptail->control_count = (count >> p_dma_ch->shift) |
- psgl->sgl_control;
- psgl->ptail->next = (uint32_t) NULL;
-
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Enable (start) the DMA described by the sgl handle.
- */
-void
-ppc4xx_enable_dma_sgl(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
- uint32_t sg_command;
-
- if (!handle) {
- printk("ppc4xx_enable_dma_sgl: null handle\n");
- return;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_enable_dma_sgl: bad channel in handle %d\n",
- psgl->dmanr);
- return;
- } else if (!psgl->phead) {
- printk("ppc4xx_enable_dma_sgl: sg list empty\n");
- return;
- }
-
- p_dma_ch = &dma_channels[psgl->dmanr];
- psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */
- sg_command = mfdcr(DCRN_ASGC);
-
- ppc4xx_set_sg_addr(psgl->dmanr, psgl->phead_dma);
-
- sg_command |= SSG_ENABLE(psgl->dmanr);
-
- mtdcr(DCRN_ASGC, sg_command); /* start transfer */
-}
-
-/*
- * Halt an active scatter/gather DMA operation.
- */
-void
-ppc4xx_disable_dma_sgl(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- uint32_t sg_command;
-
- if (!handle) {
- printk("ppc4xx_enable_dma_sgl: null handle\n");
- return;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_enable_dma_sgl: bad channel in handle %d\n",
- psgl->dmanr);
- return;
- }
-
- sg_command = mfdcr(DCRN_ASGC);
- sg_command &= ~SSG_ENABLE(psgl->dmanr);
- mtdcr(DCRN_ASGC, sg_command); /* stop transfer */
-}
-
-/*
- * Returns number of bytes left to be transferred from the entire sgl list.
- * *src_addr and *dst_addr get set to the source/destination address of
- * the sgl descriptor where the DMA stopped.
- *
- * An sgl transfer must NOT be active when this function is called.
- */
-int
-ppc4xx_get_dma_sgl_residue(sgl_handle_t handle, phys_addr_t * src_addr,
- phys_addr_t * dst_addr)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
- ppc_dma_ch_t *p_dma_ch;
- ppc_sgl_t *pnext, *sgl_addr;
- uint32_t count_left;
-
- if (!handle) {
- printk("ppc4xx_get_dma_sgl_residue: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_get_dma_sgl_residue: bad channel in handle %d\n",
- psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- sgl_addr = (ppc_sgl_t *) __va(mfdcr(DCRN_ASG0 + (psgl->dmanr * 0x8)));
- count_left = mfdcr(DCRN_DMACT0 + (psgl->dmanr * 0x8));
-
- if (!sgl_addr) {
- printk("ppc4xx_get_dma_sgl_residue: sgl addr register is null\n");
- goto error;
- }
-
- pnext = psgl->phead;
- while (pnext &&
- ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE) &&
- (pnext != sgl_addr))
- ) {
- pnext++;
- }
-
- if (pnext == sgl_addr) { /* found the sgl descriptor */
-
- *src_addr = pnext->src_addr;
- *dst_addr = pnext->dst_addr;
-
- /*
- * Now search the remaining descriptors and add their count.
- * We already have the remaining count from this descriptor in
- * count_left.
- */
- pnext++;
-
- while ((pnext != psgl->ptail) &&
- ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE))
- ) {
- count_left += pnext->control_count & SG_COUNT_MASK;
- }
-
- if (pnext != psgl->ptail) { /* should never happen */
- printk
- ("ppc4xx_get_dma_sgl_residue error (1) psgl->ptail 0x%x handle 0x%x\n",
- (unsigned int) psgl->ptail, (unsigned int) handle);
- goto error;
- }
-
- /* success */
- p_dma_ch = &dma_channels[psgl->dmanr];
- return (count_left << p_dma_ch->shift); /* count in bytes */
-
- } else {
- /* this shouldn't happen */
- printk
- ("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n",
- (unsigned int) sgl_addr, (unsigned int) handle);
-
- }
-
- error:
- *src_addr = (phys_addr_t) NULL;
- *dst_addr = (phys_addr_t) NULL;
- return 0;
-}
-
-/*
- * Returns the address(es) of the buffer(s) contained in the head element of
- * the scatter/gather list. The element is removed from the scatter/gather
- * list and the next element becomes the head.
- *
- * This function should only be called when the DMA is not active.
- */
-int
-ppc4xx_delete_dma_sgl_element(sgl_handle_t handle, phys_addr_t * src_dma_addr,
- phys_addr_t * dst_dma_addr)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
-
- if (!handle) {
- printk("ppc4xx_delete_sgl_element: null handle\n");
- return DMA_STATUS_BAD_HANDLE;
- } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) {
- printk("ppc4xx_delete_sgl_element: bad channel in handle %d\n",
- psgl->dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- if (!psgl->phead) {
- printk("ppc4xx_delete_sgl_element: sgl list empty\n");
- *src_dma_addr = (phys_addr_t) NULL;
- *dst_dma_addr = (phys_addr_t) NULL;
- return DMA_STATUS_SGL_LIST_EMPTY;
- }
-
- *src_dma_addr = (phys_addr_t) psgl->phead->src_addr;
- *dst_dma_addr = (phys_addr_t) psgl->phead->dst_addr;
-
- if (psgl->phead == psgl->ptail) {
- /* last descriptor on the list */
- psgl->phead = NULL;
- psgl->ptail = NULL;
- } else {
- psgl->phead++;
- psgl->phead_dma += sizeof(ppc_sgl_t);
- }
-
- return DMA_STATUS_GOOD;
-}
-
-
-/*
- * Create a scatter/gather list handle. This is simply a structure which
- * describes a scatter/gather list.
- *
- * A handle is returned in "handle" which the driver should save in order to
- * be able to access this list later. A chunk of memory will be allocated
- * to be used by the API for internal management purposes, including managing
- * the sg list and allocating memory for the sgl descriptors. One page should
- * be more than enough for that purpose. Perhaps it's a bit wasteful to use
- * a whole page for a single sg list, but most likely there will be only one
- * sg list per channel.
- *
- * Interrupt notes:
- * Each sgl descriptor has a copy of the DMA control word which the DMA engine
- * loads in the control register. The control word has a "global" interrupt
- * enable bit for that channel. Interrupts are further qualified by a few bits
- * in the sgl descriptor count register. In order to setup an sgl, we have to
- * know ahead of time whether or not interrupts will be enabled at the completion
- * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
- * be called before calling alloc_dma_handle(). If the interrupt mode will never
- * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
- * do not have to be called -- interrupts will be enabled or disabled based
- * on how the channel was configured after powerup by the hw_init_dma_channel()
- * function. Each sgl descriptor will be setup to interrupt if an error occurs;
- * however, only the last descriptor will be setup to interrupt. Thus, an
- * interrupt will occur (if interrupts are enabled) only after the complete
- * sgl transfer is done.
- */
-int
-ppc4xx_alloc_dma_handle(sgl_handle_t * phandle, unsigned int mode, unsigned int dmanr)
-{
- sgl_list_info_t *psgl;
- dma_addr_t dma_addr;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- uint32_t sg_command;
- void *ret;
-
- if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
- printk("ppc4xx_alloc_dma_handle: invalid channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- if (!phandle) {
- printk("ppc4xx_alloc_dma_handle: null handle pointer\n");
- return DMA_STATUS_NULL_POINTER;
- }
-
- /* Get a page of memory, which is zeroed out by consistent_alloc() */
- ret = dma_alloc_coherent(NULL, DMA_PPC4xx_SIZE, &dma_addr, GFP_KERNEL);
- if (ret != NULL) {
- memset(ret, 0, DMA_PPC4xx_SIZE);
- psgl = (sgl_list_info_t *) ret;
- }
-
- if (psgl == NULL) {
- *phandle = (sgl_handle_t) NULL;
- return DMA_STATUS_OUT_OF_MEMORY;
- }
-
- psgl->dma_addr = dma_addr;
- psgl->dmanr = dmanr;
-
- /*
- * Modify and save the control word. These words will be
- * written to each sgl descriptor. The DMA engine then
- * loads this control word into the control register
- * every time it reads a new descriptor.
- */
- psgl->control = p_dma_ch->control;
- /* Clear all mode bits */
- psgl->control &= ~(DMA_TM_MASK | DMA_TD);
- /* Save control word and mode */
- psgl->control |= (mode | DMA_CE_ENABLE);
-
- /* In MM mode, we must set ETD/TCE */
- if (mode == DMA_MODE_MM)
- psgl->control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
-
- if (p_dma_ch->int_enable) {
- /* Enable channel interrupt */
- psgl->control |= DMA_CIE_ENABLE;
- } else {
- psgl->control &= ~DMA_CIE_ENABLE;
- }
-
- sg_command = mfdcr(DCRN_ASGC);
- sg_command |= SSG_MASK_ENABLE(dmanr);
-
- /* Enable SGL control access */
- mtdcr(DCRN_ASGC, sg_command);
- psgl->sgl_control = SG_ERI_ENABLE | SG_LINK;
-
- if (p_dma_ch->int_enable) {
- if (p_dma_ch->tce_enable)
- psgl->sgl_control |= SG_TCI_ENABLE;
- else
- psgl->sgl_control |= SG_ETI_ENABLE;
- }
-
- *phandle = (sgl_handle_t) psgl;
- return DMA_STATUS_GOOD;
-}
-
-/*
- * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
- * The list must be empty (contain no elements).
- */
-void
-ppc4xx_free_dma_handle(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
-
- if (!handle) {
- printk("ppc4xx_free_dma_handle: got NULL\n");
- return;
- } else if (psgl->phead) {
- printk("ppc4xx_free_dma_handle: list not empty\n");
- return;
- } else if (!psgl->dma_addr) { /* should never happen */
- printk("ppc4xx_free_dma_handle: no dma address\n");
- return;
- }
-
- dma_free_coherent(NULL, DMA_PPC4xx_SIZE, (void *) psgl, 0);
-}
-
-EXPORT_SYMBOL(ppc4xx_alloc_dma_handle);
-EXPORT_SYMBOL(ppc4xx_free_dma_handle);
-EXPORT_SYMBOL(ppc4xx_add_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_delete_dma_sgl_element);
-EXPORT_SYMBOL(ppc4xx_enable_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_disable_dma_sgl);
-EXPORT_SYMBOL(ppc4xx_get_dma_sgl_residue);
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI1_LOWER_MEM */
+ /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */
pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
- /* Enable, Mem R/W */
- pci->powar1 = 0x80044000 |
- (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
+ pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
- /* Setup outboud IO windows @ MPC85XX_PCI1_IO_BASE */
+	/* Setup 16M outbound IO windows @ 0xe2000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
- /* Enable, IO R/W */
- pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
+ pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin);
extern int mpc85xx_exclude_device(u_char bus, u_char devfn);
-#ifdef CONFIG_85xx_PCI2
+#if CONFIG_85xx_PCI2
static void __init
mpc85xx_setup_pci2(struct pci_controller *hose)
{
pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
MPC85xx_PCI2_SIZE);
- early_read_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, &temps);
+ early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
- early_write_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, temps);
- early_write_config_byte(hose, hose->bus_offset, 0, PCI_LATENCY_TIMER, 0x80);
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
/* Disable all windows (except powar0 since its ignored) */
pci->powar1 = 0;
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI2_LOWER_MEM */
+ /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */
pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
- /* Enable, Mem R/W */
- pci->powar1 = 0x80044000 |
- (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
+ pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
- /* Setup outboud IO windows @ MPC85XX_PCI2_IO_BASE */
+ /* Setup 16M outbound IO windows @ 0xe3000000 */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
- /* Enable, IO R/W */
- pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
+ pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
#define __PPC_SYSLIB_PPC85XX_SETUP_H
#include <linux/config.h>
+#include <linux/serial.h>
#include <linux/init.h>
#include <asm/ppcboot.h>
/* All newworld pmac machines and CHRPs now use the interrupt tree */
for (np = allnodes; np != NULL; np = np->allnext) {
- if (get_property(np, "interrupt-parent", NULL)) {
+ if (get_property(np, "interrupt-parent", 0)) {
use_of_interrupt_tree = 1;
break;
}
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", NULL);
- np->type = get_property(np, "device_type", NULL);
+ np->name = get_property(np, "name", 0);
+ np->type = get_property(np, "device_type", 0);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
nsizec = *ip;
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
*prevp = np;
prevp = &np->next;
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
*lenp = pp->length;
return pp->value;
}
- return NULL;
+ return 0;
}
/*
static void * early_get_property(unsigned long base, unsigned long node,
char *prop);
-prom_entry prom __initdata;
-ihandle prom_chosen __initdata;
-ihandle prom_stdout __initdata;
+prom_entry prom __initdata = 0;
+ihandle prom_chosen __initdata = 0;
+ihandle prom_stdout __initdata = 0;
-char *prom_display_paths[FB_MAX] __initdata;
+char *prom_display_paths[FB_MAX] __initdata = { 0, };
phandle prom_display_nodes[FB_MAX] __initdata;
-unsigned int prom_num_displays __initdata;
-char *of_stdout_device __initdata;
-static ihandle prom_disp_node __initdata;
+unsigned int prom_num_displays __initdata = 0;
+char *of_stdout_device __initdata = 0;
+static ihandle prom_disp_node __initdata = 0;
unsigned int rtas_data; /* physical pointer */
unsigned int rtas_entry; /* physical pointer */
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = NULL;
+ prom_args.args[i + nargs] = 0;
prom(&prom_args);
return prom_args.args[nargs];
}
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = NULL;
+ prom_args.args[i + nargs] = 0;
prom(&prom_args);
for (i = 1; i < nret; ++i)
rets[i-1] = prom_args.args[nargs + i];
};
const unsigned char *clut;
- prom_disp_node = NULL;
+ prom_disp_node = 0;
- for (node = NULL; prom_next_node(&node); ) {
+ for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
}
allnextp = &allnodes;
mem_start = ALIGNUL(mem_start);
- new_start = inspect_node(root, NULL, mem_start, mem_end, &allnextp);
- *allnextp = NULL;
+ new_start = inspect_node(root, 0, mem_start, mem_end, &allnextp);
+ *allnextp = 0;
return new_start;
}
/* look for cpus */
*(unsigned long *)(0x0) = 0;
asm volatile("dcbf 0,%0": : "r" (0) : "memory");
- for (node = NULL; prom_next_node(&node); ) {
+ for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
prom_print("returning 0x");
prom_print_hex(phys);
prom_print("from prom_init\n");
- prom_stdout = NULL;
+ prom_stdout = 0;
return phys;
}
return (void *)((unsigned long)pp->value + base);
}
}
- return NULL;
+ return 0;
}
/* Is boot-info compatible ? */
boot_infos = PTRUNRELOC(bi);
if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
- bi->logicalDisplayBase = NULL;
+ bi->logicalDisplayBase = 0;
#ifdef CONFIG_BOOTX_TEXT
btext_init(bi);
/* The zero index is used to indicate the end of the list of
operands. */
#define UNUSED (0)
- { 0, 0, NULL, NULL, 0 },
+ { 0, 0, 0, 0, 0 },
/* The BA field in an XL form instruction. */
#define BA (1)
#define BA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
/* The BA field in an XL form instruction when it must be the same
as the BT field in the same instruction. */
/* The BB field in an XL form instruction. */
#define BB (3)
#define BB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 11, 0, 0, PPC_OPERAND_CR },
/* The BB field in an XL form instruction when it must be the same
as the BA field in the same instruction. */
/* The BF field in an X or XL form instruction. */
#define BF (11)
- { 3, 23, NULL, NULL, PPC_OPERAND_CR },
+ { 3, 23, 0, 0, PPC_OPERAND_CR },
/* An optional BF field. This is used for comparison instructions,
in which an omitted BF field is taken as zero. */
#define OBF (12)
- { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 23, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The BFA field in an X or XL form instruction. */
#define BFA (13)
- { 3, 18, NULL, NULL, PPC_OPERAND_CR },
+ { 3, 18, 0, 0, PPC_OPERAND_CR },
/* The BI field in a B form or XL form instruction. */
#define BI (14)
#define BI_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 16, 0, 0, PPC_OPERAND_CR },
/* The BO field in a B form instruction. Certain values are
illegal. */
/* The BT field in an X or XL form instruction. */
#define BT (17)
- { 5, 21, NULL, NULL, PPC_OPERAND_CR },
+ { 5, 21, 0, 0, PPC_OPERAND_CR },
/* The condition register number portion of the BI field in a B form
or XL form instruction. This is used for the extended
conditional branch mnemonics, which set the lower two bits of the
BI field. This field is optional. */
#define CR (18)
- { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 18, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The D field in a D form instruction. This is a displacement off
a register, and implies that the next operand is a register in
parentheses. */
#define D (19)
- { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
+ { 16, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DS field in a DS form instruction. This is like D, but the
lower two bits are forced to zero. */
/* The FL1 field in a POWER SC form instruction. */
#define FL1 (21)
- { 4, 12, NULL, NULL, 0 },
+ { 4, 12, 0, 0, 0 },
/* The FL2 field in a POWER SC form instruction. */
#define FL2 (22)
- { 3, 2, NULL, NULL, 0 },
+ { 3, 2, 0, 0, 0 },
/* The FLM field in an XFL form instruction. */
#define FLM (23)
- { 8, 17, NULL, NULL, 0 },
+ { 8, 17, 0, 0, 0 },
/* The FRA field in an X or A form instruction. */
#define FRA (24)
#define FRA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 16, 0, 0, PPC_OPERAND_FPR },
/* The FRB field in an X or A form instruction. */
#define FRB (25)
#define FRB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 11, 0, 0, PPC_OPERAND_FPR },
/* The FRC field in an A form instruction. */
#define FRC (26)
#define FRC_MASK (0x1f << 6)
- { 5, 6, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 6, 0, 0, PPC_OPERAND_FPR },
/* The FRS field in an X form instruction or the FRT field in a D, X
or A form instruction. */
#define FRS (27)
#define FRT (FRS)
- { 5, 21, NULL, NULL, PPC_OPERAND_FPR },
+ { 5, 21, 0, 0, PPC_OPERAND_FPR },
/* The FXM field in an XFX instruction. */
#define FXM (28)
#define FXM_MASK (0xff << 12)
- { 8, 12, NULL, NULL, 0 },
+ { 8, 12, 0, 0, 0 },
/* The L field in a D or X form instruction. */
#define L (29)
- { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
+ { 1, 21, 0, 0, PPC_OPERAND_OPTIONAL },
/* The LEV field in a POWER SC form instruction. */
#define LEV (30)
- { 7, 5, NULL, NULL, 0 },
+ { 7, 5, 0, 0, 0 },
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
/* The MB field in an M form instruction. */
#define MB (33)
#define MB_MASK (0x1f << 6)
- { 5, 6, NULL, NULL, 0 },
+ { 5, 6, 0, 0, 0 },
/* The ME field in an M form instruction. */
#define ME (34)
#define ME_MASK (0x1f << 1)
- { 5, 1, NULL, NULL, 0 },
+ { 5, 1, 0, 0, 0 },
/* The MB and ME fields in an M form instruction expressed a single
operand which is a bitmask indicating which bits to select. This
is a two operand form using PPC_OPERAND_NEXT. See the
description in opcode/ppc.h for what this means. */
#define MBE (35)
- { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
+ { 5, 6, 0, 0, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
{ 32, 0, insert_mbe, extract_mbe, 0 },
/* The MB or ME field in an MD or MDS form instruction. The high
/* The RA field in an D, DS, X, XO, M, or MDS form instruction. */
#define RA (40)
#define RA_MASK (0x1f << 16)
- { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 16, 0, 0, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
#define RAL (41)
- { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ral, 0, PPC_OPERAND_GPR },
/* The RA field in an lmw instruction, which has special value
restrictions. */
#define RAM (42)
- { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ram, 0, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
#define RAS (43)
- { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ras, 0, PPC_OPERAND_GPR },
/* The RB field in an X, XO, M, or MDS form instruction. */
#define RB (44)
#define RB_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 11, 0, 0, PPC_OPERAND_GPR },
/* The RB field in an X form instruction when it must be the same as
the RS field in the instruction. This is used for extended
#define RS (46)
#define RT (RS)
#define RT_MASK (0x1f << 21)
- { 5, 21, NULL, NULL, PPC_OPERAND_GPR },
+ { 5, 21, 0, 0, PPC_OPERAND_GPR },
/* The SH field in an X or M form instruction. */
#define SH (47)
#define SH_MASK (0x1f << 11)
- { 5, 11, NULL, NULL, 0 },
+ { 5, 11, 0, 0, 0 },
/* The SH field in an MD form instruction. This is split. */
#define SH6 (48)
/* The SI field in a D form instruction. */
#define SI (49)
- { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED },
/* The SI field in a D form instruction when we accept a wide range
of positive values. */
#define SISIGNOPT (50)
- { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
+ { 16, 0, 0, 0, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
/* The SPR field in an XFX form instruction. This is flipped--the
lower 5 bits are stored in the upper 5 and vice- versa. */
/* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
#define SPRBAT (52)
#define SPRBAT_MASK (0x3 << 17)
- { 2, 17, NULL, NULL, 0 },
+ { 2, 17, 0, 0, 0 },
/* The SPRG register number in an XFX form m[ft]sprg instruction. */
#define SPRG (53)
#define SPRG_MASK (0x3 << 16)
- { 2, 16, NULL, NULL, 0 },
+ { 2, 16, 0, 0, 0 },
/* The SR field in an X form instruction. */
#define SR (54)
- { 4, 16, NULL, NULL, 0 },
+ { 4, 16, 0, 0, 0 },
/* The SV field in a POWER SC form instruction. */
#define SV (55)
- { 14, 2, NULL, NULL, 0 },
+ { 14, 2, 0, 0, 0 },
/* The TBR field in an XFX form instruction. This is like the SPR
field, but it is optional. */
/* The TO field in a D or X form instruction. */
#define TO (57)
#define TO_MASK (0x1f << 21)
- { 5, 21, NULL, NULL, 0 },
+ { 5, 21, 0, 0, 0 },
/* The U field in an X form instruction. */
#define U (58)
- { 4, 12, NULL, NULL, 0 },
+ { 4, 12, 0, 0, 0 },
/* The UI field in a D form instruction. */
#define UI (59)
- { 16, 0, NULL, NULL, 0 },
+ { 16, 0, 0, 0, 0 },
};
/* The functions used to insert and extract complicated operands. */
scc_initialized = 1;
if (via_modem) {
for (;;) {
- xmon_write(NULL, "ATE1V1\r", 7);
+ xmon_write(0, "ATE1V1\r", 7);
if (xmon_expect("OK", 5)) {
- xmon_write(NULL, "ATA\r", 4);
+ xmon_write(0, "ATA\r", 4);
if (xmon_expect("CONNECT", 40))
break;
}
- xmon_write(NULL, "+++", 3);
+ xmon_write(0, "+++", 3);
xmon_expect("OK", 3);
}
}
c = xmon_getchar();
if (c == -1) {
if (p == str)
- return NULL;
+ return 0;
break;
}
*p++ = c;
set_backlight_level(BACKLIGHT_MAX);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
#endif /* CONFIG_PMAC_BACKLIGHT */
cmd = cmds(excp);
if (cmd == 's') {
insert_bpts();
}
xmon_leave();
- xmon_regs[smp_processor_id()] = NULL;
+ xmon_regs[smp_processor_id()] = 0;
#ifdef CONFIG_SMP
clear_bit(0, &got_xmon);
clear_bit(smp_processor_id(), &cpus_in_xmon);
for (i = 0; i < NBPTS; ++i, ++bp)
if (bp->enabled && pc == bp->address)
return bp;
- return NULL;
+ return 0;
}
static void
xmon_puts(sysmap);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
}
else
printf("No System.map\n");
__delay(200);
n = size;
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return n;
}
} else {
printf("*** Error writing address %x\n", adrs + n);
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return n;
}
} else {
printf("*** %x exception occurred\n", fault_except);
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
}
/* Input scanning routines */
} while (cur);
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
termch = 0;
break;
}
*(ep++) = 0;
if (saddr)
*saddr = prev;
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return rbuffer;
}
prev = next;
bail:
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return NULL;
}
}
sync();
}
- debugger_fault_handler = NULL;
+ debugger_fault_handler = 0;
return result;
}
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
+endmenu
+
config SPINLINE
bool "Inline spinlock code at each call site"
depends on SMP && !PPC_SPLPAR && !PPC_ISERIES
If in doubt, say N.
-endmenu
-
source "kernel/vserver/Kconfig"
source "security/Kconfig"
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
+#include <asm/processor.h>
.globl __div64_32
__div64_32:
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
obj-$(CONFIG_PPC_OF) += of_device.o
-pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
- iSeries_IoMmTable.o
-pci-obj-$(CONFIG_PPC_PSERIES) += pci_dn.o pci_dma_direct.o
+obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_iommu.o
-obj-$(CONFIG_PCI) += pci.o pci_iommu.o $(pci-obj-y)
+ifdef CONFIG_PPC_ISERIES
+obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
+ iSeries_IoMmTable.o
+else
+obj-$(CONFIG_PCI) += pci_dma_direct.o
+endif
obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
iSeries_VpdInfo.o XmPciLpEvent.o \
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
-obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o \
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
- DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
+ DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_next_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
- DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
- DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
- DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
- DEFINE(PACASLBR3, offsetof(struct paca_struct, slb_r3));
-#ifdef CONFIG_HUGETLB_PAGE
- DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs));
-#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACAPROFENABLED, offsetof(struct paca_struct, prof_enabled));
DEFINE(PACAPROFLEN, offsetof(struct paca_struct, prof_len));
cur_cpu_spec->firmware_features);
}
-void chrp_progress(char *s, unsigned short hex)
+void
+chrp_progress(char *s, unsigned short hex)
{
struct device_node *root;
int width, *p;
return;
if (max_width == 0) {
- if ((root = find_path_device("/rtas")) &&
+ if ( (root = find_path_device("/rtas")) &&
(p = (unsigned int *)get_property(root,
"ibm,display-line-length",
- NULL)))
+ NULL)) )
max_width = *p;
else
max_width = 0x10;
display_character = rtas_token("display-character");
set_indicator = rtas_token("set-indicator");
}
-
if (display_character == RTAS_UNKNOWN_SERVICE) {
+ /* use hex display */
+ if (set_indicator == RTAS_UNKNOWN_SERVICE)
+ return;
+ rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
+ return;
+ }
+
+ if(display_character == RTAS_UNKNOWN_SERVICE) {
/* use hex display if available */
- if (set_indicator != RTAS_UNKNOWN_SERVICE)
+ if(set_indicator != RTAS_UNKNOWN_SERVICE)
rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
return;
}
spin_lock(&progress_lock);
- /*
- * Last write ended with newline, but we didn't print it since
+ /* Last write ended with newline, but we didn't print it since
* it would just clear the bottom line of output. Print it now
* instead.
*
* If no newline is pending, print a CR to start output at the
* beginning of the line.
*/
- if (pending_newline) {
+ if(pending_newline) {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
pending_newline = 0;
- } else {
+ } else
rtas_call(display_character, 1, 1, NULL, '\r');
- }
width = max_width;
os = s;
while (*os) {
- if (*os == '\n' || *os == '\r') {
+ if(*os == '\n' || *os == '\r') {
/* Blank to end of line. */
- while (width-- > 0)
+ while(width-- > 0)
rtas_call(display_character, 1, 1, NULL, ' ');
/* If newline is the last character, save it
* until next call to avoid bumping up the
* display output.
*/
- if (*os == '\n' && !os[1]) {
+ if(*os == '\n' && !os[1]) {
pending_newline = 1;
spin_unlock(&progress_lock);
return;
/* RTAS wants CR-LF, not just LF */
- if (*os == '\n') {
+ if(*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
} else {
os++;
/* if we overwrite the screen length */
- if (width <= 0)
- while ((*os != 0) && (*os != '\n') && (*os != '\r'))
+ if ( width <= 0 )
+ while ( (*os != 0) && (*os != '\n') && (*os != '\r') )
os++;
}
/* Blank to end of line. */
- while (width-- > 0)
- rtas_call(display_character, 1, 1, NULL, ' ');
+ while ( width-- > 0 )
+ rtas_call(display_character, 1, 1, NULL, ' ' );
spin_unlock(&progress_lock);
}
{
struct eeh_early_enable_info *info = data;
int ret;
- char *status = get_property(dn, "status", NULL);
- u32 *class_code = (u32 *)get_property(dn, "class-code", NULL);
- u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL);
- u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
+ char *status = get_property(dn, "status", 0);
+ u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
+ u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", 0);
+ u32 *device_id = (u32 *)get_property(dn, "device-id", 0);
u32 *regs;
int enable;
/* Ok... see if this device supports EEH. Some do, some don't,
* and the only way to find out is to check each and every one. */
- regs = (u32 *)get_property(dn, "reg", NULL);
+ regs = (u32 *)get_property(dn, "reg", 0);
if (regs) {
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
info.buid_lo = BUID_LO(buid);
info.buid_hi = BUID_HI(buid);
- traverse_pci_devices(phb, early_enable_eeh, &info);
+ traverse_pci_devices(phb, early_enable_eeh, NULL, &info);
}
if (eeh_subsystem_enabled) {
/* Build list of strings to match */
nstrs = 0;
- s = (char *)get_property(dn, "ibm,loc-code", NULL);
+ s = (char *)get_property(dn, "ibm,loc-code", 0);
if (s)
strs[nstrs++] = s;
sprintf(devname, "dev%04x:%04x", vendor_id, device_id);
*/
ld r11,.SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
- beq 15f
+ beq- 15f
ld r11,.SYS_CALL_TABLE32@toc(2)
clrldi r3,r3,32
clrldi r4,r4,32
15:
slwi r0,r0,3
ldx r10,r11,r0 /* Fetch system call handler [ptr] */
- mtctr r10
- bctrl /* Call handler */
+ mtlr r10
+ blrl /* Call handler */
syscall_exit:
#ifdef SHOW_SYSCALLS
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- beq- 1f /* only restore r13 if */
+ beq 1f /* only restore r13 if */
ld r13,GPR13(r1) /* returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtspr SRR0,r7
mtspr SRR1,r8
rfid
- b . /* prevent speculative execution */
syscall_enosys:
li r3,-ENOSYS
ld r1,GPR1(r1)
rfid
- b . /* prevent speculative execution */
+ b .
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
mtspr SRR0,r5
mtspr SRR1,r6
rfid
- b . /* prevent speculative execution */
_STATIC(rtas_return_loc)
/* relocation is off at this point */
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
_STATIC(rtas_restore_regs)
/* relocation is on at this point */
#define EX_R13 32
#define EX_SRR0 40
#define EX_DAR 48
-#define EX_LR 48 /* SLB miss saves LR, but not DAR */
#define EX_DSISR 56
#define EX_CCR 60
mtspr SRR0,r12; \
mfspr r12,SRR1; /* and SRR1 */ \
mtspr SRR1,r10; \
- rfid; \
- b . /* prevent speculative execution */
+ rfid
/*
* This is the start of the interrupt handlers for iSeries
. = n; \
.globl label##_Pseries; \
label##_Pseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_Iseries; \
label##_Iseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
#define MASKABLE_EXCEPTION_ISERIES(n, label) \
.globl label##_Iseries; \
label##_Iseries: \
- HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROFENABLED(r13); \
. = 0x200
_MachineCheckPseries:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
. = 0x300
.globl DataAccess_Pseries
DataAccess_Pseries:
- HMT_MEDIUM
mtspr SPRG1,r13
BEGIN_FTR_SECTION
mtspr SPRG2,r12
. = 0x380
.globl DataAccessSLB_Pseries
DataAccessSLB_Pseries:
- HMT_MEDIUM
mtspr SPRG1,r13
- mfspr r13,SPRG3 /* get paca address into r13 */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACASLBR3(r13)
- mfspr r9,SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
- clrrdi r12,r13,32 /* get high part of &label */
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,(.do_slb_miss)@l
- ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- mfspr r3,DAR
- rfid
- b . /* prevent speculative execution */
+ mtspr SPRG2,r12
+ mfspr r13,DAR
+ mfcr r12
+ srdi r13,r13,60
+ cmpdi r13,0xc
+ beq .do_slb_bolted_Pseries
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccessSLB_common)
STD_EXCEPTION_PSERIES(0x400, InstructionAccess)
-
- . = 0x480
- .globl InstructionAccessSLB_Pseries
-InstructionAccessSLB_Pseries:
- HMT_MEDIUM
- mtspr SPRG1,r13
- mfspr r13,SPRG3 /* get paca address into r13 */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACASLBR3(r13)
- mfspr r9,SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
- clrrdi r12,r13,32 /* get high part of &label */
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,(.do_slb_miss)@l
- ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- mr r3,r11 /* SRR0 is faulting address */
- rfid
- b . /* prevent speculative execution */
-
+ STD_EXCEPTION_PSERIES(0x480, InstructionAccessSLB)
STD_EXCEPTION_PSERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_PSERIES(0x600, Alignment)
STD_EXCEPTION_PSERIES(0x700, ProgramCheck)
. = 0xc00
.globl SystemCall_Pseries
SystemCall_Pseries:
- HMT_MEDIUM
mr r9,r13
mfmsr r10
mfspr r13,SPRG3
mfspr r12,SRR1
mtspr SRR1,r10
rfid
- b . /* prevent speculative execution */
STD_EXCEPTION_PSERIES(0xd00, SingleStep)
STD_EXCEPTION_PSERIES(0xe00, Trap_0e)
mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+_GLOBAL(do_slb_bolted_Pseries)
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_slb_bolted)
+
/* Space for the naca. Architected to be located at real address
* NACA_PHYS_ADDR. Various tools rely on this location being fixed.
.globl DataAccessSLB_Iseries
DataAccessSLB_Iseries:
mtspr SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
- std r3,PACASLBR3(r13)
- ld r11,PACALPPACA+LPPACASRR0(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- mfspr r3,DAR
- b .do_slb_miss
-
- STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
+ mtspr SPRG2,r12
+ mfspr r13,DAR
+ mfcr r12
+ srdi r13,r13,60
+ cmpdi r13,0xc
+ beq .do_slb_bolted_Iseries
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
+ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
+ EXCEPTION_PROLOG_ISERIES_2
+ b DataAccessSLB_common
- .globl InstructionAccessSLB_Iseries
-InstructionAccessSLB_Iseries:
- mtspr SPRG1,r13 /* save r13 */
+.do_slb_bolted_Iseries:
+ mtcrf 0x80,r12
+ mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
- std r3,PACASLBR3(r13)
- ld r11,PACALPPACA+LPPACASRR0(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- mr r3,r11
- b .do_slb_miss
+ EXCEPTION_PROLOG_ISERIES_2
+ b .do_slb_bolted
+ STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
+ STD_EXCEPTION_ISERIES(0x480, InstructionAccessSLB, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_ISERIES(0x600, Alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x700, ProgramCheck, PACA_EXGEN)
li r11,1
stb r11,PACALPPACA+LPPACADECRINT(r13)
lwz r12,PACADEFAULTDECR(r13)
- mtspr SPRN_DEC,r12
+ mtspr DEC,r12
/* fall through */
.globl HardwareInterrupt_Iseries_masked
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
#endif
/*
. = 0x8000
.globl SystemReset_FWNMI
SystemReset_FWNMI:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, SystemReset_common)
.globl MachineCheck_FWNMI
MachineCheck_FWNMI:
- HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
REST_4GPRS(10, r1)
ld r1,GPR1(r1)
rfid
- b . /* prevent speculative execution */
unrecov_fer:
bl .save_nvgprs
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl DataAccessSLB_common
+DataAccessSLB_common:
+ mfspr r10,DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ std r3,_DAR(r1)
+ bl .slb_allocate
+ cmpdi r3,0 /* Check return code */
+ beq fast_exception_return /* Return if we succeeded */
+ li r5,0
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
.align 7
.globl InstructionAccess_common
InstructionAccess_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl InstructionAccessSLB_common
+InstructionAccessSLB_common:
+ EXCEPTION_PROLOG_COMMON(0x480, PACA_EXGEN)
+ ld r3,_NIP(r1) /* SRR0 = NIA */
+ bl .slb_allocate
+ or. r3,r3,r3 /* Check return code */
+ beq+ fast_exception_return /* Return if we succeeded */
+
+ ld r4,_NIP(r1)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
.align 7
.globl HardwareInterrupt_common
.globl HardwareInterrupt_entry
bl .local_irq_restore
b 11f
#else
- beq fast_exception_return /* Return from exception on success */
+ beq+ fast_exception_return /* Return from exception on success */
/* fall through */
#endif
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r11 and r12 contain the saved SRR0 and SRR1.
- * r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
-_GLOBAL(do_slb_miss)
- mflr r10
-
+/* XXX note fix masking in get_kernel_vsid to match */
+_GLOBAL(do_slb_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
- bl .slb_allocate /* handle it */
+ /*
+ * We take the next entry, round robin. Previously we tried
+ * to find a free slot first but that took too long. Unfortunately
+ * we dont have any LRU information to help us choose a slot.
+ */
- /* All done -- return from exception. */
+ /* r13 = paca */
+1: ld r10,PACASTABRR(r13)
+ addi r9,r10,1
+ cmpdi r9,SLB_NUM_ENTRIES
+ blt+ 2f
+ li r9,2 /* dont touch slot 0 or 1 */
+2: std r9,PACASTABRR(r13)
+
+ /* r13 = paca, r10 = entry */
+
+ /*
+ * Never cast out the segment for our kernel stack. Since we
+ * dont invalidate the ERAT we could have a valid translation
+ * for the kernel stack during the first part of exception exit
+ * which gets invalidated due to a tlbie from another cpu at a
+ * non recoverable point (after setting srr0/1) - Anton
+ */
+ slbmfee r9,r10
+ srdi r9,r9,27
+ /*
+ * Use paca->ksave as the value of the kernel stack pointer,
+ * because this is valid at all times.
+ * The >> 27 (rather than >> 28) is so that the LSB is the
+ * valid bit - this way we check valid and ESID in one compare.
+ * In order to completely close the tiny race in the context
+ * switch (between updating r1 and updating paca->ksave),
+ * we check against both r1 and paca->ksave.
+ */
+ srdi r11,r1,27
+ ori r11,r11,1
+ cmpd r11,r9
+ beq- 1b
+ ld r11,PACAKSAVE(r13)
+ srdi r11,r11,27
+ ori r11,r11,1
+ cmpd r11,r9
+ beq- 1b
+
+ /* r13 = paca, r10 = entry */
+
+ /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
+ mfspr r9,DAR
+ rldicl r11,r9,36,51
+ sldi r11,r11,15
+ srdi r9,r9,60
+ or r11,r11,r9
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACASLBR3(r13)
+ /* VSID_RANDOMIZER */
+ li r9,9
+ sldi r9,r9,32
+ oris r9,r9,58231
+ ori r9,r9,39831
+
+ /* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */
+ mulld r11,r11,r9
+ clrldi r11,r11,28
+
+ /* r13 = paca, r10 = entry, r11 = vsid */
+
+ /* Put together slb word1 */
+ sldi r11,r11,12
+
+BEGIN_FTR_SECTION
+ /* set kp and c bits */
+ ori r11,r11,0x480
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+BEGIN_FTR_SECTION
+ /* set kp, l and c bits */
+ ori r11,r11,0x580
+END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
+
+ /* r13 = paca, r10 = entry, r11 = slb word1 */
+
+ /* Put together slb word0 */
+ mfspr r9,DAR
+ clrrdi r9,r9,28 /* get the new esid */
+ oris r9,r9,0x800 /* set valid bit */
+ rldimi r9,r10,0,52 /* insert entry */
+
+ /* r13 = paca, r9 = slb word0, r11 = slb word1 */
+
+ /*
+ * No need for an isync before or after this slbmte. The exception
+ * we enter with and the rfid we exit with are context synchronizing .
+ */
+ slbmte r11,r9
+
+ /* All done -- return from exception. */
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
- mtlr r10
-
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- unrecov_slb
-.machine push
-.machine "power4"
+ /*
+ * Until everyone updates binutils hardwire the POWER4 optimised
+ * single field mtcrf
+ */
+#if 0
+ .machine push
+ .machine "power4"
mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
+ .machine pop
+#else
+ .long 0x7d380120
+#endif
+
+ mfmsr r10
+ clrrdi r10,r10,2
+ mtmsrd r10,1
mtspr SRR0,r11
mtspr SRR1,r12
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
- b . /* prevent speculative execution */
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
mtspr SRR1,r3
sync
rfid
- b . /* prevent speculative execution */
_GLOBAL(__start_initialization_pSeries)
mr r31,r3 /* save parameters */
mr r30,r4
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
/*
* Running with relocation on at this point. All we want to do is
mtspr SRR0,r3
mtspr SRR1,r4
rfid
- b . /* prevent speculative execution */
#endif /* CONFIG_PPC_PSERIES */
/* This is where all platforms converge execution */
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/hvconsole.h>
return 0;
}
-EXPORT_SYMBOL(hvc_get_chars);
-
int hvc_put_chars(int index, const char *buf, int count)
{
unsigned long *lbuf = (unsigned long *) buf;
return -1;
}
-EXPORT_SYMBOL(hvc_put_chars);
-
/* return the number of client vterms present */
/* XXX this requires an interface change to handle multiple discontiguous
* vterms */
* we should _always_ be able to find one. */
vty = of_find_node_by_name(NULL, "vty");
if (vty && device_is_compatible(vty, "hvterm1")) {
- u32 *termno = (u32 *)get_property(vty, "reg", NULL);
+ u32 *termno = (u32 *)get_property(vty, "reg", 0);
if (termno && start_termno)
*start_termno = *termno;
+++ /dev/null
-/*
- * hvcserver.c
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <asm/hvcall.h>
-#include <asm/hvcserver.h>
-#include <asm/io.h>
-
-#define HVCS_ARCH_VERSION "1.0.0"
-
-MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
-MODULE_DESCRIPTION("IBM hvcs ppc64 API");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HVCS_ARCH_VERSION);
-
-/*
- * Convert arch specific return codes into relevant errnos. The hvcs
- * functions aren't performance sensitive, so this conversion isn't an
- * issue.
- */
-int hvcs_convert(long to_convert)
-{
- switch (to_convert) {
- case H_Success:
- return 0;
- case H_Parameter:
- return -EINVAL;
- case H_Hardware:
- return -EIO;
- case H_Busy:
- case H_LongBusyOrder1msec:
- case H_LongBusyOrder10msec:
- case H_LongBusyOrder100msec:
- case H_LongBusyOrder1sec:
- case H_LongBusyOrder10sec:
- case H_LongBusyOrder100sec:
- return -EBUSY;
- case H_Function: /* fall through */
- default:
- return -EPERM;
- }
-}
-
-int hvcs_free_partner_info(struct list_head *head)
-{
- struct hvcs_partner_info *pi;
- struct list_head *element;
-
- if (!head) {
- return -EINVAL;
- }
-
- while (!list_empty(head)) {
- element = head->next;
- pi = list_entry(element, struct hvcs_partner_info, node);
- list_del(element);
- kfree(pi);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(hvcs_free_partner_info);
-
-/* Helper function for hvcs_get_partner_info */
-int hvcs_next_partner(unsigned int unit_address,
- unsigned long last_p_partition_ID,
- unsigned long last_p_unit_address, unsigned long *pi_buff)
-
-{
- long retval;
- retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
- last_p_partition_ID,
- last_p_unit_address, virt_to_phys(pi_buff));
- return hvcs_convert(retval);
-}
-
-/*
- * The unit_address parameter is the unit address of the vty-server vdevice
- * in whose partner information the caller is interested. This function
- * uses a pointer to a list_head instance in which to store the partner info.
- * This function returns non-zero on success, or if there is no partner info.
- *
- * Invocation of this function should always be followed by an invocation of
- * hvcs_free_partner_info() using a pointer to the SAME list head instance
- * that was used to store the partner_info list.
- */
-int hvcs_get_partner_info(unsigned int unit_address, struct list_head *head,
- unsigned long *pi_buff)
-{
- /*
- * This is a page sized buffer to be passed to hvcall per invocation.
- * NOTE: the first long returned is unit_address. The second long
- * returned is the partition ID and starting with pi_buff[2] are
- * HVCS_CLC_LENGTH characters, which are diff size than the unsigned
- * long, hence the casting mumbojumbo you see later.
- */
- unsigned long last_p_partition_ID;
- unsigned long last_p_unit_address;
- struct hvcs_partner_info *next_partner_info = NULL;
- int more = 1;
- int retval;
-
- memset(pi_buff, 0x00, PAGE_SIZE);
- /* invalid parameters */
- if (!head)
- return -EINVAL;
-
- last_p_partition_ID = last_p_unit_address = ~0UL;
- INIT_LIST_HEAD(head);
-
- if (!pi_buff)
- return -ENOMEM;
-
- do {
- retval = hvcs_next_partner(unit_address, last_p_partition_ID,
- last_p_unit_address, pi_buff);
- if (retval) {
- /*
- * Don't indicate that we've failed if we have
- * any list elements.
- */
- if (!list_empty(head))
- return 0;
- return retval;
- }
-
- last_p_partition_ID = pi_buff[0];
- last_p_unit_address = pi_buff[1];
-
- /* This indicates that there are no further partners */
- if (last_p_partition_ID == ~0UL
- && last_p_unit_address == ~0UL)
- break;
-
- /* This is a very small struct and will be freed soon in
- * hvcs_free_partner_info(). */
- next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
- GFP_ATOMIC);
-
- if (!next_partner_info) {
- printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
- " allocate partner info struct.\n");
- hvcs_free_partner_info(head);
- return -ENOMEM;
- }
-
- next_partner_info->unit_address
- = (unsigned int)last_p_unit_address;
- next_partner_info->partition_ID
- = (unsigned int)last_p_partition_ID;
-
- /* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
- (char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
-
- list_add_tail(&(next_partner_info->node), head);
- next_partner_info = NULL;
-
- } while (more);
-
- return 0;
-}
-EXPORT_SYMBOL(hvcs_get_partner_info);
-
-/*
- * If this function is called once and -EINVAL is returned it may
- * indicate that the partner info needs to be refreshed for the
- * target unit address at which point the caller must invoke
- * hvcs_get_partner_info() and then call this function again. If,
- * for a second time, -EINVAL is returned then it indicates that
- * there is probably already a partner connection registered to a
- * different vty-server@ vdevice. It is also possible that a second
- * -EINVAL may indicate that one of the parms is not valid, for
- * instance if the link was removed between the vty-server@ vdevice
- * and the vty@ vdevice that you are trying to open. Don't shoot the
- * messenger. Firmware implemented it this way.
- */
-int hvcs_register_connection( unsigned int unit_address,
- unsigned int p_partition_ID, unsigned int p_unit_address)
-{
- long retval;
- retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
- p_partition_ID, p_unit_address);
- return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_register_connection);
-
-/*
- * If -EBUSY is returned continue to call this function
- * until 0 is returned.
- */
-int hvcs_free_connection(unsigned int unit_address)
-{
- long retval;
- retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
- return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_free_connection);
#include <asm/mmu_context.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/abs_addr.h>
-#include <linux/spinlock.h>
-
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
-/*
- * Very primitive algorithm for picking up a lock
- */
-static inline void iSeries_hlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
+#if 0
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
-static inline void iSeries_hunlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+#include <asm/cputable.h>
+#endif
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary,
if (secondary)
return -1;
- iSeries_hlock(hpte_group);
-
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
- BUG_ON(lhpte.dw0.dw0.v);
+ if (lhpte.dw0.dw0.v)
+ panic("select_hpte_slot found entry already valid\n");
- if (slot == -1) { /* No available entry found in either group */
- iSeries_hunlock(hpte_group);
+ if (slot == -1) /* No available entry found in either group */
return -1;
- }
if (slot < 0) { /* MSB set means secondary group */
secondary = 1;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
- iSeries_hunlock(hpte_group);
-
return (secondary << 3) | (slot & 7);
}
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
- iSeries_hlock(hpte_group);
-
for (i = 0; i < HPTES_PER_GROUP; i++) {
lhpte.dw0.dword0 =
iSeries_hpte_getword0(hpte_group + slot_offset);
if (!lhpte.dw0.dw0.bolted) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
- iSeries_hunlock(hpte_group);
return i;
}
slot_offset &= 0x7;
}
- iSeries_hunlock(hpte_group);
-
return -1;
}
HPTE hpte;
unsigned long avpn = va >> 23;
- iSeries_hlock(slot);
-
HvCallHpt_get(&hpte, slot);
if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) {
HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
- iSeries_hunlock(slot);
return 0;
}
- iSeries_hunlock(slot);
-
return -1;
}
{
HPTE lhpte;
unsigned long avpn = va >> 23;
- unsigned long flags;
-
- local_irq_save(flags);
-
- iSeries_hlock(slot);
lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);
if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v)
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
-
- iSeries_hunlock(slot);
-
- local_irq_restore(flags);
}
void hpte_init_iSeries(void)
#endif /* CONFIG_SMP */
+/* XXX Make this into free_irq() - Anton */
+
+/* This could be promoted to a real free_irq() ... */
+static int
+do_free_irq(int irq, void* dev_id)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ struct irqaction **p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ kfree(action);
+ return 0;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return -ENOENT;
+}
+
+
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
if (irq >= NR_IRQS)
return -EINVAL;
if (!handler)
- return -EINVAL;
+ /* We could implement really free_irq() instead of that... */
+ return do_free_irq(irq, dev_id);
action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
void free_irq(unsigned int irq, void *dev_id)
{
- irq_desc_t *desc = get_irq_desc(irq);
- struct irqaction **p;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
- if (action) {
- struct irqaction **pp = p;
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- mask_irq(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- /* Wait to make sure it's not being used on another CPU */
- synchronize_irq(irq);
- kfree(action);
- return;
- }
- printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- break;
- }
- return;
+ request_irq(irq, NULL, 0, NULL, dev_id);
}
EXPORT_SYMBOL(free_irq);
}
#ifdef CONFIG_PPC_ISERIES
-void do_IRQ(struct pt_regs *regs)
+int do_IRQ(struct pt_regs *regs)
{
struct paca_struct *lpaca;
struct ItLpQueue *lpq;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
+
+ return 1; /* lets ret_from_int know we can do checks */
}
#else /* CONFIG_PPC_ISERIES */
-void do_IRQ(struct pt_regs *regs)
+int do_IRQ(struct pt_regs *regs)
{
- int irq;
+ int irq, first = 1;
irq_enter();
}
#endif
- irq = ppc_md.get_irq(regs);
-
- if (irq >= 0)
+ /*
+ * Every arch is required to implement ppc_md.get_irq.
+ * This function will either return an irq number or -1 to
+ * indicate there are no more pending. But the first time
+ * through the loop this means there wasn't an IRQ pending.
+ * The value -2 is for buggy hardware and means that this IRQ
+ * has already been handled. -- Tom
+ */
+ while ((irq = ppc_md.get_irq(regs)) >= 0) {
ppc_irq_dispatch_handler(regs, irq);
- else
+ first = 0;
+ }
+ if (irq != -2 && first)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
+
+ return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
.llong .compat_sys_sched_setaffinity
.llong .compat_sys_sched_getaffinity
.llong .sys_ni_syscall
-#ifdef CONFIG_TUX
- .llong .__sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .llong .sys_tux
-# else
- .llong .sys_ni_syscall
-# endif
-#endif
+ .llong .sys_ni_syscall /* 225 - reserved for tux */
.llong .sys32_sendfile64
.llong .compat_sys_io_setup
.llong .sys_io_destroy
.llong .sys_sched_setaffinity
.llong .sys_sched_getaffinity
.llong .sys_ni_syscall
-#ifdef CONFIG_TUX
- .llong .__sys_tux
-#else
-# ifdef CONFIG_TUX_MODULE
- .llong .sys_tux
-# else
- .llong .sys_ni_syscall
-# endif
-#endif
+ .llong .sys_ni_syscall /* 225 - reserved for tux */
.llong .sys_ni_syscall /* 32bit only sendfile64 */
.llong .sys_io_setup
.llong .sys_io_destroy
struct device_node *np;
int i;
unsigned int *addrp;
- unsigned char* chrp_int_ack_special = NULL;
+ unsigned char* chrp_int_ack_special = 0;
unsigned char init_senses[NR_IRQS - NUM_ISA_INTERRUPTS];
int nmi_irq = -1;
#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(openpic_vec_ipi, openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", NULL);
+ "IPI0 (call function)", 0);
request_irq(openpic_vec_ipi+1, openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", NULL);
+ "IPI1 (reschedule)", 0);
request_irq(openpic_vec_ipi+2, openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (unused)", NULL);
+ "IPI2 (unused)", 0);
request_irq(openpic_vec_ipi+3, openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (debugger break)", NULL);
+ "IPI3 (debugger break)", 0);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(openpic_vec_ipi+i);
HPTE *hptep = htab_data.htab + slot;
Hpte_dword0 dw0;
unsigned long avpn = va >> 23;
+ unsigned long flags;
int ret = 0;
if (large)
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
return ret;
if (large)
avpn &= ~0x1UL;
- local_irq_save(flags);
pSeries_lock_hpte(hptep);
dw0 = hptep->dw0.dw0;
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
- local_irq_restore(flags);
}
static void pSeries_flush_hash_range(unsigned long context,
/* XXX fix for large ptes */
unsigned long large = 0;
- local_irq_save(flags);
-
j = 0;
for (i = 0; i < number; i++) {
if ((batch->addr[i] >= USER_START) &&
} else {
/* XXX double check that it is safe to take this late */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock(&pSeries_tlbie_lock);
+ spin_lock_irqsave(&pSeries_tlbie_lock, flags);
asm volatile("ptesync":::"memory");
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock(&pSeries_tlbie_lock);
+ spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
-
- local_irq_restore(flags);
}
void hpte_init_pSeries(void)
bus = pci_bus_b(ln);
busdn = PCI_GET_DN(bus);
- dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", NULL);
+ dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", 0);
if (dma_window) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
{
unsigned int *dma_window;
- dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", NULL);
+ dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", 0);
if (!dma_window)
panic("iommu_table_setparms_lpar: device %s has no"
}
/* now we have the stdout node; figure out what type of device it is. */
- name = (char *)get_property(stdout_node, "name", NULL);
+ name = (char *)get_property(stdout_node, "name", 0);
if (!name) {
printk(KERN_WARNING "stdout node missing 'name' property!\n");
goto out;
if (strncmp(name, "vty", 3) == 0) {
if (device_is_compatible(stdout_node, "hvterm1")) {
- termno = (u32 *)get_property(stdout_node, "reg", NULL);
+ termno = (u32 *)get_property(stdout_node, "reg", 0);
if (termno) {
vtermno = termno[0];
ppc_md.udbg_putc = udbg_putcLP;
isa_dn = of_find_node_by_type(NULL, "isa");
if (isa_dn) {
isa_io_base = pci_io_base;
+ of_node_put(isa_dn);
pci_process_ISA_OF_ranges(isa_dn,
hose->io_base_phys,
hose->io_base_virt);
- of_node_put(isa_dn);
/* Allow all IO */
io_page_mask = -1;
}
BUG(); /* No I/O resource for this PHB? */
if (request_resource(&ioport_resource, res))
- printk(KERN_ERR "Failed to request IO on "
- "PCI domain %d\n", pci_domain_nr(bus));
-
+ printk(KERN_ERR "Failed to request IO"
+ "on hose %d\n", 0 /* FIXME */);
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
BUG(); /* No memory resource for this PHB? */
bus->resource[i+1] = res;
if (res->flags && request_resource(&iomem_resource, res))
- printk(KERN_ERR "Failed to request MEM on "
- "PCI domain %d\n",
- pci_domain_nr(bus));
+ printk(KERN_ERR "Failed to request MEM"
+ "on hose %d\n", 0 /* FIXME */);
}
} else if (pci_probe_only &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
/* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled.
- *
- * ABI requires stack to be 128-byte aligned
*/
-char emergency_stack[PAGE_SIZE * NR_CPUS] __attribute__((aligned(128)));
+char emergency_stack[PAGE_SIZE * NR_CPUS];
/* The Paca is an array with one entry per processor. Each contains an
* ItLpPaca, which contains the information shared between the
.stab_addr = (asrv), /* Virt pointer to segment table */ \
.emergency_sp = &emergency_stack[((number)+1) * PAGE_SIZE], \
.cpu_start = (start), /* Processor start */ \
+ .stab_next_rr = 1, \
.lppaca = { \
.xDesc = 0xd397d781, /* "LpPa" */ \
.xSize = sizeof(struct ItLpPaca), \
#ifdef CONFIG_PPC_ISERIES
PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR),
#else
- PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
+ PACAINITDATA( 0, 1, 0, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
#endif
- PACAINITDATA( 1, 0, NULL, 0, 0),
- PACAINITDATA( 2, 0, NULL, 0, 0),
- PACAINITDATA( 3, 0, NULL, 0, 0),
- PACAINITDATA( 4, 0, NULL, 0, 0),
- PACAINITDATA( 5, 0, NULL, 0, 0),
- PACAINITDATA( 6, 0, NULL, 0, 0),
- PACAINITDATA( 7, 0, NULL, 0, 0),
- PACAINITDATA( 8, 0, NULL, 0, 0),
- PACAINITDATA( 9, 0, NULL, 0, 0),
- PACAINITDATA(10, 0, NULL, 0, 0),
- PACAINITDATA(11, 0, NULL, 0, 0),
- PACAINITDATA(12, 0, NULL, 0, 0),
- PACAINITDATA(13, 0, NULL, 0, 0),
- PACAINITDATA(14, 0, NULL, 0, 0),
- PACAINITDATA(15, 0, NULL, 0, 0),
- PACAINITDATA(16, 0, NULL, 0, 0),
- PACAINITDATA(17, 0, NULL, 0, 0),
- PACAINITDATA(18, 0, NULL, 0, 0),
- PACAINITDATA(19, 0, NULL, 0, 0),
- PACAINITDATA(20, 0, NULL, 0, 0),
- PACAINITDATA(21, 0, NULL, 0, 0),
- PACAINITDATA(22, 0, NULL, 0, 0),
- PACAINITDATA(23, 0, NULL, 0, 0),
- PACAINITDATA(24, 0, NULL, 0, 0),
- PACAINITDATA(25, 0, NULL, 0, 0),
- PACAINITDATA(26, 0, NULL, 0, 0),
- PACAINITDATA(27, 0, NULL, 0, 0),
- PACAINITDATA(28, 0, NULL, 0, 0),
- PACAINITDATA(29, 0, NULL, 0, 0),
- PACAINITDATA(30, 0, NULL, 0, 0),
- PACAINITDATA(31, 0, NULL, 0, 0),
+ PACAINITDATA( 1, 0, 0, 0, 0),
+ PACAINITDATA( 2, 0, 0, 0, 0),
+ PACAINITDATA( 3, 0, 0, 0, 0),
+ PACAINITDATA( 4, 0, 0, 0, 0),
+ PACAINITDATA( 5, 0, 0, 0, 0),
+ PACAINITDATA( 6, 0, 0, 0, 0),
+ PACAINITDATA( 7, 0, 0, 0, 0),
+ PACAINITDATA( 8, 0, 0, 0, 0),
+ PACAINITDATA( 9, 0, 0, 0, 0),
+ PACAINITDATA(10, 0, 0, 0, 0),
+ PACAINITDATA(11, 0, 0, 0, 0),
+ PACAINITDATA(12, 0, 0, 0, 0),
+ PACAINITDATA(13, 0, 0, 0, 0),
+ PACAINITDATA(14, 0, 0, 0, 0),
+ PACAINITDATA(15, 0, 0, 0, 0),
+ PACAINITDATA(16, 0, 0, 0, 0),
+ PACAINITDATA(17, 0, 0, 0, 0),
+ PACAINITDATA(18, 0, 0, 0, 0),
+ PACAINITDATA(19, 0, 0, 0, 0),
+ PACAINITDATA(20, 0, 0, 0, 0),
+ PACAINITDATA(21, 0, 0, 0, 0),
+ PACAINITDATA(22, 0, 0, 0, 0),
+ PACAINITDATA(23, 0, 0, 0, 0),
+ PACAINITDATA(24, 0, 0, 0, 0),
+ PACAINITDATA(25, 0, 0, 0, 0),
+ PACAINITDATA(26, 0, 0, 0, 0),
+ PACAINITDATA(27, 0, 0, 0, 0),
+ PACAINITDATA(28, 0, 0, 0, 0),
+ PACAINITDATA(29, 0, 0, 0, 0),
+ PACAINITDATA(30, 0, 0, 0, 0),
+ PACAINITDATA(31, 0, 0, 0, 0),
#if NR_CPUS > 32
- PACAINITDATA(32, 0, NULL, 0, 0),
- PACAINITDATA(33, 0, NULL, 0, 0),
- PACAINITDATA(34, 0, NULL, 0, 0),
- PACAINITDATA(35, 0, NULL, 0, 0),
- PACAINITDATA(36, 0, NULL, 0, 0),
- PACAINITDATA(37, 0, NULL, 0, 0),
- PACAINITDATA(38, 0, NULL, 0, 0),
- PACAINITDATA(39, 0, NULL, 0, 0),
- PACAINITDATA(40, 0, NULL, 0, 0),
- PACAINITDATA(41, 0, NULL, 0, 0),
- PACAINITDATA(42, 0, NULL, 0, 0),
- PACAINITDATA(43, 0, NULL, 0, 0),
- PACAINITDATA(44, 0, NULL, 0, 0),
- PACAINITDATA(45, 0, NULL, 0, 0),
- PACAINITDATA(46, 0, NULL, 0, 0),
- PACAINITDATA(47, 0, NULL, 0, 0),
- PACAINITDATA(48, 0, NULL, 0, 0),
- PACAINITDATA(49, 0, NULL, 0, 0),
- PACAINITDATA(50, 0, NULL, 0, 0),
- PACAINITDATA(51, 0, NULL, 0, 0),
- PACAINITDATA(52, 0, NULL, 0, 0),
- PACAINITDATA(53, 0, NULL, 0, 0),
- PACAINITDATA(54, 0, NULL, 0, 0),
- PACAINITDATA(55, 0, NULL, 0, 0),
- PACAINITDATA(56, 0, NULL, 0, 0),
- PACAINITDATA(57, 0, NULL, 0, 0),
- PACAINITDATA(58, 0, NULL, 0, 0),
- PACAINITDATA(59, 0, NULL, 0, 0),
- PACAINITDATA(60, 0, NULL, 0, 0),
- PACAINITDATA(61, 0, NULL, 0, 0),
- PACAINITDATA(62, 0, NULL, 0, 0),
- PACAINITDATA(63, 0, NULL, 0, 0),
+ PACAINITDATA(32, 0, 0, 0, 0),
+ PACAINITDATA(33, 0, 0, 0, 0),
+ PACAINITDATA(34, 0, 0, 0, 0),
+ PACAINITDATA(35, 0, 0, 0, 0),
+ PACAINITDATA(36, 0, 0, 0, 0),
+ PACAINITDATA(37, 0, 0, 0, 0),
+ PACAINITDATA(38, 0, 0, 0, 0),
+ PACAINITDATA(39, 0, 0, 0, 0),
+ PACAINITDATA(40, 0, 0, 0, 0),
+ PACAINITDATA(41, 0, 0, 0, 0),
+ PACAINITDATA(42, 0, 0, 0, 0),
+ PACAINITDATA(43, 0, 0, 0, 0),
+ PACAINITDATA(44, 0, 0, 0, 0),
+ PACAINITDATA(45, 0, 0, 0, 0),
+ PACAINITDATA(46, 0, 0, 0, 0),
+ PACAINITDATA(47, 0, 0, 0, 0),
+ PACAINITDATA(48, 0, 0, 0, 0),
+ PACAINITDATA(49, 0, 0, 0, 0),
+ PACAINITDATA(50, 0, 0, 0, 0),
+ PACAINITDATA(51, 0, 0, 0, 0),
+ PACAINITDATA(52, 0, 0, 0, 0),
+ PACAINITDATA(53, 0, 0, 0, 0),
+ PACAINITDATA(54, 0, 0, 0, 0),
+ PACAINITDATA(55, 0, 0, 0, 0),
+ PACAINITDATA(56, 0, 0, 0, 0),
+ PACAINITDATA(57, 0, 0, 0, 0),
+ PACAINITDATA(58, 0, 0, 0, 0),
+ PACAINITDATA(59, 0, 0, 0, 0),
+ PACAINITDATA(60, 0, 0, 0, 0),
+ PACAINITDATA(61, 0, 0, 0, 0),
+ PACAINITDATA(62, 0, 0, 0, 0),
+ PACAINITDATA(63, 0, 0, 0, 0),
#if NR_CPUS > 64
- PACAINITDATA(64, 0, NULL, 0, 0),
- PACAINITDATA(65, 0, NULL, 0, 0),
- PACAINITDATA(66, 0, NULL, 0, 0),
- PACAINITDATA(67, 0, NULL, 0, 0),
- PACAINITDATA(68, 0, NULL, 0, 0),
- PACAINITDATA(69, 0, NULL, 0, 0),
- PACAINITDATA(70, 0, NULL, 0, 0),
- PACAINITDATA(71, 0, NULL, 0, 0),
- PACAINITDATA(72, 0, NULL, 0, 0),
- PACAINITDATA(73, 0, NULL, 0, 0),
- PACAINITDATA(74, 0, NULL, 0, 0),
- PACAINITDATA(75, 0, NULL, 0, 0),
- PACAINITDATA(76, 0, NULL, 0, 0),
- PACAINITDATA(77, 0, NULL, 0, 0),
- PACAINITDATA(78, 0, NULL, 0, 0),
- PACAINITDATA(79, 0, NULL, 0, 0),
- PACAINITDATA(80, 0, NULL, 0, 0),
- PACAINITDATA(81, 0, NULL, 0, 0),
- PACAINITDATA(82, 0, NULL, 0, 0),
- PACAINITDATA(83, 0, NULL, 0, 0),
- PACAINITDATA(84, 0, NULL, 0, 0),
- PACAINITDATA(85, 0, NULL, 0, 0),
- PACAINITDATA(86, 0, NULL, 0, 0),
- PACAINITDATA(87, 0, NULL, 0, 0),
- PACAINITDATA(88, 0, NULL, 0, 0),
- PACAINITDATA(89, 0, NULL, 0, 0),
- PACAINITDATA(90, 0, NULL, 0, 0),
- PACAINITDATA(91, 0, NULL, 0, 0),
- PACAINITDATA(92, 0, NULL, 0, 0),
- PACAINITDATA(93, 0, NULL, 0, 0),
- PACAINITDATA(94, 0, NULL, 0, 0),
- PACAINITDATA(95, 0, NULL, 0, 0),
- PACAINITDATA(96, 0, NULL, 0, 0),
- PACAINITDATA(97, 0, NULL, 0, 0),
- PACAINITDATA(98, 0, NULL, 0, 0),
- PACAINITDATA(99, 0, NULL, 0, 0),
- PACAINITDATA(100, 0, NULL, 0, 0),
- PACAINITDATA(101, 0, NULL, 0, 0),
- PACAINITDATA(102, 0, NULL, 0, 0),
- PACAINITDATA(103, 0, NULL, 0, 0),
- PACAINITDATA(104, 0, NULL, 0, 0),
- PACAINITDATA(105, 0, NULL, 0, 0),
- PACAINITDATA(106, 0, NULL, 0, 0),
- PACAINITDATA(107, 0, NULL, 0, 0),
- PACAINITDATA(108, 0, NULL, 0, 0),
- PACAINITDATA(109, 0, NULL, 0, 0),
- PACAINITDATA(110, 0, NULL, 0, 0),
- PACAINITDATA(111, 0, NULL, 0, 0),
- PACAINITDATA(112, 0, NULL, 0, 0),
- PACAINITDATA(113, 0, NULL, 0, 0),
- PACAINITDATA(114, 0, NULL, 0, 0),
- PACAINITDATA(115, 0, NULL, 0, 0),
- PACAINITDATA(116, 0, NULL, 0, 0),
- PACAINITDATA(117, 0, NULL, 0, 0),
- PACAINITDATA(118, 0, NULL, 0, 0),
- PACAINITDATA(119, 0, NULL, 0, 0),
- PACAINITDATA(120, 0, NULL, 0, 0),
- PACAINITDATA(121, 0, NULL, 0, 0),
- PACAINITDATA(122, 0, NULL, 0, 0),
- PACAINITDATA(123, 0, NULL, 0, 0),
- PACAINITDATA(124, 0, NULL, 0, 0),
- PACAINITDATA(125, 0, NULL, 0, 0),
- PACAINITDATA(126, 0, NULL, 0, 0),
- PACAINITDATA(127, 0, NULL, 0, 0),
+ PACAINITDATA(64, 0, 0, 0, 0),
+ PACAINITDATA(65, 0, 0, 0, 0),
+ PACAINITDATA(66, 0, 0, 0, 0),
+ PACAINITDATA(67, 0, 0, 0, 0),
+ PACAINITDATA(68, 0, 0, 0, 0),
+ PACAINITDATA(69, 0, 0, 0, 0),
+ PACAINITDATA(70, 0, 0, 0, 0),
+ PACAINITDATA(71, 0, 0, 0, 0),
+ PACAINITDATA(72, 0, 0, 0, 0),
+ PACAINITDATA(73, 0, 0, 0, 0),
+ PACAINITDATA(74, 0, 0, 0, 0),
+ PACAINITDATA(75, 0, 0, 0, 0),
+ PACAINITDATA(76, 0, 0, 0, 0),
+ PACAINITDATA(77, 0, 0, 0, 0),
+ PACAINITDATA(78, 0, 0, 0, 0),
+ PACAINITDATA(79, 0, 0, 0, 0),
+ PACAINITDATA(80, 0, 0, 0, 0),
+ PACAINITDATA(81, 0, 0, 0, 0),
+ PACAINITDATA(82, 0, 0, 0, 0),
+ PACAINITDATA(83, 0, 0, 0, 0),
+ PACAINITDATA(84, 0, 0, 0, 0),
+ PACAINITDATA(85, 0, 0, 0, 0),
+ PACAINITDATA(86, 0, 0, 0, 0),
+ PACAINITDATA(87, 0, 0, 0, 0),
+ PACAINITDATA(88, 0, 0, 0, 0),
+ PACAINITDATA(89, 0, 0, 0, 0),
+ PACAINITDATA(90, 0, 0, 0, 0),
+ PACAINITDATA(91, 0, 0, 0, 0),
+ PACAINITDATA(92, 0, 0, 0, 0),
+ PACAINITDATA(93, 0, 0, 0, 0),
+ PACAINITDATA(94, 0, 0, 0, 0),
+ PACAINITDATA(95, 0, 0, 0, 0),
+ PACAINITDATA(96, 0, 0, 0, 0),
+ PACAINITDATA(97, 0, 0, 0, 0),
+ PACAINITDATA(98, 0, 0, 0, 0),
+ PACAINITDATA(99, 0, 0, 0, 0),
+ PACAINITDATA(100, 0, 0, 0, 0),
+ PACAINITDATA(101, 0, 0, 0, 0),
+ PACAINITDATA(102, 0, 0, 0, 0),
+ PACAINITDATA(103, 0, 0, 0, 0),
+ PACAINITDATA(104, 0, 0, 0, 0),
+ PACAINITDATA(105, 0, 0, 0, 0),
+ PACAINITDATA(106, 0, 0, 0, 0),
+ PACAINITDATA(107, 0, 0, 0, 0),
+ PACAINITDATA(108, 0, 0, 0, 0),
+ PACAINITDATA(109, 0, 0, 0, 0),
+ PACAINITDATA(110, 0, 0, 0, 0),
+ PACAINITDATA(111, 0, 0, 0, 0),
+ PACAINITDATA(112, 0, 0, 0, 0),
+ PACAINITDATA(113, 0, 0, 0, 0),
+ PACAINITDATA(114, 0, 0, 0, 0),
+ PACAINITDATA(115, 0, 0, 0, 0),
+ PACAINITDATA(116, 0, 0, 0, 0),
+ PACAINITDATA(117, 0, 0, 0, 0),
+ PACAINITDATA(118, 0, 0, 0, 0),
+ PACAINITDATA(119, 0, 0, 0, 0),
+ PACAINITDATA(120, 0, 0, 0, 0),
+ PACAINITDATA(121, 0, 0, 0, 0),
+ PACAINITDATA(122, 0, 0, 0, 0),
+ PACAINITDATA(123, 0, 0, 0, 0),
+ PACAINITDATA(124, 0, 0, 0, 0),
+ PACAINITDATA(125, 0, 0, 0, 0),
+ PACAINITDATA(126, 0, 0, 0, 0),
+ PACAINITDATA(127, 0, 0, 0, 0),
#endif
#endif
};
*******************************************************************/
struct device_node;
typedef void *(*traverse_func)(struct device_node *me, void *data);
-void *traverse_pci_devices(struct device_node *start, traverse_func pre,
- void *data);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
+void *traverse_all_pci_devices(traverse_func pre);
void pci_devs_phb_init(void);
void pci_fix_bus_sysdata(void);
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "pci.h"
-/*
- * Traverse_func that inits the PCI fields of the device node.
+/* Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
*/
-static void * __init update_dn_pci_info(struct device_node *dn, void *data)
+static void * __init
+update_dn_pci_info(struct device_node *dn, void *data)
{
- struct pci_controller *phb = data;
+#ifdef CONFIG_PPC_PSERIES
+ struct pci_controller *phb = (struct pci_controller *)data;
u32 *regs;
- char *device_type = get_property(dn, "device_type", NULL);
+ char *device_type = get_property(dn, "device_type", 0);
char *model;
dn->phb = phb;
- if (device_type && (strcmp(device_type, "pci") == 0) &&
- (get_property(dn, "class-code", NULL) == 0)) {
+ if (device_type && strcmp(device_type, "pci") == 0 && get_property(dn, "class-code", 0) == 0) {
/* special case for PHB's. Sigh. */
- regs = (u32 *)get_property(dn, "bus-range", NULL);
+ regs = (u32 *)get_property(dn, "bus-range", 0);
dn->busno = regs[0];
model = (char *)get_property(dn, "model", NULL);
else
dn->devfn = 0; /* assumption */
} else {
- regs = (u32 *)get_property(dn, "reg", NULL);
+ regs = (u32 *)get_property(dn, "reg", 0);
if (regs) {
/* First register entry is addr (00BBSS00) */
dn->busno = (regs[0] >> 16) & 0xff;
dn->devfn = (regs[0] >> 8) & 0xff;
}
}
+#endif
return NULL;
}
-/*
+/******************************************************************
* Traverse a device tree stopping each PCI device in the tree.
* This is done depth first. As each node is processed, a "pre"
- * function is called and the children are processed recursively.
+ * function is called, the children are processed recursively, and
+ * then a "post" function is called.
*
- * The "pre" func returns a value. If non-zero is returned from
- * the "pre" func, the traversal stops and this value is returned.
- * This return value is useful when using traverse as a method of
- * finding a device.
+ * The "pre" and "post" funcs return a value. If non-zero
+ * is returned from the "pre" func, the traversal stops and this
+ * value is returned. The return value from "post" is not used.
+ * This return value is useful when using traverse as
+ * a method of finding a device.
*
- * NOTE: we do not run the func for devices that do not appear to
+ * NOTE: we do not run the funcs for devices that do not appear to
* be PCI except for the start node which we assume (this is good
* because the start node is often a phb which may be missing PCI
* properties).
* We use the class-code as an indicator. If we run into
* one of these nodes we also assume its siblings are non-pci for
* performance.
- */
-void *traverse_pci_devices(struct device_node *start, traverse_func pre,
- void *data)
+ *
+ ******************************************************************/
+void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data)
{
struct device_node *dn, *nextdn;
void *ret;
- if (pre && ((ret = pre(start, data)) != NULL))
+ if (pre && (ret = pre(start, data)) != NULL)
return ret;
for (dn = start->child; dn; dn = nextdn) {
nextdn = NULL;
- if (get_property(dn, "class-code", NULL)) {
- if (pre && ((ret = pre(dn, data)) != NULL))
+#ifdef CONFIG_PPC_PSERIES
+ if (get_property(dn, "class-code", 0)) {
+ if (pre && (ret = pre(dn, data)) != NULL)
return ret;
- if (dn->child)
+ if (dn->child) {
/* Depth first...do children */
nextdn = dn->child;
- else if (dn->sibling)
+ } else if (dn->sibling) {
/* ok, try next sibling instead. */
nextdn = dn->sibling;
+ } else {
+ /* no more children or siblings...call "post" */
+ if (post)
+ post(dn, data);
+ }
}
+#endif
if (!nextdn) {
/* Walk up to next valid sibling. */
do {
return NULL;
}
-/*
- * Same as traverse_pci_devices except this does it for all phbs.
+/* Same as traverse_pci_devices except this does it for all phbs.
*/
-static void *traverse_all_pci_devices(traverse_func pre)
+void *traverse_all_pci_devices(traverse_func pre)
{
- struct pci_controller *phb;
+ struct pci_controller* phb;
void *ret;
-
- for (phb = hose_head; phb; phb = phb->next)
- if ((ret = traverse_pci_devices(phb->arch_data, pre, phb))
- != NULL)
+ for (phb=hose_head;phb;phb=phb->next)
+ if ((ret = traverse_pci_devices((struct device_node *)phb->arch_data, pre, NULL, phb)) != NULL)
return ret;
return NULL;
}
-/*
- * Traversal func that looks for a <busno,devfcn> value.
+/* Traversal func that looks for a <busno,devfcn> value.
* If found, the device_node is returned (thus terminating the traversal).
*/
-static void *is_devfn_node(struct device_node *dn, void *data)
+static void *
+is_devfn_node(struct device_node *dn, void *data)
{
int busno = ((unsigned long)data >> 8) & 0xff;
int devfn = ((unsigned long)data) & 0xff;
- return ((devfn == dn->devfn) && (busno == dn->busno)) ? dn : NULL;
+ return (devfn == dn->devfn && busno == dn->busno) ? dn : NULL;
}
-/*
- * This is the "slow" path for looking up a device_node from a
+/* This is the "slow" path for looking up a device_node from a
* pci_dev. It will hunt for the device under its parent's
* phb and then update sysdata for a future fastpath.
*
*/
struct device_node *fetch_dev_dn(struct pci_dev *dev)
{
- struct device_node *orig_dn = dev->sysdata;
+ struct device_node *orig_dn = (struct device_node *)dev->sysdata;
struct pci_controller *phb = orig_dn->phb; /* assume same phb as orig_dn */
struct device_node *phb_dn;
struct device_node *dn;
unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
- phb_dn = phb->arch_data;
- dn = traverse_pci_devices(phb_dn, is_devfn_node, (void *)searchval);
+ phb_dn = (struct device_node *)(phb->arch_data);
+ dn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_node, NULL, (void *)searchval);
if (dn) {
dev->sysdata = dn;
/* ToDo: call some device init hook here */
EXPORT_SYMBOL(fetch_dev_dn);
-/*
+/******************************************************************
* Actually initialize the phbs.
* The buswalk on this phb has not happened yet.
- */
-void __init pci_devs_phb_init(void)
+ ******************************************************************/
+void __init
+pci_devs_phb_init(void)
{
/* This must be done first so the device nodes have valid pci info! */
traverse_all_pci_devices(update_dn_pci_info);
}
-static void __init pci_fixup_bus_sysdata_list(struct list_head *bus_list)
+static void __init
+pci_fixup_bus_sysdata_list(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
- for (ln = bus_list->next; ln != bus_list; ln = ln->next) {
+ for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
bus = pci_bus_b(ln);
if (bus->self)
bus->sysdata = bus->self->sysdata;
}
}
-/*
+/******************************************************************
* Fixup the bus->sysdata ptrs to point to the bus' device_node.
* This is done late in pcibios_init(). We do this mostly for
* sanity, but pci_dma.c uses these at DMA time so they must be
* To do this we recurse down the bus hierarchy. Note that PHB's
* have bus->self == NULL, but fortunately bus->sysdata is already
* correct in this case.
- */
-void __init pci_fix_bus_sysdata(void)
+ ******************************************************************/
+void __init
+pci_fix_bus_sysdata(void)
{
pci_fixup_bus_sysdata_list(&pci_root_buses);
}
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", NULL);
+ class_code = (unsigned int *) get_property(node, "class-code", 0);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
{
struct proc_dir_entry *root;
- root = proc_mkdir("ppc64", NULL);
+ root = proc_mkdir("ppc64", 0);
if (!root)
return 1;
if (!proc_mkdir("rtas", root))
return 1;
- if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
+ if (!proc_symlink("rtas", 0, "ppc64/rtas"))
return 1;
return 0;
return (file->f_pos = new);
}
-static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t page_map_read( struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
+ unsigned pos = *ppos;
struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
- return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
+
+ if ( pos >= dp->size )
+ return 0;
+ if ( nbytes >= dp->size )
+ nbytes = dp->size;
+ if ( pos + nbytes > dp->size )
+ nbytes = dp->size - pos;
+
+ copy_to_user( buf, (char *)dp->data + pos, nbytes );
+ *ppos = pos + nbytes;
+ return nbytes;
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
/* prom structure */
struct prom_t prom;
-char *prom_display_paths[FB_MAX] __initdata = { NULL, };
+char *prom_display_paths[FB_MAX] __initdata = { 0, };
phandle prom_display_nodes[FB_MAX] __initdata;
unsigned int prom_num_displays = 0;
-char *of_stdout_device = NULL;
+char *of_stdout_device = 0;
static int iommu_force_on;
int ppc64_iommu_off;
#define MAX_PHB (32 * 6) /* 32 drawers * 6 PHBs/drawer */
struct of_tce_table of_tce_table[MAX_PHB + 1];
-char *bootpath = NULL;
-char *bootdevice = NULL;
+char *bootpath = 0;
+char *bootdevice = 0;
int boot_cpuid = 0;
#define MAX_CPU_THREADS 2
-struct device_node *allnodes = NULL;
+struct device_node *allnodes = 0;
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
*/
unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
va_list list;
-
+
_prom->args.service = ADDR(service);
_prom->args.nargs = nargs;
_prom->args.nret = nret;
- _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
+ _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
- va_start(list, nret);
+ va_start(list, nret);
for (i=0; i < nargs; i++)
_prom->args.args[i] = va_arg(list, prom_arg_t);
- va_end(list);
+ va_end(list);
for (i=0; i < nret ;i++)
_prom->args.rets[i] = 0;
static void __init prom_print_hex(unsigned long val)
{
unsigned long offset = reloc_offset();
- int i, nibbles = sizeof(val)*2;
- char buf[sizeof(val)*2+1];
+ int i, nibbles = sizeof(val)*2;
+ char buf[sizeof(val)*2+1];
struct prom_t *_prom = PTRRELOC(&prom);
- for (i = nibbles-1; i >= 0; i--) {
- buf[i] = (val & 0xf) + '0';
- if (buf[i] > '9')
- buf[i] += ('a'-'0'-10);
- val >>= 4;
- }
- buf[nibbles] = '\0';
+ for (i = nibbles-1; i >= 0; i--) {
+ buf[i] = (val & 0xf) + '0';
+ if (buf[i] > '9')
+ buf[i] += ('a'-'0'-10);
+ val >>= 4;
+ }
+ buf[nibbles] = '\0';
call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
}
{
phandle node;
char type[64];
- unsigned long num_cpus = 0;
- unsigned long offset = reloc_offset();
+ unsigned long num_cpus = 0;
+ unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct naca_struct *_naca = RELOC(naca);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct naca_struct *_naca = RELOC(naca);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
/* NOTE: _naca->debug_switch is already initialized. */
prom_debug("prom_initialize_naca: start...\n");
_naca->pftSize = 0; /* ilog2 of htab size. computed below. */
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (!strcmp(type, RELOC("cpu"))) {
+ if (!strcmp(type, RELOC("cpu"))) {
num_cpus += 1;
/* We're assuming *all* of the CPUs have the same
_naca->pftSize = pft_size[1];
}
}
- } else if (!strcmp(type, RELOC("serial"))) {
+ } else if (!strcmp(type, RELOC("serial"))) {
phandle isa, pci;
struct isa_reg_property reg;
union pci_range ranges;
((((unsigned long)ranges.pci64.phys_hi) << 32) |
(ranges.pci64.phys_lo)) + reg.address;
}
- }
+ }
}
if (_systemcfg->platform == PLATFORM_POWERMAC)
}
/* We gotta have at least 1 cpu... */
- if ( (_systemcfg->processorCount = num_cpus) < 1 )
- PROM_BUG();
+ if ( (_systemcfg->processorCount = num_cpus) < 1 )
+ PROM_BUG();
_systemcfg->physicalMemorySize = lmb_phys_mem_size();
_systemcfg->version.minor = SYSTEMCFG_MINOR;
_systemcfg->processor = _get_PVR();
- prom_debug("systemcfg->processorCount = 0x%x\n",
+ prom_debug("systemcfg->processorCount = 0x%x\n",
_systemcfg->processorCount);
- prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
+ prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
_systemcfg->physicalMemorySize);
- prom_debug("naca->pftSize = 0x%x\n",
+ prom_debug("naca->pftSize = 0x%x\n",
_naca->pftSize);
- prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
_systemcfg->dCacheL1LineSize);
- prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
_systemcfg->iCacheL1LineSize);
- prom_debug("naca->serialPortAddr = 0x%x\n",
+ prom_debug("naca->serialPortAddr = 0x%x\n",
_naca->serialPortAddr);
- prom_debug("naca->interrupt_controller = 0x%x\n",
+ prom_debug("naca->interrupt_controller = 0x%x\n",
_naca->interrupt_controller);
- prom_debug("systemcfg->platform = 0x%x\n",
+ prom_debug("systemcfg->platform = 0x%x\n",
_systemcfg->platform);
prom_debug("prom_initialize_naca: end...\n");
}
#ifdef DEBUG_PROM
void prom_dump_lmb(void)
{
- unsigned long i;
- unsigned long offset = reloc_offset();
+ unsigned long i;
+ unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
- prom_printf("\nprom_dump_lmb:\n");
- prom_printf(" memory.cnt = 0x%x\n",
+ prom_printf("\nprom_dump_lmb:\n");
+ prom_printf(" memory.cnt = 0x%x\n",
_lmb->memory.cnt);
- prom_printf(" memory.size = 0x%x\n",
+ prom_printf(" memory.size = 0x%x\n",
_lmb->memory.size);
- for (i=0; i < _lmb->memory.cnt ;i++) {
- prom_printf(" memory.region[0x%x].base = 0x%x\n",
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ prom_printf(" memory.region[0x%x].base = 0x%x\n",
i, _lmb->memory.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->memory.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->memory.region[i].size);
- }
+ }
- prom_printf("\n reserved.cnt = 0x%x\n",
+ prom_printf("\n reserved.cnt = 0x%x\n",
_lmb->reserved.cnt);
- prom_printf(" reserved.size = 0x%x\n",
+ prom_printf(" reserved.size = 0x%x\n",
_lmb->reserved.size);
- for (i=0; i < _lmb->reserved.cnt ;i++) {
- prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
+ for (i=0; i < _lmb->reserved.cnt ;i++) {
+ prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
i, _lmb->reserved.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->reserved.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->reserved.region[i].size);
- }
+ }
}
#endif /* DEBUG_PROM */
{
phandle node;
char type[64];
- unsigned long i, offset = reloc_offset();
+ unsigned long i, offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
union lmb_reg_property reg;
unsigned long lmb_base, lmb_size;
unsigned long num_regs, bytes_per_reg = (_prom->encode_phys_size*2)/8;
if (_systemcfg->platform == PLATFORM_POWERMAC)
bytes_per_reg = 12;
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
- prom_getprop(node, "device_type", type, sizeof(type));
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("memory")))
+ if (strcmp(type, RELOC("memory")))
continue;
num_regs = prom_getprop(node, "reg", ®, sizeof(reg))
struct rtas_t *_rtas = PTRRELOC(&rtas);
struct systemcfg *_systemcfg = RELOC(systemcfg);
ihandle prom_rtas;
- u32 getprop_rval;
+ u32 getprop_rval;
char hypertas_funcs[4];
prom_debug("prom_instantiate_rtas: start...\n");
prom_getprop(prom_rtas, "rtas-size",
&getprop_rval, sizeof(getprop_rval));
- _rtas->size = getprop_rval;
+ _rtas->size = getprop_rval;
prom_printf("instantiating rtas");
if (_rtas->size != 0) {
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
prom_printf(" done\n");
}
- prom_debug("rtas->base = 0x%x\n", _rtas->base);
- prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
- prom_debug("rtas->size = 0x%x\n", _rtas->size);
+ prom_debug("rtas->base = 0x%x\n", _rtas->base);
+ prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
+ prom_debug("rtas->size = 0x%x\n", _rtas->size);
}
prom_debug("prom_instantiate_rtas: end...\n");
}
{
phandle node;
ihandle phb_node;
- unsigned long offset = reloc_offset();
+ unsigned long offset = reloc_offset();
char compatible[64], path[64], type[64], model[64];
unsigned long i, table = 0;
unsigned long base, vbase, align;
/* Keep the old logic in tack to avoid regression. */
if (compatible[0] != 0) {
- if ((strstr(compatible, RELOC("python")) == NULL) &&
- (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
- (strstr(compatible, RELOC("Winnipeg")) == NULL))
+ if((strstr(compatible, RELOC("python")) == NULL) &&
+ (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
+ (strstr(compatible, RELOC("Winnipeg")) == NULL))
continue;
} else if (model[0] != 0) {
if ((strstr(model, RELOC("ython")) == NULL) &&
/* Call OF to setup the TCE hardware */
if (call_prom("package-to-path", 3, 1, node,
path, sizeof(path)-1) == PROM_ERROR) {
- prom_printf("package-to-path failed\n");
- } else {
- prom_printf("opening PHB %s", path);
- }
-
- phb_node = call_prom("open", 1, 1, path);
- if ( (long)phb_node <= 0) {
- prom_printf("... failed\n");
- } else {
- prom_printf("... done\n");
- }
- call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
+ prom_printf("package-to-path failed\n");
+ } else {
+ prom_printf("opening PHB %s", path);
+ }
+
+ phb_node = call_prom("open", 1, 1, path);
+ if ( (long)phb_node <= 0) {
+ prom_printf("... failed\n");
+ } else {
+ prom_printf("... done\n");
+ }
+ call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
phb_node, -1, minsize,
(u32) base, (u32) (base >> 32));
- call_prom("close", 1, 0, phb_node);
+ call_prom("close", 1, 0, phb_node);
table++;
}
unsigned int cpu_threads, hw_cpu_num;
int propsize;
extern void __secondary_hold(void);
- extern unsigned long __secondary_hold_spinloop;
- extern unsigned long __secondary_hold_acknowledge;
- unsigned long *spinloop
+ extern unsigned long __secondary_hold_spinloop;
+ extern unsigned long __secondary_hold_acknowledge;
+ unsigned long *spinloop
= (void *)virt_to_abs(&__secondary_hold_spinloop);
- unsigned long *acknowledge
+ unsigned long *acknowledge
= (void *)virt_to_abs(&__secondary_hold_acknowledge);
- unsigned long secondary_hold
+ unsigned long secondary_hold
= virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
struct paca_struct *lpaca = PTRRELOC(&paca[0]);
struct prom_t *_prom = PTRRELOC(&prom);
#ifdef CONFIG_SMP
prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
- /* Set the common spinloop variable, so all of the secondary cpus
+ /* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
* This must occur for both SMP and non SMP kernels, since OF will
* be trashed when we move the kernel.
- */
- *spinloop = 0;
+ */
+ *spinloop = 0;
#ifdef CONFIG_HMT
for (i=0; i < NR_CPUS; i++) {
if (strcmp(type, RELOC("okay")) != 0)
continue;
- reg = -1;
+ reg = -1;
prom_getprop(node, "reg", ®, sizeof(reg));
path = (char *) mem;
ihandle prom_options = 0;
char option[9];
unsigned long offset = reloc_offset();
- struct naca_struct *_naca = RELOC(naca);
+ struct naca_struct *_naca = RELOC(naca);
char found = 0;
if (strstr(RELOC(cmd_line), RELOC("smt-enabled="))) {
struct prom_t *_prom = PTRRELOC(&prom);
u32 val;
- if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
- prom_panic("cannot find stdout");
+ if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
+ prom_panic("cannot find stdout");
- _prom->stdout = val;
+ _prom->stdout = val;
}
static int __init prom_find_machine_type(void)
ihandle ih;
int i, j;
unsigned long offset = reloc_offset();
- struct prom_t *_prom = PTRRELOC(&prom);
+ struct prom_t *_prom = PTRRELOC(&prom);
char type[16], *path;
static unsigned char default_colors[] = {
0x00, 0x00, 0x00,
break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */
}
-
+
return DOUBLEWORD_ALIGN(mem);
}
unsigned long needed, unsigned long align)
{
void *ret;
+ unsigned long offset = reloc_offset();
*mem_start = ALIGN(*mem_start, align);
if (*mem_start + needed > *mem_end) {
#ifdef CONFIG_BLK_DEV_INITRD
- unsigned long offset = reloc_offset();
/* FIXME: Apple OF doesn't map unclaimed mem. If this
* ever happened on G5, we'd need to fix. */
unsigned long initrd_len;
prom_panic("couldn't get device tree root\n");
}
allnextp = &RELOC(allnodes);
- inspect_node(root, NULL, &mem_start, &mem_end, &allnextp);
- *allnextp = NULL;
+ inspect_node(root, 0, &mem_start, &mem_end, &allnextp);
+ *allnextp = 0;
return mem_start;
}
{
struct bi_record *first, *last;
- prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
+ prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
if (bi_recs != NULL)
prom_debug(" tag=0x%x\n", bi_recs->tag);
last = (struct bi_record *)(long)bi_recs->data[0];
- prom_debug(" last=0x%x\n", (unsigned long)last);
+ prom_debug(" last=0x%x\n", (unsigned long)last);
if (last != NULL)
prom_debug(" last_tag=0x%x\n", last->tag);
return NULL;
first = (struct bi_record *)(long)last->data[0];
- prom_debug(" first=0x%x\n", (unsigned long)first);
+ prom_debug(" first=0x%x\n", (unsigned long)first);
if ( first == NULL || first != bi_recs )
return NULL;
/* Init prom stdout device */
prom_init_stdout();
- prom_debug("klimit=0x%x\n", RELOC(klimit));
- prom_debug("offset=0x%x\n", offset);
- prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("offset=0x%x\n", offset);
+ prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
/* check out if we have bi_recs */
_prom->bi_recs = prom_bi_rec_verify((struct bi_record *)r6);
copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
/* Start storing things at klimit */
- mem = RELOC(klimit) - offset;
+ mem = RELOC(klimit) - offset;
/* Get the full OF pathname of the stdout device */
p = (char *) mem;
_prom->encode_phys_size = (getprop_rval == 1) ? 32 : 64;
/* Determine which cpu is actually running right _now_ */
- if (prom_getprop(_prom->chosen, "cpu",
+ if (prom_getprop(_prom->chosen, "cpu",
&prom_cpu, sizeof(prom_cpu)) <= 0)
- prom_panic("cannot find boot cpu");
+ prom_panic("cannot find boot cpu");
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
RELOC(boot_cpuid) = 0;
- prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
+ prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
/* Get the boot device and translate it to a full OF pathname. */
p = (char *) mem;
if (_systemcfg->platform != PLATFORM_POWERMAC)
prom_instantiate_rtas();
- /* Initialize some system info into the Naca early... */
- prom_initialize_naca();
+ /* Initialize some system info into the Naca early... */
+ prom_initialize_naca();
smt_setup();
- /* If we are on an SMP machine, then we *MUST* do the
- * following, regardless of whether we have an SMP
- * kernel or not.
- */
+ /* If we are on an SMP machine, then we *MUST* do the
+ * following, regardless of whether we have an SMP
+ * kernel or not.
+ */
prom_hold_cpus(mem);
- prom_debug("after basic inits, mem=0x%x\n", mem);
+ prom_debug("after basic inits, mem=0x%x\n", mem);
#ifdef CONFIG_BLK_DEV_INITRD
prom_debug("initrd_start=0x%x\n", RELOC(initrd_start));
prom_debug("initrd_end=0x%x\n", RELOC(initrd_end));
RELOC(klimit) = mem + offset;
prom_debug("new klimit is\n");
- prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
prom_debug(" ->mem=0x%x\n", mem);
lmb_reserve(0, __pa(RELOC(klimit)));
* Find out the size of each entry of the interrupts property
* for a node.
*/
-int __devinit prom_n_intr_cells(struct device_node *np)
+static int __devinit
+prom_n_intr_cells(struct device_node *np)
{
struct device_node *p;
unsigned int *icp;
|| get_property(p, "interrupt-map", NULL) != NULL) {
printk("oops, node %s doesn't have #interrupt-cells\n",
p->full_name);
- return 1;
+ return 1;
}
}
#ifdef DEBUG_IRQ
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct pci_reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = pci_addrs[i].addr.a_hi;
adr[i].address = pci_addrs[i].addr.a_lo;
adr[i].size = pci_addrs[i].size_lo;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = rp[i].space;
adr[i].address = rp[i].address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= rpsize) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 0;
adr[i].address = rp[naddrc - 1];
adr[i].size = rp[naddrc + nsizec - 1];
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", NULL);
- np->type = get_property(np, "device_type", NULL);
+ np->name = get_property(np, "name", 0);
+ np->type = get_property(np, "device_type", 0);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start, measure_only);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
nsizec = *ip;
* expect for the name -- Cort
*/
if (!strcmp(np->name, "display"))
- np->name = get_property(np, "compatible", NULL);
+ np->name = get_property(np, "compatible", 0);
if (!strcmp(np->name, "device-tree") || np->parent == NULL)
ifunc = interpret_root_props;
return mem_start;
}
-/**
+/*
* finish_device_tree is called once things are running normally
* (i.e. with text and data mapped to the address they were linked at).
* It traverses the device tree and fills in the name, type,
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", NULL);
+ ip = (int *) get_property(np, "#address-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", NULL);
+ ip = (int *) get_property(np, "#size-cells", 0);
if (ip != NULL)
return *ip;
} while (np->parent);
return 1;
}
-/**
+/*
* Work out the sense (active-low level / active-high edge)
* of each interrupt from the device tree.
*/
}
}
-/**
+/*
* Construct and return a list of the device_nodes with a given name.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Construct and return a list of the device_nodes with a given type.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Returns all nodes linked together
*/
struct device_node *
*prevp = np;
prevp = &np->next;
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/** Checks if the given "compat" string matches one of the strings in
+/* Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
int
}
-/**
+/*
* Indicates whether the root node has a given value in its
* compatible property.
*/
{
struct device_node *root;
int rc = 0;
-
+
root = of_find_node_by_path("/");
if (root) {
rc = device_is_compatible(root, compat);
return rc;
}
-/**
+/*
* Construct and return a list of the device_nodes with a given type
* and compatible property.
*/
prevp = &np->next;
}
}
- *prevp = NULL;
+ *prevp = 0;
return head;
}
-/**
+/*
* Find the device_node with a given full_name.
*/
struct device_node *
u32 *regs;
int err = 0;
phandle *ibm_phandle;
-
- node->name = get_property(node, "name", NULL);
- node->type = get_property(node, "device_type", NULL);
+
+ node->name = get_property(node, "name", 0);
+ node->type = get_property(node, "device_type", 0);
if (!parent) {
err = -ENODEV;
}
/* now do the work of finish_node_interrupts */
- if (get_property(node, "interrupts", NULL)) {
+ if (get_property(node, "interrupts", 0)) {
err = of_finish_dynamic_node_interrupts(node);
if (err) goto out;
}
- /* now do the rough equivalent of update_dn_pci_info, this
- * probably is not correct for phb's, but should work for
- * IOAs and slots.
- */
+ /* now do the rough equivalent of update_dn_pci_info, this
+ * probably is not correct for phb's, but should work for
+ * IOAs and slots.
+ */
- node->phb = parent->phb;
+ node->phb = parent->phb;
- regs = (u32 *)get_property(node, "reg", NULL);
- if (regs) {
- node->busno = (regs[0] >> 16) & 0xff;
- node->devfn = (regs[0] >> 8) & 0xff;
- }
+ regs = (u32 *)get_property(node, "reg", 0);
+ if (regs) {
+ node->busno = (regs[0] >> 16) & 0xff;
+ node->devfn = (regs[0] >> 8) & 0xff;
+ }
/* fixing up iommu_table */
- if (strcmp(node->name, "pci") == 0 &&
- get_property(node, "ibm,dma-window", NULL)) {
- node->bussubno = node->busno;
- iommu_devnode_init(node);
- } else
+ if(strcmp(node->name, "pci") == 0 &&
+ get_property(node, "ibm,dma-window", NULL)) {
+ node->bussubno = node->busno;
+ iommu_devnode_init(node);
+ }
+ else
node->iommu_table = parent->iommu_table;
out:
*lenp = pp->length;
return pp->value;
}
- return NULL;
+ return 0;
}
/*
break;
}
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message, (unsigned int __user *) data);
- break;
+
default:
ret = ptrace_request(child, request, addr, data);
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
-static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
-static spinlock_t ras_log_buf_lock = SPIN_LOCK_UNLOCKED;
-
-static int ras_get_sensor_state_token;
-static int ras_check_exception_token;
-
-#define EPOW_SENSOR_TOKEN 9
-#define EPOW_SENSOR_INDEX 0
-#define RAS_VECTOR_OFFSET 0x500
-
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
struct pt_regs * regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
/* #define DEBUG */
-static void request_ras_irqs(struct device_node *np, char *propname,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- const char *name)
-{
- unsigned int *ireg, len, i;
- int virq, n_intr;
-
- ireg = (unsigned int *)get_property(np, propname, &len);
- if (ireg == NULL)
- return;
- n_intr = prom_n_intr_cells(np);
- len /= n_intr * sizeof(*ireg);
-
- for (i = 0; i < len; i++) {
- virq = virt_irq_create_mapping(*ireg);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- return;
- }
- if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
- printk(KERN_ERR "Unable to request interrupt %d for "
- "%s\n", irq_offset_up(virq), np->full_name);
- return;
- }
- ireg += n_intr;
- }
-}
-
/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
static int __init init_ras_IRQ(void)
{
struct device_node *np;
-
- ras_get_sensor_state_token = rtas_token("get-sensor-state");
- ras_check_exception_token = rtas_token("check-exception");
-
- /* Internal Errors */
- np = of_find_node_by_path("/event-sources/internal-errors");
- if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
- "RAS_ERROR");
- request_ras_irqs(np, "interrupts", ras_error_interrupt,
- "RAS_ERROR");
- of_node_put(np);
+ unsigned int *ireg, len, i;
+ int virq;
+
+ if ((np = of_find_node_by_path("/event-sources/internal-errors")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for (i=0; i<(len / sizeof(*ireg)); i++) {
+ virq = virt_irq_create_mapping(*(ireg));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ break;
+ }
+ request_irq(irq_offset_up(virq),
+ ras_error_interrupt, 0,
+ "RAS_ERROR", NULL);
+ ireg++;
+ }
}
-
- /* EPOW Events */
- np = of_find_node_by_path("/event-sources/epow-events");
- if (np != NULL) {
- request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
- "RAS_EPOW");
- request_ras_irqs(np, "interrupts", ras_epow_interrupt,
- "RAS_EPOW");
- of_node_put(np);
+ of_node_put(np);
+
+ if ((np = of_find_node_by_path("/event-sources/epow-events")) &&
+ (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
+ &len))) {
+ for (i=0; i<(len / sizeof(*ireg)); i++) {
+ virq = virt_irq_create_mapping(*(ireg));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ break;
+ }
+ request_irq(irq_offset_up(virq),
+ ras_epow_interrupt, 0,
+ "RAS_EPOW", NULL);
+ ireg++;
+ }
}
+ of_node_put(np);
return 1;
}
__initcall(init_ras_IRQ);
+static struct rtas_error_log log_buf;
+static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+
/*
* Handle power subsystem events (EPOW).
*
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
int status = 0xdeadbeef;
- int state = 0;
- int critical;
- status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
- EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
+ spin_lock(&log_lock);
- if (state > 3)
- critical = 1; /* Time Critical */
- else
- critical = 0;
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
+ 1, /* Time Critical */
+ __pa(&log_buf), size);
- spin_lock(&ras_log_buf_lock);
+ log_entry = log_buf;
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
- RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
- critical, __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
+ spin_unlock(&log_lock);
- udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
- printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
+ udbg_printf("EPOW <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
+ "EPOW <0x%lx 0x%x>\n",*((unsigned long *)&log_entry), status);
/* format and print the extended information */
- log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
-
- spin_unlock(&ras_log_buf_lock);
+ log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, 0);
+
return IRQ_HANDLED;
}
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct rtas_error_log *rtas_elog;
+ struct rtas_error_log log_entry;
+ unsigned int size = sizeof(log_entry);
int status = 0xdeadbeef;
int fatal;
- spin_lock(&ras_log_buf_lock);
+ spin_lock(&log_lock);
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RAS_VECTOR_OFFSET,
- virt_irq_to_real(irq_offset_down(irq)),
- RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
- __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
+ status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
+ 0x500, irq,
+ RTAS_INTERNAL_ERROR,
+ 1, /* Time Critical */
+ __pa(&log_buf), size);
- rtas_elog = (struct rtas_error_log *)ras_log_buf;
+ log_entry = log_buf;
- if ((status == 0) && (rtas_elog->severity >= SEVERITY_ERROR_SYNC))
+ spin_unlock(&log_lock);
+
+ if ((status == 0) && (log_entry.severity >= SEVERITY_ERROR_SYNC))
fatal = 1;
else
fatal = 0;
/* format and print the extended information */
- log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
+ log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
- udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
+ udbg_printf("HW Error <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_EMERG
+ "Error: Fatal hardware error <0x%lx 0x%x>\n",
+ *((unsigned long *)&log_entry), status);
#ifndef DEBUG
/* Don't actually power off when debugging so we can test
#endif
} else {
udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_WARNING
+ *((unsigned long *)&log_entry), status);
+ printk(KERN_WARNING
"Warning: Recoverable hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
+ *((unsigned long *)&log_entry), status);
}
-
- spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
#include <linux/time.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
/* ****************************************************************** */
/* Declarations */
-static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
-static int ppc_rtas_clock_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_clock_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_progress_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_progress_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_poweron_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-
-static ssize_t ppc_rtas_tone_freq_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
-static ssize_t ppc_rtas_tone_volume_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
-static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
-
-static int sensors_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_sensors_show, NULL);
-}
-
-struct file_operations ppc_rtas_sensors_operations = {
- .open = sensors_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int poweron_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_poweron_show, NULL);
-}
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data);
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos);
+static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
struct file_operations ppc_rtas_poweron_operations = {
- .open = poweron_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_poweron_write,
- .release = single_release,
+ .read = ppc_rtas_poweron_read,
+ .write = ppc_rtas_poweron_write
};
-
-static int progress_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_progress_show, NULL);
-}
-
struct file_operations ppc_rtas_progress_operations = {
- .open = progress_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_progress_write,
- .release = single_release,
+ .read = ppc_rtas_progress_read,
+ .write = ppc_rtas_progress_write
};
-static int clock_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_clock_show, NULL);
-}
-
struct file_operations ppc_rtas_clock_operations = {
- .open = clock_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_clock_write,
- .release = single_release,
+ .read = ppc_rtas_clock_read,
+ .write = ppc_rtas_clock_write
};
-static int tone_freq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_tone_freq_show, NULL);
-}
-
struct file_operations ppc_rtas_tone_freq_operations = {
- .open = tone_freq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_tone_freq_write,
- .release = single_release,
+ .read = ppc_rtas_tone_freq_read,
+ .write = ppc_rtas_tone_freq_write
};
-
-static int tone_volume_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_tone_volume_show, NULL);
-}
-
struct file_operations ppc_rtas_tone_volume_operations = {
- .open = tone_volume_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = ppc_rtas_tone_volume_write,
- .release = single_release,
+ .read = ppc_rtas_tone_volume_read,
+ .write = ppc_rtas_tone_volume_write
};
-static int rmo_buf_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ppc_rtas_rmo_buf_show, NULL);
-}
-
-struct file_operations ppc_rtas_rmo_buf_ops = {
- .open = rmo_buf_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static struct file_operations ppc_rtas_rmo_buf_ops = {
+ .read = ppc_rtas_rmo_buf_read,
};
-static int ppc_rtas_find_all_sensors(void);
-static void ppc_rtas_process_sensor(struct seq_file *m,
- struct individual_sensor *s, int state, int error, char *loc);
-static char *ppc_rtas_process_error(int error);
-static void get_location_code(struct seq_file *m,
- struct individual_sensor *s, char *loc);
-static void check_location_string(struct seq_file *m, char *c);
-static void check_location(struct seq_file *m, char *c);
+int ppc_rtas_find_all_sensors (void);
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf);
+char * ppc_rtas_process_error(int error);
+int get_location_code(struct individual_sensor s, char * buf);
+int check_location_string (char *c, char * buf);
+int check_location (char *c, int idx, char * buf);
static int __init proc_rtas_init(void)
{
if (entry)
entry->proc_fops = &ppc_rtas_poweron_operations;
- entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
- if (entry)
- entry->proc_fops = &ppc_rtas_sensors_operations;
+ create_proc_read_entry("ppc64/rtas/sensors", S_IRUGO, NULL,
+ ppc_rtas_sensor_read, NULL);
entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
NULL);
__initcall(proc_rtas_init);
-static int parse_number(const char __user *p, size_t count, unsigned long *val)
-{
- char buf[40];
- char *end;
-
- if (count > 39)
- return -EINVAL;
-
- if (copy_from_user(buf, p, count))
- return -EFAULT;
-
- buf[count] = 0;
-
- *val = simple_strtoul(buf, &end, 10);
- if (*end && *end != '\n')
- return -EINVAL;
-
- return 0;
-}
-
/* ****************************************************************** */
/* POWER-ON-TIME */
/* ****************************************************************** */
-static ssize_t ppc_rtas_poweron_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- int error = parse_number(buf, count, &nowtime);
- if (error)
- return error;
+ char *dest;
+ int error;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ nowtime = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_poweron_write: Invalid time\n");
+ return count;
+ }
power_on_time = nowtime; /* save the time */
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting poweron time returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
+ int n, sn;
if (power_on_time == 0)
- seq_printf(m, "Power on time not set\n");
+ n = scnprintf(stkbuf,sizeof(stkbuf),"Power on time not set\n");
else
- seq_printf(m, "%lu\n",power_on_time);
- return 0;
+ n = scnprintf(stkbuf,sizeof(stkbuf),"%lu\n",power_on_time);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* PROGRESS */
/* ****************************************************************** */
-static ssize_t ppc_rtas_progress_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
unsigned long hex;
- if (count >= MAX_LINELENGTH)
- count = MAX_LINELENGTH -1;
- if (copy_from_user(progress_led, buf, count)) { /* save the string */
+ if (count >= MAX_LINELENGTH) count = MAX_LINELENGTH -1;
+ if (copy_from_user (progress_led, buf, count)) { /* save the string */
return -EFAULT;
}
progress_led[count] = 0;
ppc_md.progress ((char *)progress_led, hex);
return count;
- /* clear the line */
- /* ppc_md.progress(" ", 0xffff);*/
+ /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
}
/* ****************************************************************** */
-static int ppc_rtas_progress_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- if (progress_led)
- seq_printf(m, "%s\n", progress_led);
- return 0;
+ int sn, n = 0;
+ char *tmpbuf;
+
+ if (progress_led == NULL) return 0;
+
+ tmpbuf = kmalloc (MAX_LINELENGTH, GFP_KERNEL);
+ if (!tmpbuf) {
+ printk(KERN_ERR "error: kmalloc failed\n");
+ return -ENOMEM;
+ }
+ n = sprintf (tmpbuf, "%s\n", progress_led);
+
+ sn = strlen (tmpbuf) +1;
+ if (*ppos >= sn) {
+ kfree (tmpbuf);
+ return 0;
+ }
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, tmpbuf + (*ppos), n)) {
+ kfree (tmpbuf);
+ return -EFAULT;
+ }
+ kfree (tmpbuf);
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* CLOCK */
/* ****************************************************************** */
-static ssize_t ppc_rtas_clock_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- int error = parse_number(buf, count, &nowtime);
- if (error)
- return error;
+ char *dest;
+ int error;
+
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ nowtime = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_clock_write: Invalid time\n");
+ return count;
+ }
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting the clock returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_clock_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
+ unsigned int year, mon, day, hour, min, sec;
int ret[8];
- int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
+ int n, sn, error;
+ char stkbuf[40]; /* its small, its on stack */
- if (error) {
+ error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
+
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+
+ if (error != 0){
printk(KERN_WARNING "error: reading the clock returned: %s\n",
ppc_rtas_process_error(error));
- seq_printf(m, "0");
+ n = scnprintf (stkbuf, sizeof(stkbuf), "0");
} else {
- unsigned int year, mon, day, hour, min, sec;
- year = ret[0]; mon = ret[1]; day = ret[2];
- hour = ret[3]; min = ret[4]; sec = ret[5];
- seq_printf(m, "%lu\n",
+ n = scnprintf (stkbuf, sizeof(stkbuf), "%lu\n",
mktime(year, mon, day, hour, min, sec));
}
- return 0;
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* SENSOR STUFF */
/* ****************************************************************** */
-static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
+static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
+ int count, int *eof, void *data)
{
- int i,j;
+ int i,j,n;
int state, error;
+ char *buffer;
int get_sensor_state = rtas_token("get-sensor-state");
- seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
- seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
- seq_printf(m, "********************************************************\n");
+ if (count < 0)
+ return -EINVAL;
+
+ /* May not be enough */
+ buffer = kmalloc(MAX_LINELENGTH*MAX_SENSORS, GFP_KERNEL);
+
+ if (!buffer)
+ return -ENOMEM;
+
+ memset(buffer, 0, MAX_LINELENGTH*MAX_SENSORS);
+
+ n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ n += sprintf ( buffer+n, "********************************************************\n");
if (ppc_rtas_find_all_sensors() != 0) {
- seq_printf(m, "\nNo sensors are available\n");
- return 0;
+ n += sprintf ( buffer+n, "\nNo sensors are available\n");
+ goto return_string;
}
for (i=0; i<sensors.quant; i++) {
- struct individual_sensor *p = &sensors.sensor[i];
- char rstr[64];
- char *loc;
- int llen, offs;
-
- sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
- loc = (char *) get_property(rtas_node, rstr, &llen);
-
+ j = sensors.sensor[i].quant;
/* A sensor may have multiple instances */
- for (j = 0, offs = 0; j <= p->quant; j++) {
+ while (j >= 0) {
+
error = rtas_call(get_sensor_state, 2, 2, &state,
- p->token, j);
-
- ppc_rtas_process_sensor(m, p, state, error, loc);
- seq_putc(m, '\n');
- if (loc) {
- offs += strlen(loc) + 1;
- loc += strlen(loc) + 1;
- if (offs >= llen)
- loc = NULL;
- }
- }
+ sensors.sensor[i].token,
+ sensors.sensor[i].quant - j);
+
+ n += ppc_rtas_process_sensor(sensors.sensor[i], state,
+ error, buffer+n );
+ n += sprintf (buffer+n, "\n");
+ j--;
+ } /* while */
+ } /* for */
+
+return_string:
+ if (off >= strlen(buffer)) {
+ *eof = 1;
+ kfree(buffer);
+ return 0;
}
- return 0;
+ if (n > strlen(buffer) - off)
+ n = strlen(buffer) - off;
+ if (n > count)
+ n = count;
+ else
+ *eof = 1;
+
+ memcpy(buf, buffer + off, n);
+ *start = buf;
+ kfree(buffer);
+ return n;
}
/* ****************************************************************** */
-static int ppc_rtas_find_all_sensors(void)
+int ppc_rtas_find_all_sensors (void)
{
unsigned int *utmp;
int len, i;
/*
* Builds a string of what rtas returned
*/
-static char *ppc_rtas_process_error(int error)
+char * ppc_rtas_process_error(int error)
{
switch (error) {
case SENSOR_CRITICAL_HIGH:
* Builds a string out of what the sensor said
*/
-static void ppc_rtas_process_sensor(struct seq_file *m,
- struct individual_sensor *s, int state, int error, char *loc)
+int ppc_rtas_process_sensor(struct individual_sensor s, int state,
+ int error, char * buf)
{
/* Defined return vales */
const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
int num_states = 0;
int temperature = 0;
int unknown = 0;
+ int n = 0;
/* What kind of sensor do we have here? */
- switch (s->token) {
+ switch (s.token) {
case KEY_SWITCH:
- seq_printf(m, "Key switch:\t");
+ n += sprintf(buf+n, "Key switch:\t");
num_states = sizeof(key_switch) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", key_switch[state]);
+ n += sprintf(buf+n, "%s\t", key_switch[state]);
have_strings = 1;
}
break;
case ENCLOSURE_SWITCH:
- seq_printf(m, "Enclosure switch:\t");
+ n += sprintf(buf+n, "Enclosure switch:\t");
num_states = sizeof(enclosure_switch) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
enclosure_switch[state]);
have_strings = 1;
}
break;
case THERMAL_SENSOR:
- seq_printf(m, "Temp. (°C/°F):\t");
+ n += sprintf(buf+n, "Temp. (°C/°F):\t");
temperature = 1;
break;
case LID_STATUS:
- seq_printf(m, "Lid status:\t");
+ n += sprintf(buf+n, "Lid status:\t");
num_states = sizeof(lid_status) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", lid_status[state]);
+ n += sprintf(buf+n, "%s\t", lid_status[state]);
have_strings = 1;
}
break;
case POWER_SOURCE:
- seq_printf(m, "Power source:\t");
+ n += sprintf(buf+n, "Power source:\t");
num_states = sizeof(power_source) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
power_source[state]);
have_strings = 1;
}
break;
case BATTERY_VOLTAGE:
- seq_printf(m, "Battery voltage:\t");
+ n += sprintf(buf+n, "Battery voltage:\t");
break;
case BATTERY_REMAINING:
- seq_printf(m, "Battery remaining:\t");
+ n += sprintf(buf+n, "Battery remaining:\t");
num_states = sizeof(battery_remaining) / sizeof(char *);
if (state < num_states)
{
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_remaining[state]);
have_strings = 1;
}
break;
case BATTERY_PERCENTAGE:
- seq_printf(m, "Battery percentage:\t");
+ n += sprintf(buf+n, "Battery percentage:\t");
break;
case EPOW_SENSOR:
- seq_printf(m, "EPOW Sensor:\t");
+ n += sprintf(buf+n, "EPOW Sensor:\t");
num_states = sizeof(epow_sensor) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t", epow_sensor[state]);
+ n += sprintf(buf+n, "%s\t", epow_sensor[state]);
have_strings = 1;
}
break;
case BATTERY_CYCLESTATE:
- seq_printf(m, "Battery cyclestate:\t");
+ n += sprintf(buf+n, "Battery cyclestate:\t");
num_states = sizeof(battery_cyclestate) /
sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_cyclestate[state]);
have_strings = 1;
}
break;
case BATTERY_CHARGING:
- seq_printf(m, "Battery Charging:\t");
+ n += sprintf(buf+n, "Battery Charging:\t");
num_states = sizeof(battery_charging) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
battery_charging[state]);
have_strings = 1;
}
break;
case IBM_SURVEILLANCE:
- seq_printf(m, "Surveillance:\t");
+ n += sprintf(buf+n, "Surveillance:\t");
break;
case IBM_FANRPM:
- seq_printf(m, "Fan (rpm):\t");
+ n += sprintf(buf+n, "Fan (rpm):\t");
break;
case IBM_VOLTAGE:
- seq_printf(m, "Voltage (mv):\t");
+ n += sprintf(buf+n, "Voltage (mv):\t");
break;
case IBM_DRCONNECTOR:
- seq_printf(m, "DR connector:\t");
+ n += sprintf(buf+n, "DR connector:\t");
num_states = sizeof(ibm_drconnector) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
ibm_drconnector[state]);
have_strings = 1;
}
break;
case IBM_POWERSUPPLY:
- seq_printf(m, "Powersupply:\t");
+ n += sprintf(buf+n, "Powersupply:\t");
break;
case IBM_INTQUEUE:
- seq_printf(m, "Interrupt queue:\t");
+ n += sprintf(buf+n, "Interrupt queue:\t");
num_states = sizeof(ibm_intqueue) / sizeof(char *);
if (state < num_states) {
- seq_printf(m, "%s\t",
+ n += sprintf(buf+n, "%s\t",
ibm_intqueue[state]);
have_strings = 1;
}
break;
default:
- seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
- s->token);
+ n += sprintf(buf+n, "Unknown sensor (type %d), ignoring it\n",
+ s.token);
unknown = 1;
have_strings = 1;
break;
}
if (have_strings == 0) {
if (temperature) {
- seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
+ n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
} else
- seq_printf(m, "%10d\t", state);
+ n += sprintf(buf+n, "%10d\t", state);
}
if (unknown == 0) {
- seq_printf(m, "%s\t", ppc_rtas_process_error(error));
- get_location_code(m, s, loc);
+ n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
+ n += get_location_code(s, buf+n);
}
+ return n;
}
/* ****************************************************************** */
-static void check_location(struct seq_file *m, char *c)
+int check_location (char *c, int idx, char * buf)
{
- switch (c[0]) {
+ int n = 0;
+
+ switch (*(c+idx)) {
case LOC_PLANAR:
- seq_printf(m, "Planar #%c", c[1]);
+ n += sprintf ( buf, "Planar #%c", *(c+idx+1));
break;
case LOC_CPU:
- seq_printf(m, "CPU #%c", c[1]);
+ n += sprintf ( buf, "CPU #%c", *(c+idx+1));
break;
case LOC_FAN:
- seq_printf(m, "Fan #%c", c[1]);
+ n += sprintf ( buf, "Fan #%c", *(c+idx+1));
break;
case LOC_RACKMOUNTED:
- seq_printf(m, "Rack #%c", c[1]);
+ n += sprintf ( buf, "Rack #%c", *(c+idx+1));
break;
case LOC_VOLTAGE:
- seq_printf(m, "Voltage #%c", c[1]);
+ n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
break;
case LOC_LCD:
- seq_printf(m, "LCD #%c", c[1]);
+ n += sprintf ( buf, "LCD #%c", *(c+idx+1));
break;
case '.':
- seq_printf(m, "- %c", c[1]);
- break;
+ n += sprintf ( buf, "- %c", *(c+idx+1));
default:
- seq_printf(m, "Unknown location");
+ n += sprintf ( buf, "Unknown location");
break;
}
+ return n;
}
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
* the '.' may be an abbrevation
*/
-static void check_location_string(struct seq_file *m, char *c)
+int check_location_string (char *c, char *buf)
{
- while (*c) {
- if (isalpha(*c) || *c == '.')
- check_location(m, c);
- else if (*c == '/' || *c == '-')
- seq_printf(m, " at ");
- c++;
+ int n=0,i=0;
+
+ while (c[i]) {
+ if (isalpha(c[i]) || c[i] == '.') {
+ n += check_location(c, i, buf+n);
+ }
+ else if (c[i] == '/' || c[i] == '-')
+ n += sprintf(buf+n, " at ");
+ i++;
}
+ return n;
}
/* ****************************************************************** */
-static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc)
+int get_location_code(struct individual_sensor s, char * buffer)
{
- if (!loc || !*loc) {
- seq_printf(m, "---");/* does not have a location */
+ char rstr[512], tmp[10], tmp2[10];
+ int n=0, i=0, llen, len;
+ /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
+ char *ret;
+
+ static int pos = 0; /* remember position where buffer was */
+
+ /* construct the sensor number like 0003 */
+ /* fill with zeros */
+ n = sprintf(tmp, "%d", s.token);
+ len = strlen(tmp);
+ while (strlen(tmp) < 4)
+ n += sprintf (tmp+n, "0");
+
+ /* invert the string */
+ while (tmp[i]) {
+ if (i<len)
+ tmp2[4-len+i] = tmp[i];
+ else
+ tmp2[3-i] = tmp[i];
+ i++;
+ }
+ tmp2[4] = '\0';
+
+ sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
+
+ ret = (char *) get_property(rtas_node, rstr, &llen);
+
+ n=0;
+ if (ret == NULL || ret[0] == '\0') {
+ n += sprintf ( buffer+n, "--- ");/* does not have a location */
} else {
- check_location_string(m, loc);
+ char t[50];
+ ret += pos;
+
+ n += check_location_string(ret, buffer + n);
+ n += sprintf ( buffer+n, " ");
+ /* see how many characters we have printed */
+ scnprintf(t, sizeof(t), "%s ", ret);
+
+ pos += strlen(t);
+ if (pos >= llen) pos=0;
}
- seq_putc(m, ' ');
+ return n;
}
/* ****************************************************************** */
/* INDICATORS - Tone Frequency */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_freq_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
unsigned long freq;
- int error = parse_number(buf, count, &freq);
- if (error)
- return error;
+ char *dest;
+ int error;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ freq = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_freq_write: Invalid tone frequency\n");
+ return count;
+ }
+ if (freq < 0) freq = 0;
rtas_tone_frequency = freq; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_FREQUENCY, 0, freq);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- seq_printf(m, "%lu\n", rtas_tone_frequency);
- return 0;
+ int n, sn;
+ char stkbuf[40]; /* its small, its on stack */
+
+ n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_frequency);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
/* ****************************************************************** */
/* INDICATORS - Tone Volume */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_volume_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
{
+ char stkbuf[40]; /* its small, its on stack */
unsigned long volume;
- int error = parse_number(buf, count, &volume);
- if (error)
- return error;
+ char *dest;
+ int error;
- if (volume > 100)
- volume = 100;
+ if (39 < count) count = 39;
+ if (copy_from_user (stkbuf, buf, count)) {
+ return -EFAULT;
+ }
+ stkbuf[count] = 0;
+ volume = simple_strtoul(stkbuf, &dest, 10);
+ if (*dest != '\0' && *dest != '\n') {
+ printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
+ return count;
+ }
+ if (volume < 0) volume = 0;
+ if (volume > 100) volume = 100;
rtas_tone_volume = volume; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_VOLUME, 0, volume);
- if (error)
+ if (error != 0)
printk(KERN_WARNING "error: setting tone volume returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
+static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
{
- seq_printf(m, "%lu\n", rtas_tone_volume);
- return 0;
+ int n, sn;
+ char stkbuf[40]; /* its small, its on stack */
+
+ n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_volume);
+
+ sn = strlen (stkbuf) +1;
+ if (*ppos >= sn)
+ return 0;
+ if (n > sn - *ppos)
+ n = sn - *ppos;
+ if (n > count)
+ n = count;
+ if (copy_to_user (buf, stkbuf + (*ppos), n)) {
+ return -EFAULT;
+ }
+ *ppos += n;
+ return n;
}
#define RMO_READ_BUF_MAX 30
/* RTAS Userspace access */
-static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
+/*
+ * Report the real-mode (RMO) RTAS user buffer address and its size.
+ * The whole line is produced in one shot; any non-zero offset is EOF.
+ */
+static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
 {
-	seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
-	return 0;
+	char kbuf[RMO_READ_BUF_MAX];
+	int n;
+
+	/* Single-shot read: bail out before formatting if already read. */
+	if (ppos && *ppos != 0)
+		return 0;
+
+	/* snprintf bounds the write to the on-stack buffer. */
+	n = snprintf(kbuf, sizeof(kbuf), "%016lx %x\n",
+		     rtas_rmo_buf, RTAS_RMOBUF_MAX);
+	if (n > count)
+		n = count;
+
+	if (copy_to_user(buf, kbuf, n))
+		return -EFAULT;
+
+	if (ppos)
+		*ppos = n;
+
+	return n;
 }
#include <asm/delay.h>
#include <asm/uaccess.h>
-struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+struct flash_block_list_header rtas_firmware_flash_list = {0, 0};
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
if (f->next)
f->next = (struct flash_block_list *)virt_to_abs(f->next);
else
- f->next = NULL;
+ f->next = 0LL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
}
BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
- printk("cpu %u (hwid %u) Ready to die...\n",
+ printk("%u %u Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(rtas_args));
#define DEBUG(A...)
#endif
-static spinlock_t rtasd_log_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait);
if (buf == NULL)
return;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
/* get length and increase count */
switch (err_type & ERR_TYPE_MASK) {
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
/* Check to see if we need to or have stopped logging */
if (fatal || no_more_logging) {
no_more_logging = 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
else
rtas_log_start += 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
return;
}
return -ENOMEM;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
/* if it's 0, then we know we got the last one (the one in NVRAM) */
if (rtas_log_size == 0 && !no_more_logging)
nvram_clear_error_log();
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
if (error)
goto out;
- spin_lock_irqsave(&rtasd_log_lock, s);
+ spin_lock_irqsave(&log_lock, s);
offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK);
memcpy(tmp, &rtas_log_buf[offset], count);
rtas_log_start += 1;
rtas_log_size -= 1;
- spin_unlock_irqrestore(&rtasd_log_lock, s);
+ spin_unlock_irqrestore(&log_lock, s);
error = copy_to_user(buf, tmp, count) ? -EFAULT : count;
out:
else
printk(KERN_ERR "Failed to create error_log proc entry\n");
- if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
+ if (kernel_thread(rtasd, 0, CLONE_FS) < 0)
printk(KERN_ERR "Failed to start RTAS daemon\n");
return 0;
* ioctls.
*/
-static ssize_t rtc_read(struct file *file, char __user *buf,
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin);
+
+static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
* Now all the various file operations that we export.
*/
-static ssize_t rtc_read(struct file *file, char __user *buf,
+/* The RTC character device has no seekable data; always fail with -ESPIPE. */
+static loff_t rtc_llseek(struct file *file, loff_t offset, int origin)
+{
+	return -ESPIPE;
+}
+
+static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
return -EIO;
if (!capable(CAP_SYS_TIME))
return -EACCES;
- if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
+ if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
sizeof(struct rtc_time)))
return -EFAULT;
}
case RTC_EPOCH_READ: /* Read the epoch. */
{
- return put_user (epoch, (unsigned long __user *)arg);
+ return put_user (epoch, (unsigned long *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
default:
return -EINVAL;
}
- return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
return 0;
}
*/
static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = rtc_llseek,
.read = rtc_read,
.ioctl = rtc_ioctl,
.open = rtc_open,
return retval;
#ifdef CONFIG_PROC_FS
- if (create_proc_read_entry ("driver/rtc", 0, NULL, rtc_read_proc, NULL) == NULL)
+ if(create_proc_read_entry ("driver/rtc", 0, 0, rtc_read_proc, NULL) == NULL)
misc_deregister(&rtc_dev);
return -ENOMEM;
#endif
void cpu_die(void)
{
local_irq_disable();
- /* Some hardware requires clearing the CPPR, while other hardware does not
- * it is safe either way
- */
- pSeriesLP_cppr_info(0, 0);
rtas_stop_self();
/* Should never get here... */
BUG();
/* Fixup atomic count: it exited inside IRQ handler. */
paca[lcpu].__current->thread_info->preempt_count = 0;
+ /* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
+ paca[lcpu].stab_next_rr = 0;
/* At boot this is done in prom.c. */
paca[lcpu].hw_cpu_id = pcpu;
}
maxcpus = ireg[num_addr_cell + num_size_cell];
-
- /* Double maxcpus for processors which have SMT capability */
- if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
- maxcpus *= 2;
-
+ /* DRENG need to account for threads here too */
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
- debugger(NULL);
+ debugger(0);
goto out;
}
}
smp_processor_id(),
atomic_read(&data.finished),
atomic_read(&data.started));
- debugger(NULL);
+ debugger(0);
goto out;
}
}
if (smp_ops->give_timebase)
smp_ops->give_timebase();
-
- /* Wait until cpu puts itself in the online map */
- while (!cpu_online(cpu))
- cpu_relax();
-
+ cpu_set(cpu, cpu_online_map);
return 0;
}
#endif
#endif
- spin_lock(&call_lock);
- cpu_set(cpu, cpu_online_map);
- spin_unlock(&call_lock);
-
local_irq_enable();
return cpu_idle(NULL);
#include <asm/naca.h>
#include <asm/cputable.h>
-static int make_ste(unsigned long stab, unsigned long esid,
- unsigned long vsid);
+static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
+static void make_slbe(unsigned long esid, unsigned long vsid, int large,
+ int kernel_segment);
-void slb_initialize(void);
+/*
+ * Re-establish the bolted SLB entry for the first vmalloc segment
+ * (slot 1 - stab_next_rr is set so make_slbe picks entry 1).  Must be
+ * called with interrupts disabled, after the SLB has been invalidated.
+ * Compiled out on iSeries (NOTE(review): presumably the hypervisor
+ * manages those entries there - confirm).
+ */
+static inline void slb_add_bolted(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+	unsigned long esid = GET_ESID(VMALLOCBASE);
+	unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
+
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * Bolt in the first vmalloc segment. Since modules end
+	 * up there it gets hit very heavily.
+	 */
+	get_paca()->stab_next_rr = 1;
+	make_slbe(esid, vsid, 0, 1);
+#endif
+}
/*
* Build an entry for the base kernel segment and put it into
*/
void stab_initialize(unsigned long stab)
{
- unsigned long vsid = get_kernel_vsid(KERNELBASE);
+ unsigned long esid, vsid;
+ int seg0_largepages = 0;
+
+ esid = GET_ESID(KERNELBASE);
+ vsid = get_kernel_vsid(esid << SID_SHIFT);
+
+ if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+ seg0_largepages = 1;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
- slb_initialize();
+ /* Invalidate the entire SLB & all the ERATS */
+#ifdef CONFIG_PPC_ISERIES
+ asm volatile("isync; slbia; isync":::"memory");
+#else
+ asm volatile("isync":::"memory");
+ asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+ asm volatile("isync; slbia; isync":::"memory");
+ get_paca()->stab_next_rr = 0;
+ make_slbe(esid, vsid, seg0_largepages, 1);
+ asm volatile("isync":::"memory");
+#endif
+
+ slb_add_bolted();
} else {
asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, GET_ESID(KERNELBASE), vsid);
+ make_ste(stab, esid, vsid);
/* Order update */
asm volatile("sync":::"memory");
* Could not find empty entry, pick one with a round robin selection.
* Search all entries in the two groups.
*/
- castout_entry = get_paca()->stab_rr;
+ castout_entry = get_paca()->stab_next_rr;
for (i = 0; i < 16; i++) {
if (castout_entry < 8) {
global_entry = (esid & 0x1f) << 3;
castout_entry = (castout_entry + 1) & 0xf;
}
- get_paca()->stab_rr = (castout_entry + 1) & 0xf;
+ get_paca()->stab_next_rr = (castout_entry + 1) & 0xf;
/* Modify the old entry to the new value. */
preload_stab(tsk, mm);
}
+
+/*
+ * SLB stuff
+ */
+
+/*
+ * Create a segment buffer entry for the given esid/vsid pair.
+ *
+ * @large:          set the L bit in the VSID word (large pages)
+ * @kernel_segment: set the C (class) bit for kernel segments,
+ *                  otherwise set Ks for user segments
+ *
+ * NOTE: A context synchronising instruction is required before and after
+ * this, in the common case we use exception entry and rfid.
+ */
+static void make_slbe(unsigned long esid, unsigned long vsid, int large,
+		      int kernel_segment)
+{
+	unsigned long entry, castout_entry;
+	union {
+		unsigned long word0;
+		slb_dword0 data;
+	} esid_data;
+	union {
+		unsigned long word0;
+		slb_dword1 data;
+	} vsid_data;
+	struct paca_struct *lpaca = get_paca();
+
+	/*
+	 * We take the next entry, round robin. Previously we tried
+	 * to find a free slot first but that took too long. Unfortunately
+	 * we dont have any LRU information to help us choose a slot.
+	 */
+
+	/*
+	 * Never cast out the segment for our kernel stack. Since we
+	 * dont invalidate the ERAT we could have a valid translation
+	 * for the kernel stack during the first part of exception exit
+	 * which gets invalidated due to a tlbie from another cpu at a
+	 * non recoverable point (after setting srr0/1) - Anton
+	 *
+	 * paca Ksave is always valid (even when on the interrupt stack)
+	 * so we use that.
+	 */
+	castout_entry = lpaca->stab_next_rr;
+	do {
+		entry = castout_entry;
+		castout_entry++;
+		/*
+		 * We bolt in the first kernel segment and the first
+		 * vmalloc segment, so the round robin wraps to slot 2,
+		 * never evicting entries 0 and 1.
+		 */
+		if (castout_entry >= SLB_NUM_ENTRIES)
+			castout_entry = 2;
+		asm volatile("slbmfee  %0,%1" : "=r" (esid_data) : "r" (entry));
+	} while (esid_data.data.v &&
+		 esid_data.data.esid == GET_ESID(lpaca->kstack));
+
+	lpaca->stab_next_rr = castout_entry;
+
+	/* slbie not needed as the previous mapping is still valid. */
+
+	/*
+	 * Write the new SLB entry.
+	 */
+	vsid_data.word0 = 0;
+	vsid_data.data.vsid = vsid;
+	vsid_data.data.kp = 1;
+	if (large)
+		vsid_data.data.l = 1;
+	if (kernel_segment)
+		vsid_data.data.c = 1;
+	else
+		vsid_data.data.ks = 1;
+
+	esid_data.word0 = 0;
+	esid_data.data.esid = esid;
+	esid_data.data.v = 1;
+	esid_data.data.index = entry;
+
+	/*
+	 * No need for an isync before or after this slbmte. The exception
+	 * we enter with and the rfid we exit with are context synchronizing.
+	 */
+	asm volatile("slbmte  %0,%1" : : "r" (vsid_data), "r" (esid_data));
+}
+
+/*
+ * Insert an SLB entry for (esid, vsid).  Kernel-region segments use
+ * large pages when the CPU supports them (CPU_FTR_16M_PAGE); user
+ * segments only inside hugepage areas.  User esids are also recorded
+ * in the per-cpu stab_cache so flush_slb() can invalidate them with
+ * individual slbie instructions.
+ */
+static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
+				  mm_context_t context)
+{
+	int large = 0;
+	int region_id = REGION_ID(esid << SID_SHIFT);
+	unsigned long offset;
+
+	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
+		if (region_id == KERNEL_REGION_ID)
+			large = 1;
+		else if (region_id == USER_REGION_ID)
+			large = in_hugepage_area(context, esid << SID_SHIFT);
+	}
+
+	make_slbe(esid, vsid, large, region_id != USER_REGION_ID);
+
+	/* Only user segments are tracked for selective invalidation. */
+	if (region_id != USER_REGION_ID)
+		return;
+
+	offset = __get_cpu_var(stab_cache_ptr);
+	if (offset < NR_STAB_CACHE_ENTRIES)
+		__get_cpu_var(stab_cache[offset++]) = esid;
+	else
+		/* Overflow marker: flush_slb() falls back to a full slbia. */
+		offset = NR_STAB_CACHE_ENTRIES+1;
+	__get_cpu_var(stab_cache_ptr) = offset;
+}
+
+/*
+ * Allocate a segment table entry for the given ea.
+ *
+ * Returns 0 on success, 1 when the ea is invalid or a user address has
+ * no mm attached (the caller must treat that as a fault).
+ */
+int slb_allocate(unsigned long ea)
+{
+	unsigned long vsid, esid;
+	mm_context_t context;
+
+	/* Check for invalid effective addresses. */
+	if (unlikely(!IS_VALID_EA(ea)))
+		return 1;
+
+	/* Kernel or user address? */
+	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
+		context = KERNEL_CONTEXT(ea);
+		vsid = get_kernel_vsid(ea);
+	} else {
+		if (unlikely(!current->mm))
+			return 1;
+
+		context = current->mm->context;
+		vsid = get_vsid(context.id, ea);
+	}
+
+	esid = GET_ESID(ea);
+#ifndef CONFIG_PPC_ISERIES
+	/* The first vmalloc segment is bolted; faulting on it is a bug. */
+	BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
+#endif
+	__slb_allocate(esid, vsid, context);
+
+	return 0;
+}
+
+/*
+ * preload some userspace segments into the SLB: the pc, the stack and
+ * the mmap base, skipping duplicates that share a segment.
+ *
+ * Note: each check uses return rather than skip, so an invalid or
+ * kernel-region pc also suppresses the stack and mmap-base preloads.
+ */
+static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long pc = KSTK_EIP(tsk);
+	unsigned long stack = KSTK_ESP(tsk);
+	unsigned long unmapped_base;
+	unsigned long pc_esid = GET_ESID(pc);
+	unsigned long stack_esid = GET_ESID(stack);
+	unsigned long unmapped_base_esid;
+	unsigned long vsid;
+
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		unmapped_base = TASK_UNMAPPED_BASE_USER32;
+	else
+		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+	unmapped_base_esid = GET_ESID(unmapped_base);
+
+	/* Preload the pc segment - user addresses only. */
+	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
+		return;
+	vsid = get_vsid(mm->context.id, pc);
+	__slb_allocate(pc_esid, vsid, mm->context);
+
+	/* Stack shares the pc's segment - nothing more to do. */
+	if (pc_esid == stack_esid)
+		return;
+
+	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
+		return;
+	vsid = get_vsid(mm->context.id, stack);
+	__slb_allocate(stack_esid, vsid, mm->context);
+
+	/* Likewise skip the mmap base if an earlier segment covers it. */
+	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
+		return;
+
+	if (!IS_VALID_EA(unmapped_base) ||
+	    (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
+		return;
+	vsid = get_vsid(mm->context.id, unmapped_base);
+	__slb_allocate(unmapped_base_esid, vsid, mm->context);
+}
+
+/* Flush all user entries from the segment table of the current processor. */
+void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	union {
+		unsigned long word0;
+		slb_dword0 data;
+	} esid_data;
+
+	/*
+	 * If the stab_cache did not overflow (see __slb_allocate), each
+	 * recorded user esid can be invalidated individually; otherwise
+	 * do a full slbia and re-bolt the vmalloc segment.
+	 */
+	if (offset <= NR_STAB_CACHE_ENTRIES) {
+		int i;
+		asm volatile("isync" : : : "memory");
+		for (i = 0; i < offset; i++) {
+			esid_data.word0 = 0;
+			esid_data.data.esid = __get_cpu_var(stab_cache[i]);
+			BUG_ON(esid_data.data.esid == GET_ESID(VMALLOCBASE));
+			asm volatile("slbie %0" : : "r" (esid_data));
+		}
+		asm volatile("isync" : : : "memory");
+	} else {
+		asm volatile("isync; slbia; isync" : : : "memory");
+		slb_add_bolted();
+	}
+
+	/* Workaround POWER5 < DD2.1 issue */
+	if (offset == 1 || offset > NR_STAB_CACHE_ENTRIES) {
+		/*
+		 * flush segment in EEH region, we dont normally access
+		 * addresses in this region.
+		 */
+		esid_data.word0 = 0;
+		esid_data.data.esid = EEH_REGION_ID;
+		asm volatile("slbie %0" : : "r" (esid_data));
+	}
+
+	__get_cpu_var(stab_cache_ptr) = 0;
+
+	preload_slb(tsk, mm);
+}
unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
return sprintf(buf, "%lx\n", val); \
} \
-static ssize_t __attribute_used__ \
- store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
+static ssize_t store_##NAME(struct sys_device *dev, const char *buf, \
+ size_t count) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
unsigned long val; \
viodev->dev.platform_data = of_node_get(of_node);
viodev->irq = NO_IRQ;
- irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", 0);
if (irq_p) {
int virq = virt_irq_create_mapping(*irq_p);
if (virq == NO_IRQ) {
#include <asm/naca.h>
#include <asm/rtas.h>
#include <asm/xics.h>
+#include <asm/ppcdebug.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
val64);
}
-void pSeriesLP_cppr_info(int n_cpu, u8 value)
+static void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
unsigned long lpar_rc;
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
+ cpumask_t cpumask = irq_affinity[irq];
+ cpumask_t tmp = CPU_MASK_NONE;
unsigned int server;
#ifdef CONFIG_IRQ_ALL_CPUS
/* For the moment only implement delivery to all cpus or one cpu */
if (smp_threads_ready) {
- cpumask_t cpumask = irq_affinity[irq];
- cpumask_t tmp = CPU_MASK_NONE;
if (cpus_equal(cpumask, CPU_MASK_ALL)) {
server = default_distrib_server;
} else {
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive "
+ printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
"returned %x\n", irq, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on "
+ printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
"returned %x\n", irq, call_status);
return;
}
call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_real_irq: irq=%d: "
+ printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
"ibm_int_off returned %x\n", irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
+ printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
" returned %x\n", irq, call_status);
return;
}
}
}
+extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
+
int xics_get_irq(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (irq == NO_IRQ) {
- printk(KERN_ERR "Interrupt %d (real) is invalid,"
+ printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
" disabling it.\n", vec);
xics_disable_real_irq(vec);
} else
#ifdef CONFIG_SMP
+extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
+
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
ibm_int_off = rtas_token("ibm,int-off");
np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
- if (!np)
- panic("xics_init_IRQ: can't find interrupt presentation");
-
+ if (!np) {
+ printk(KERN_WARNING "Can't find Interrupt Presentation\n");
+ udbg_printf("Can't find Interrupt Presentation\n");
+ while (1);
+ }
nextnode:
- ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
+ ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
if (ireg) {
/*
* set node starting index for this node
}
ireg = (uint *)get_property(np, "reg", &ilen);
- if (!ireg)
- panic("xics_init_IRQ: can't find interrupt reg property");
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
+ udbg_printf("Can't find Interrupt Reg Property\n");
+ while (1);
+ }
while (ilen) {
inodes[indx].addr = (unsigned long long)*ireg++ << 32;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
- printk(KERN_WARNING "xics: no ISA interrupt controller\n");
+ printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
- ireg = (uint *) get_property(np, "interrupts", NULL);
- if (!ireg)
- panic("xics_init_IRQ: can't find ISA interrupts property");
-
+ ireg = (uint *) get_property(np, "interrupts", 0);
+ if (!ireg) {
+ printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
+ udbg_printf("Can't find ISA Interrupts Property\n");
+ while (1);
+ }
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
_PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_PSERIES
+ /* actually iSeries does not use any of xics...but it has link dependencies
+ * for now, except this new one...
+ */
} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
ops = &pSeriesLP_ops;
+#endif
}
xics_8259_pic.enable = i8259_pic.enable;
if (naca->interrupt_controller == IC_PPC_XIC &&
xics_irq_8259_cascade != -1) {
if (request_irq(irq_offset_up(xics_irq_8259_cascade),
- no_action, 0, "8259 cascade", NULL))
- printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
- "cascade\n");
+ no_action, 0, "8259 cascade", 0))
+ printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
i8259_init();
}
return 0;
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
- "IPI", NULL);
+ "IPI", 0);
get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
irq, newmask, xics_status[1]);
if (status) {
- printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive "
+ printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
"returns %d\n", irq, status);
return;
}
int set_indicator = rtas_token("set-indicator");
const unsigned int giqs = 9005UL; /* Global Interrupt Queue Server */
int status = 0;
- unsigned int irq, virq, cpu = smp_processor_id();
+ unsigned int irq, cpu = smp_processor_id();
+ int xics_status[2];
+ unsigned long flags;
BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
ops->cppr_info(cpu, DEFAULT_PRIORITY);
iosync();
- for_each_irq(virq) {
- irq_desc_t *desc;
- int xics_status[2];
- unsigned long flags;
-
- /* We cant set affinity on ISA interrupts */
- if (virq < irq_offset_value())
- continue;
-
- desc = get_irq_desc(virq);
- irq = virt_irq_to_real(irq_offset_down(virq));
+ printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
+ for_each_irq(irq) {
+ irq_desc_t *desc = get_irq_desc(irq);
/* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == NO_IRQ)
+ if (irq_offset_down(irq) == XICS_IPI)
continue;
/* We only need to migrate enabled IRQS */
if (status) {
printk(KERN_ERR "migrate_irqs_away: irq=%d "
"ibm,get-xive returns %d\n",
- virq, status);
+ irq, status);
goto unlock;
}
goto unlock;
printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
- virq, cpu);
+ irq, cpu);
/* Reset affinity to all cpus */
xics_status[0] = default_distrib_server;
- status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
- xics_status[0], xics_status[1]);
+ status = rtas_call(ibm_set_xive, 3, 1, NULL,
+ irq, xics_status[0], xics_status[1]);
if (status)
- printk(KERN_ERR "migrate_irqs_away: irq=%d "
+ printk(KERN_ERR "migrate_irqs_away irq=%d "
"ibm,set-xive returns %d\n",
- virq, status);
+ irq, status);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
+
}
#endif
EXTRA_CFLAGS += -mno-minimal-toc
-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o slb_low.o slb.o mmap.o
+obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
unsigned long is_write = error_code & 0x02000000;
unsigned long trap = TRAP(regs);
- BUG_ON((trap == 0x380) || (trap == 0x480));
-
- if (trap == 0x300) {
+ if (trap == 0x300 || trap == 0x380) {
if (debugger_fault_handler(regs))
return 0;
}
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (!user_mode(regs) && (address >= TASK_SIZE))
+ if (!user_mode(regs) && (trap == 0x380 || address >= TASK_SIZE))
return SIGSEGV;
if (error_code & 0x00400000) {
struct mm_struct *mm;
pte_t *ptep;
int ret;
+ int cpu;
int user_region = 0;
int local = 0;
cpumask_t tmp;
if (pgdir == NULL)
return 1;
- tmp = cpumask_of_cpu(smp_processor_id());
+ cpu = get_cpu();
+ tmp = cpumask_of_cpu(cpu);
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
ret = hash_huge_page(mm, access, ea, vsid, local);
else {
ptep = find_linux_pte(pgdir, ea);
- if (ptep == NULL)
+ if (ptep == NULL) {
+ put_cpu();
return 1;
+ }
ret = __hash_page(ea, access, vsid, ptep, trap, local);
}
+ put_cpu();
return ret;
}
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
- max_pfn = max_low_pfn;
-
/* add all physical memory to the bootmem map. Also find the first */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ max_pfn = max_low_pfn;
#ifdef CONFIG_DISCONTIGMEM
{
totalram_pages += free_all_bootmem();
- for (addr = KERNELBASE; addr < (unsigned long)__va(lmb_end_of_DRAM());
+ for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
addr += PAGE_SIZE) {
if (!PageReserved(virt_to_page(addr)))
continue;
void *pgdir;
pte_t *ptep;
int local = 0;
+ int cpu;
cpumask_t tmp;
- unsigned long flags;
/* handle i-cache coherency */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE) &&
vsid = get_vsid(vma->vm_mm->context.id, ea);
- local_irq_save(flags);
- tmp = cpumask_of_cpu(smp_processor_id());
+ cpu = get_cpu();
+ tmp = cpumask_of_cpu(cpu);
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
- local_irq_restore(flags);
+ put_cpu();
}
void * reserve_phb_iospace(unsigned long size)
+++ /dev/null
-/*
- * linux/arch/ppc64/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_32BIT))
- return 1;
-
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
- max_pfn = max_low_pfn;
if (parse_numa_properties())
setup_nonnuma();
+++ /dev/null
-/*
- * PowerPC64 SLB support.
- *
- * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- * Based on earlier code writteh by:
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/paca.h>
-#include <asm/naca.h>
-#include <asm/cputable.h>
-
-extern void slb_allocate(unsigned long ea);
-
-static inline void create_slbe(unsigned long ea, unsigned long vsid,
- unsigned long flags, unsigned long entry)
-{
- ea = (ea & ESID_MASK) | SLB_ESID_V | entry;
- vsid = (vsid << SLB_VSID_SHIFT) | flags;
- asm volatile("slbmte %0,%1" :
- : "r" (vsid), "r" (ea)
- : "memory" );
-}
-
-static void slb_add_bolted(void)
-{
-#ifndef CONFIG_PPC_ISERIES
- WARN_ON(!irqs_disabled());
-
- /* If you change this make sure you change SLB_NUM_BOLTED
- * appropriately too */
-
- /* Slot 1 - first VMALLOC segment
- * Since modules end up there it gets hit very heavily.
- */
- create_slbe(VMALLOCBASE, get_kernel_vsid(VMALLOCBASE),
- SLB_VSID_KERNEL, 1);
-
- asm volatile("isync":::"memory");
-#endif
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
- unsigned long offset = get_paca()->slb_cache_ptr;
- unsigned long esid_data;
- unsigned long pc = KSTK_EIP(tsk);
- unsigned long stack = KSTK_ESP(tsk);
- unsigned long unmapped_base;
-
- if (offset <= SLB_CACHE_ENTRIES) {
- int i;
- asm volatile("isync" : : : "memory");
- for (i = 0; i < offset; i++) {
- esid_data = (unsigned long)get_paca()->slb_cache[i]
- << SID_SHIFT;
- asm volatile("slbie %0" : : "r" (esid_data));
- }
- asm volatile("isync" : : : "memory");
- } else {
- asm volatile("isync; slbia; isync" : : : "memory");
- slb_add_bolted();
- }
-
- /* Workaround POWER5 < DD2.1 issue */
- if (offset == 1 || offset > SLB_CACHE_ENTRIES) {
- /* flush segment in EEH region, we shouldn't ever
- * access addresses in this region. */
- asm volatile("slbie %0" : : "r"(EEHREGIONBASE));
- }
-
- get_paca()->slb_cache_ptr = 0;
- get_paca()->context = mm->context;
-
- /*
- * preload some userspace segments into the SLB.
- */
- if (test_tsk_thread_flag(tsk, TIF_32BIT))
- unmapped_base = TASK_UNMAPPED_BASE_USER32;
- else
- unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
- if (pc >= KERNELBASE)
- return;
- slb_allocate(pc);
-
- if (GET_ESID(pc) == GET_ESID(stack))
- return;
-
- if (stack >= KERNELBASE)
- return;
- slb_allocate(stack);
-
- if ((GET_ESID(pc) == GET_ESID(unmapped_base))
- || (GET_ESID(stack) == GET_ESID(unmapped_base)))
- return;
-
- if (unmapped_base >= KERNELBASE)
- return;
- slb_allocate(unmapped_base);
-}
-
-void slb_initialize(void)
-{
-#ifdef CONFIG_PPC_ISERIES
- asm volatile("isync; slbia; isync":::"memory");
-#else
- unsigned long flags = SLB_VSID_KERNEL;
-
- /* Invalidate the entire SLB (even slot 0) & all the ERATS */
- if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
- flags |= SLB_VSID_L;
-
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
- asm volatile("isync; slbia; isync":::"memory");
- create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE),
- flags, 0);
-
-#endif
- slb_add_bolted();
- get_paca()->stab_rr = SLB_NUM_BOLTED;
-}
+++ /dev/null
-/*
- * arch/ppc64/mm/slb_low.S
- *
- * Low-level SLB routines
- *
- * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- *
- * Based on earlier C version:
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/ppc_asm.h>
-#include <asm/offsets.h>
-#include <asm/cputable.h>
-
-/* void slb_allocate(unsigned long ea);
- *
- * Create an SLB entry for the given EA (user or kernel).
- * r3 = faulting address, r13 = PACA
- * r9, r10, r11 are clobbered by this function
- * No other registers are examined or changed.
- */
-_GLOBAL(slb_allocate)
- /*
- * First find a slot, round robin. Previously we tried to find
- * a free slot first but that took too long. Unfortunately we
- * dont have any LRU information to help us choose a slot.
- */
- ld r10,PACASTABRR(r13)
-3:
- addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
-
- blt+ 4f
- li r10,SLB_NUM_BOLTED
-
- /*
- * Never cast out the segment for our kernel stack. Since we
- * dont invalidate the ERAT we could have a valid translation
- * for the kernel stack during the first part of exception exit
- * which gets invalidated due to a tlbie from another cpu at a
- * non recoverable point (after setting srr0/1) - Anton
- */
-4: slbmfee r11,r10
- srdi r11,r11,27
- /*
- * Use paca->ksave as the value of the kernel stack pointer,
- * because this is valid at all times.
- * The >> 27 (rather than >> 28) is so that the LSB is the
- * valid bit - this way we check valid and ESID in one compare.
- * In order to completely close the tiny race in the context
- * switch (between updating r1 and updating paca->ksave),
- * we check against both r1 and paca->ksave.
- */
- srdi r9,r1,27
- ori r9,r9,1 /* mangle SP for later compare */
- cmpd r11,r9
- beq- 3b
- ld r9,PACAKSAVE(r13)
- srdi r9,r9,27
- ori r9,r9,1
- cmpd r11,r9
- beq- 3b
-
- std r10,PACASTABRR(r13)
-
- /* r3 = faulting address, r10 = entry */
-
- srdi r9,r3,60 /* get region */
- srdi r3,r3,28 /* get esid */
- cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
-
- /* r9 = region, r3 = esid, cr7 = <>KERNELBASE */
-
- rldicr. r11,r3,32,16
- bne- 8f /* invalid ea bits set */
- addi r11,r9,-1
- cmpldi r11,0xb
- blt- 8f /* invalid region */
-
- /* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */
-
- blt cr7,0f /* user or kernel? */
-
- /* kernel address */
- li r11,SLB_VSID_KERNEL
-BEGIN_FTR_SECTION
- bne cr7,9f
- li r11,(SLB_VSID_KERNEL|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
- b 9f
-
-0: /* user address */
- li r11,SLB_VSID_USER
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
- /* check against the hugepage ranges */
- cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT)
- bge 6f /* >= TASK_HPAGE_END */
- cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT)
- bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */
- cmpldi r3,16
- bge 6f /* 4GB..TASK_HPAGE_BASE */
-
- lhz r9,PACAHTLBSEGS(r13)
- srd r9,r9,r3
- andi. r9,r9,1
- beq 6f
-
-5: /* this is a hugepage user address */
- li r11,(SLB_VSID_USER|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
-
-6: ld r9,PACACONTEXTID(r13)
-
-9: /* r9 = "context", r3 = esid, r11 = flags, r10 = entry */
-
- rldimi r9,r3,15,0 /* r9= VSID ordinal */
-
-7: rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
- oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
-
- /* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */
-
- li r3,VSID_RANDOMIZER@higher
- sldi r3,r3,32
- oris r3,r3,VSID_RANDOMIZER@h
- ori r3,r3,VSID_RANDOMIZER@l
-
- mulld r9,r3,r9 /* r9 = ordinal * VSID_RANDOMIZER */
- clrldi r9,r9,28 /* r9 &= VSID_MASK */
- sldi r9,r9,SLB_VSID_SHIFT /* r9 <<= SLB_VSID_SHIFT */
- or r9,r9,r11 /* r9 |= flags */
-
- /* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing.
- */
- slbmte r9,r10
-
- bgelr cr7 /* we're done for kernel addresses */
-
- /* Update the slb cache */
- lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
- cmpldi r3,SLB_CACHE_ENTRIES
- bge 1f
-
- /* still room in the slb cache */
- sldi r11,r3,1 /* r11 = offset * sizeof(u16) */
- rldicl r10,r10,36,28 /* get low 16 bits of the ESID */
- add r11,r11,r13 /* r11 = (u16 *)paca + offset */
- sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
- addi r3,r3,1 /* offset++ */
- b 2f
-1: /* offset >= SLB_CACHE_ENTRIES */
- li r3,SLB_CACHE_ENTRIES+1
-2:
- sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
- blr
-
-8: /* invalid EA */
- li r9,0 /* 0 VSID ordinal -> BAD_VSID */
- li r11,SLB_VSID_USER /* flags don't much matter */
- b 7b
If unsure, say N.
-config QDIO_DEBUG
- bool "Extended debugging information"
- depends on QDIO
- help
- Say Y here to get extended debugging output in /proc/s390dbf/qdio...
- Warning: this option reduces the performance of the QDIO module.
-
- If unsure, say N.
-
comment "Misc"
config PREEMPT
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
static int appldata_interval_handler(ctl_table *ctl, int write,
struct file *filp,
void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ size_t *lenp);
static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
*/
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int len;
char buf[2];
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
return 0;
}
spin_unlock(&appldata_timer_lock);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
*/
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int len, interval;
char buf[16];
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
return 0;
}
interval);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
*/
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
struct appldata_ops *ops = NULL, *tmp_ops;
int rc, len, found;
}
spin_unlock_bh(&appldata_ops_lock);
- if (!*lenp || *ppos) {
+ if (!*lenp || filp->f_pos) {
*lenp = 0;
module_put(ops->owner);
return 0;
spin_unlock_bh(&appldata_ops_lock);
out:
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
module_put(ops->owner);
return 0;
}
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_MACHCHK_WARNING=y
CONFIG_QDIO=y
# CONFIG_QDIO_PERF_STATS is not set
-# CONFIG_QDIO_DEBUG is not set
#
# Misc
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_CSZ=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
-# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DELAY is not set
# CONFIG_NET_SCH_INGRESS is not set
CONFIG_NET_QOS=y
CONFIG_NET_ESTIMATOR=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+# CONFIG_FAT_FS is not set
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES_GENERIC is not set
+# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_TEA is not set
#
# Library routines
#
-# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
} _sigev_un;
};
-extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
-extern int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from);
-
#endif /* _ASM_S390X_S390_H */
return err;
}
-int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from)
-{
- int err;
- u32 tmp;
-
- if (!access_ok (VERIFY_READ, from, sizeof(siginfo_t32)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (from->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (from->si_code >> 16) {
- case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
- case __SI_MESGQ >> 16:
- err |= __get_user(to->si_int, &from->si_int);
- /* fallthrough */
- case __SI_KILL >> 16:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case __SI_CHLD >> 16:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- break;
- case __SI_FAULT >> 16:
- err |= __get_user(tmp, &from->si_addr);
- to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
- break;
- case __SI_POLL >> 16:
- case __SI_TIMER >> 16:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- default:
- break;
- }
- }
- return err;
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
+ .macro SAVE_ALL_BASE psworg,savearea,sync
+ stm %r12,%r15,\savearea
+ l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm
- .macro SAVE_ALL psworg,savearea,sync
- la %r12,\psworg
+ .macro CLEANUP_SAVE_ALL_BASE psworg,savearea,sync
+ l %r1,SP_PSW+4(%r15)
+ cli 1(%r1),0xcf
+ bne BASED(0f)
+ mvc \savearea(16),SP_R12(%r15)
+0: st %r13,SP_R13(%r15)
+ .endm
+
+ .macro SAVE_ALL psworg,savearea,sync
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- bz BASED(2f) # skip stack setup save
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ bz BASED(1f) # skip stack setup save
+ l %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- bnz BASED(1f) # from user -> load async stack
- clc \psworg+4(4),BASED(.Lcritical_end)
- bhe BASED(0f)
- clc \psworg+4(4),BASED(.Lcritical_start)
- bl BASED(0f)
- l %r14,BASED(.Lcleanup_critical)
- basr %r14,%r14
- tm 0(%r12),0x01 # retest problem state after cleanup
- bnz BASED(1f)
-0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
- slr %r14,%r15
+ tm \psworg+1,0x01 # test problem state bit
+ bnz BASED(0f) # from user -> load async stack
+ l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
+ slr %r14,%r15
sra %r14,13
- be BASED(2f)
-1: l %r15,__LC_ASYNC_STACK
+ be BASED(1f)
+0: l %r15,__LC_ASYNC_STACK
.endif
-2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
- mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
- la %r12,\psworg
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,12,__LC_SVC_ILC
- stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_ILC(%r15)
- mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
- la %r12,0
- st %r12,0(%r15) # clear back chain
+1: s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ l %r14,BASED(.L\psworg)
+ slr %r12,%r12
+ icm %r14,12,__LC_SVC_ILC
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+ st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
+ mvc SP_PSW(8,%r15),\psworg # move user PSW to stack
+ st %r14,SP_ILC(%r15)
+ st %r12,0(%r15) # clear back chain
+ .endm
+
+ .macro CLEANUP_SAVE_ALL psworg,savearea,sync
+ l %r1,\savearea+12
+ .if \sync
+ tm \psworg+1,0x01
+ bz BASED(1f)
+ l %r1,__LC_KERNEL_STACK
+ .else
+ tm \psworg+1,0x01
+ bnz BASED(0f)
+ l %r0,__LC_ASYNC_STACK
+ slr %r0,%r1
+ sra %r0,13
+ bz BASED(1f)
+0: l %r1,__LC_ASYNC_STACK
+ .endif
+1: s %r1,BASED(.Lc_spsize)
+ st %r1,SP_R15(%r15)
+ l %r0,BASED(.L\psworg)
+ xc SP_R12(4,%r15),SP_R12(%r15)
+ icm %r0,12,__LC_SVC_ILC
+ st %r0,SP_R14(%r15)
+ mvc SP_R0(48,%r1),SP_R0(%r15)
+ mvc SP_ORIG_R2(4,%r1),SP_R2(%r15)
+ mvc SP_R12(16,%r1),\savearea
+ mvc SP_PSW(8,%r1),\psworg
+ st %r0,SP_ILC(%r1)
+ xc 0(4,%r1),0(%r1)
.endm
- .macro RESTORE_ALL sync
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
- .endif
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpsw __LC_RETURN_PSW # back to caller
+ .macro RESTORE_ALL # system exit macro
+ mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpsw __LC_RETURN_PSW # back to caller
+ .endm
+
+ .macro CLEANUP_RESTORE_ALL
+ l %r1,SP_PSW+4(%r15)
+ cli 0(%r1),0x82
+ bne BASED(0f)
+ mvc SP_PSW(8,%r15),__LC_RETURN_PSW
+ b BASED(1f)
+0: l %r1,SP_R15(%r15)
+ mvc SP_PSW(8,%r15),SP_PSW(%r1)
+ mvc SP_R0(64,%r15),SP_R0(%r1)
+1:
+ .endm
+
+ .macro GET_THREAD_INFO
+ l %r9,__LC_THREAD_INFO
+ .endm
+
+ .macro CHECK_CRITICAL
+ tm SP_PSW+1(%r15),0x01 # test problem state bit
+ bnz BASED(0f) # from user -> not critical
+ clc SP_PSW+4(4,%r15),BASED(.Lcritical_end)
+ bnl BASED(0f)
+ clc SP_PSW+4(4,%r15),BASED(.Lcritical_start)
+ bl BASED(0f)
+ l %r1,BASED(.Lcleanup_critical)
+ basr %r14,%r1
+0:
.endm
/*
.globl system_call
system_call:
- SAVE_ALL_BASE __LC_SAVE_AREA
+ SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
+sysc_enter:
+ GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
sla %r7,2 # *4 and test for svc 0
bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL 1
+ RESTORE_ALL
#
# recheck if there is more work to do
#
sysc_work_loop:
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bz BASED(sysc_leave) # there is no work to do
#
.globl ret_from_fork
ret_from_fork:
l %r13,__LC_SVC_NEW_PSW+4
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,BASED(.Lschedtail)
basr %r14,%r1
stosm 24(%r15),0x03 # reenable interrupts
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- SAVE_ALL_BASE __LC_SAVE_AREA
+ SAVE_ALL_BASE __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
l %r7,BASED(.Ljump_table)
sll %r8,2
+ GET_THREAD_INFO
l %r7,0(%r8,%r7) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
la %r14,BASED(sysc_return)
clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
be BASED(pgm_svcper)
# no interesting special case, ignore PER event
- lm %r12,%r15,__LC_SAVE_AREA
+ lm %r13,%r15,__LC_SAVE_AREA
lpsw 0x28
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
.globl io_int_handler
io_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL_BASE __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
tm __TI_flags+3(%r9),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_PREEMPT
io_preempt:
l %r1,BASED(.Lschedule)
basr %r14,%r1 # call schedule
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
b BASED(io_resume_loop)
#endif
stosm 24(%r15),0x03 # reenable interrupts
basr %r14,%r1 # call scheduler
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_INT
bz BASED(io_leave) # there is no work to do
b BASED(io_work_loop)
.globl ext_int_handler
ext_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL_BASE __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
la %r2,SP_PTREGS(%r15) # address of register-save area
lh %r3,__LC_EXT_INT_CODE # get interruption code
l %r1,BASED(.Ldo_extint)
.globl mcck_int_handler
mcck_int_handler:
- SAVE_ALL_BASE __LC_SAVE_AREA+32
+ SAVE_ALL_BASE __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
mcck_return:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table_system_call:
- .long system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_return:
- .long sysc_return + 0x80000000, sysc_leave + 0x80000000
-cleanup_table_sysc_leave:
- .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
-cleanup_table_sysc_work_loop:
- .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+cleanup_table:
+ .long system_call, sysc_enter, cleanup_sysc_enter
+ .long sysc_return, sysc_leave, cleanup_sysc_return
+ .long sysc_leave, sysc_work_loop, cleanup_sysc_leave
+ .long sysc_work_loop, sysc_reschedule, cleanup_sysc_return
+cleanup_table_entries=(.-cleanup_table) / 12
cleanup_critical:
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_system_call+4)
- bl BASED(cleanup_system_call)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_return)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
- bl BASED(cleanup_sysc_return)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
- bl BASED(cleanup_sysc_leave)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
- bl BASED(cleanup_sysc_leave)
-0:
+ lhi %r0,cleanup_table_entries
+ la %r1,BASED(cleanup_table)
+ l %r2,SP_PSW+4(%r15)
+ la %r2,0(%r2)
+cleanup_loop:
+ cl %r2,0(%r1)
+ bl BASED(cleanup_cont)
+ cl %r2,4(%r1)
+ bl BASED(cleanup_found)
+cleanup_cont:
+ la %r1,12(%r1)
+ bct %r0,BASED(cleanup_loop)
br %r14
+cleanup_found:
+ l %r1,8(%r1)
+ br %r1
-cleanup_system_call:
- mvc __LC_RETURN_PSW(4),0(%r12)
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bne BASED(0f)
- mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
-0: st %r13,__LC_SAVE_AREA+20
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- st %r15,__LC_SAVE_AREA+28
- lh %r7,0x8a
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
- la %r12,__LC_RETURN_PSW
+cleanup_sysc_enter:
+ CLEANUP_SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ lh %r0,0x8a
+ st %r0,SP_R7(%r15)
+ la %r1,BASED(sysc_enter)
+ o %r1,BASED(.Lamode)
+ st %r1,SP_PSW+4(%r15)
br %r14
cleanup_sysc_return:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
- la %r12,__LC_RETURN_PSW
+ la %r1,BASED(sysc_return)
+ o %r1,BASED(.Lamode)
+ st %r1,SP_PSW+4(%r15)
br %r14
cleanup_sysc_leave:
- clc 4(4,%r12),BASED(cleanup_sysc_leave_lpsw)
- be BASED(0f)
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
- lm %r0,%r11,SP_R0(%r15)
- l %r15,SP_R15(%r15)
-0: la %r12,__LC_RETURN_PSW
+ CLEANUP_RESTORE_ALL
br %r14
-cleanup_sysc_leave_lpsw:
- .long sysc_leave + 10 + 0x80000000
/*
* Integer constants
.Lc_overhead: .long STACK_FRAME_OVERHEAD
.Lc_pactive: .long PREEMPT_ACTIVE
.Lnr_syscalls: .long NR_syscalls
-.L0x018: .short 0x018
-.L0x020: .short 0x020
-.L0x028: .short 0x028
-.L0x030: .short 0x030
-.L0x038: .short 0x038
+.L0x018: .long 0x018
+.L0x020: .long 0x020
+.L0x028: .long 0x028
+.L0x030: .long 0x030
+.L0x038: .long 0x038
+.Lamode: .long 0x80000000
/*
* Symbol constants
_TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
-#define BASED(name) name-system_call(%r13)
-
/*
* Register usage in interrupt handlers:
* R9 - pointer to current task structure
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
- stmg %r12,%r15,\savearea
- larl %r13,system_call
- .endm
-
.macro SAVE_ALL psworg,savearea,sync
- la %r12,\psworg
+ stmg %r13,%r15,\savearea
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- jz 2f # skip stack setup save
- lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ jz 1f # skip stack setup save
+ lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- jnz 1f # from user -> load kernel stack
- clc \psworg+8(8),BASED(.Lcritical_end)
- jhe 0f
- clc \psworg+8(8),BASED(.Lcritical_start)
- jl 0f
- brasl %r14,cleanup_critical
- tm 0(%r12),0x01 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
+ tm \psworg+1,0x01 # test problem state bit
+ jnz 0f # from user -> load kernel stack
+ lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
slgr %r14,%r15
srag %r14,%r14,14
- jz 2f
-1: lg %r15,__LC_ASYNC_STACK # load async stack
+ jz 1f
+0: lg %r15,__LC_ASYNC_STACK # load async stack
.endif
-2: aghi %r15,-SP_SIZE # make room for registers & psw
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- la %r12,\psworg
- stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,12,__LC_SVC_ILC
- stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_ILC(%r15)
- mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
- la %r12,0
- stg %r12,0(%r15)
+1: aghi %r15,-SP_SIZE # make room for registers & psw
+ lghi %r14,\psworg
+ slgr %r13,%r13
+ icm %r14,12,__LC_SVC_ILC
+	stmg	%r0,%r12,SP_R0(%r15)	# store gprs %r0-%r12 to kernel stack
+ stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+ mvc SP_R13(24,%r15),\savearea # move r13, r14 and r15 to stack
+ mvc SP_PSW(16,%r15),\psworg # move user PSW to stack
+ st %r14,SP_ILC(%r15)
+ stg %r13,0(%r15)
.endm
- .macro RESTORE_ALL sync
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ .macro CLEANUP_SAVE_ALL psworg,savearea,sync
+ lg %r1,SP_PSW+8(%r15)
+ cli 1(%r1),0xdf
+ jne 2f
+ mvc \savearea(24),SP_R13(%r15)
+2: lg %r1,\savearea+16
+ .if \sync
+ tm \psworg+1,0x01
+ jz 1f
+ lg %r1,__LC_KERNEL_STACK
+ .else
+ tm \psworg+1,0x01
+ jnz 0f
+ lg %r0,__LC_ASYNC_STACK
+ slgr %r0,%r1
+ srag %r0,%r0,14
+ jz 1f
+0: lg %r1,__LC_ASYNC_STACK
.endif
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpswe __LC_RETURN_PSW # back to caller
+1: aghi %r1,-SP_SIZE
+ stg %r1,SP_R15(%r15)
+ lghi %r0,\psworg
+ xc SP_R13(8,%r15),SP_R13(%r15)
+ icm %r0,12,__LC_SVC_ILC
+ stg %r0,SP_R14(%r15)
+ mvc SP_R0(104,%r1),SP_R0(%r15)
+ mvc SP_ORIG_R2(8,%r1),SP_R2(%r15)
+ mvc SP_R13(24,%r1),\savearea
+ mvc SP_PSW(16,%r1),\psworg
+ st %r0,SP_ILC(%r1)
+ xc 0(8,%r1),0(%r1)
+ .endm
+
+ .macro RESTORE_ALL # system exit macro
+ mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpswe __LC_RETURN_PSW # back to caller
+ .endm
+
+ .macro CLEANUP_RESTORE_ALL
+ lg %r1,SP_PSW+8(%r15)
+ cli 0(%r1),0xb2
+ jne 0f
+ mvc SP_PSW(16,%r15),__LC_RETURN_PSW
+ j 1f
+0: lg %r1,SP_R15(%r15)
+ mvc SP_PSW(16,%r15),SP_PSW(%r1)
+ mvc SP_R0(128,%r15),SP_R0(%r1)
+1:
+ .endm
+
+ .macro GET_THREAD_INFO
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ .endm
+
+ .macro CHECK_CRITICAL
+ tm SP_PSW+1(%r15),0x01 # test problem state bit
+ jnz 0f # from user -> not critical
+ larl %r1,.Lcritical_start
+ clc SP_PSW+8(8,%r15),8(%r1) # compare ip with __critical_end
+ jnl 0f
+ clc SP_PSW+8(8,%r15),0(%r1) # compare ip with __critical_start
+ jl 0f
+ brasl %r14,cleanup_critical
+0:
.endm
/*
.globl system_call
system_call:
- SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+sysc_enter:
+ GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
slag %r7,%r7,2 # *4 and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ lghi %r0,NR_syscalls
+ clr %r1,%r0
jnl sysc_nr_ok
lgfr %r7,%r1 # clear high word in r1
slag %r7,%r7,2 # svc 0: system call number in %r1
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL 1
+ RESTORE_ALL
#
# recheck if there is more work to do
#
sysc_work_loop:
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jz sysc_leave # there is no work to do
#
# a new process exits the kernel with ret_from_fork
#
.globl ret_from_fork
-ret_from_fork:
- lg %r13,__LC_SVC_NEW_PSW+8
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ret_from_fork:
+ GET_THREAD_INFO # load pointer to task_struct to R9
brasl %r14,schedule_tail
stosm 24(%r15),0x03 # reenable interrupts
j sysc_return
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
pgm_do_call:
sll %r8,3
+ GET_THREAD_INFO
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
- lmg %r12,%r15,__LC_SAVE_AREA
lpswe __LC_PGM_OLD_PSW
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ GET_THREAD_INFO # load pointer to task_struct to R9
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
*/
.globl io_int_handler
io_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ stck __LC_INT_CLOCK
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
tm __TI_flags+7(%r9),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_PREEMPT
io_preempt:
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call schedule
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
j io_resume_loop
#endif
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+ GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_INT
jz io_leave # there is no work to do
j io_work_loop
*/
.globl ext_int_handler
ext_int_handler:
- stck __LC_INT_CLOCK
- SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ CHECK_CRITICAL
+ GET_THREAD_INFO # load pointer to task_struct to R9
+ stck __LC_INT_CLOCK
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_EXT_INT_CODE # get interruption code
brasl %r14,do_extint
*/
.globl mcck_int_handler
mcck_int_handler:
- SAVE_ALL_BASE __LC_SAVE_AREA+64
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
brasl %r14,s390_do_machine_check
mcck_return:
- RESTORE_ALL 0
+ RESTORE_ALL
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table_system_call:
- .quad system_call, sysc_do_svc
-cleanup_table_sysc_return:
- .quad sysc_return, sysc_leave
-cleanup_table_sysc_leave:
- .quad sysc_leave, sysc_work_loop
-cleanup_table_sysc_work_loop:
- .quad sysc_work_loop, sysc_reschedule
+cleanup_table:
+ .quad system_call, sysc_enter, cleanup_sysc_enter
+ .quad sysc_return, sysc_leave, cleanup_sysc_return
+ .quad sysc_leave, sysc_work_loop, cleanup_sysc_leave
+ .quad sysc_work_loop, sysc_reschedule, cleanup_sysc_return
+cleanup_table_entries=(.-cleanup_table) / 24
cleanup_critical:
- clc 8(8,%r12),BASED(cleanup_table_system_call)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_system_call+8)
- jl cleanup_system_call
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_return)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
- jl cleanup_sysc_return
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
- jl cleanup_sysc_leave
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
- jl cleanup_sysc_leave
-0:
+ lghi %r0,cleanup_table_entries
+ larl %r1,cleanup_table
+ lg %r2,SP_PSW+8(%r15)
+cleanup_loop:
+ clg %r2,0(%r1)
+ jl cleanup_cont
+ clg %r2,8(%r1)
+ jl cleanup_found
+cleanup_cont:
+ la %r1,24(%r1)
+ brct %r0,cleanup_loop
br %r14
-
-cleanup_system_call:
- mvc __LC_RETURN_PSW(8),0(%r12)
- clc 8(8,%r12),BASED(cleanup_table_system_call)
- jne 0f
- mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
-0: stg %r13,__LC_SAVE_AREA+40
- SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- stg %r15,__LC_SAVE_AREA+56
- llgh %r7,__LC_SVC_INT_CODE
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
- la %r12,__LC_RETURN_PSW
+cleanup_found:
+ lg %r1,16(%r1)
+ br %r1
+
+cleanup_sysc_enter:
+ CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ llgh %r0,0x8a
+ stg %r0,SP_R7(%r15)
+ larl %r1,sysc_enter
+ stg %r1,SP_PSW+8(%r15)
br %r14
cleanup_sysc_return:
- mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
- la %r12,__LC_RETURN_PSW
+ larl %r1,sysc_return
+ stg %r1,SP_PSW+8(%r15)
br %r14
cleanup_sysc_leave:
- clc 8(8,%r12),BASED(cleanup_sysc_leave_lpsw)
- je 0f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
- lmg %r0,%r11,SP_R0(%r15)
- lg %r15,SP_R15(%r15)
-0: la %r12,__LC_RETURN_PSW
+ CLEANUP_RESTORE_ALL
br %r14
-cleanup_sysc_leave_lpsw:
- .quad sysc_leave + 12
/*
* Integer constants
.align 4
.Lconst:
.Lc_pactive: .long PREEMPT_ACTIVE
-.Lnr_syscalls: .long NR_syscalls
-.L0x0130: .short 0x130
-.L0x0140: .short 0x140
-.L0x0150: .short 0x150
-.L0x0160: .short 0x160
-.L0x0170: .short 0x170
.Lcritical_start:
.quad __critical_start
.Lcritical_end:
copied += sizeof(unsigned int);
}
return 0;
- case PTRACE_GETEVENTMSG:
- return put_user((__u32) child->ptrace_message,
- (unsigned int __user *) data);
- case PTRACE_GETSIGINFO:
- if (child->last_siginfo == NULL)
- return -EINVAL;
- return copy_siginfo_to_user32((siginfo_t32 __user *) data,
- child->last_siginfo);
- case PTRACE_SETSIGINFO:
- if (child->last_siginfo == NULL)
- return -EINVAL;
- return copy_siginfo_from_user32(child->last_siginfo,
- (siginfo_t32 __user *) data);
}
return ptrace_request(child, request, addr, data);
}
return s;
}
EXPORT_SYMBOL_NOVERS(memset);
+
+/*
+ * missing exports for string functions defined in lib/string.c
+ */
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(strchr);
+EXPORT_SYMBOL_NOVERS(strnchr);
+EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL_NOVERS(strpbrk);
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y := init.o fault.o ioremap.o extmem.o
obj-$(CONFIG_CMM) += cmm.o
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
-#include <asm/smp.h>
#include "../../../drivers/s390/net/smsgiucv.h"
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
char buf[16], *p;
long pages;
int len;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp)
{
char buf[64], *p;
long pages, seconds;
int len;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
+++ /dev/null
-/*
- * linux/arch/s390/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_ARCH_S390X
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_31BIT))
- return 1;
-#endif
- return sysctl_legacy_va_layout ||
- (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
- depends on BROKEN
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
config SUN4
bool "Support for SUN4 machines (disables SUN4[CDM] support)"
- depends on !SMP
help
Say Y here if, and only if, your machine is a sun4. Note that
a kernel compiled with this option will run only on sun4.
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
-unsigned long cache_decay_ticks = 100;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
}
}
-void smp_reschedule_irq(void)
-{
- set_need_resched();
-}
-
void smp_flush_page_to_ram(unsigned long page)
{
/* Current theory is that those who call this are the one's
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
+extern unsigned long cpu_present_map;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern int smp_threads_ready;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
+ init_idle();
+
/* Get our local ticker going. */
smp_setup_percpu_timer();
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
+extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
current_set[0] = NULL;
local_irq_enable();
- cpus_clear(cpu_present_map);
+ cpu_present_map = 0;
/* XXX This whole thing has to go. See sparc64. */
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
- cpu_set(mid, cpu_present_map);
- SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
+ cpu_present_map |= (1<<mid);
+ SMP_PRINTK(("cpu_present_map %08lx\n", cpu_present_map));
for(i=0; i < NR_CPUS; i++)
__cpu_number_map[i] = -1;
for(i=0; i < NR_CPUS; i++)
if(i == boot_cpu_id)
continue;
- if (cpu_isset(i, cpu_present_map)) {
+ if(cpu_present_map & (1 << i)) {
extern unsigned long sun4d_cpu_startup;
unsigned long *entry = &sun4d_cpu_startup;
struct task_struct *p;
}
}
if(!(cpu_callin_map[i])) {
- cpu_clear(i, cpu_present_map);
+ cpu_present_map &= ~(1 << i);
__cpu_number_map[i] = -1;
}
}
local_flush_cache_all();
if(cpucount == 0) {
printk("Error: only one Processor found.\n");
- cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
+ cpu_present_map = (1 << hard_smp4d_processor_id());
} else {
unsigned long bogosum = 0;
for(i = 0; i < NR_CPUS; i++) {
- if (cpu_isset(i, cpu_present_map)) {
+ if(cpu_present_map & (1 << i)) {
bogosum += cpu_data(i).udelay_val;
smp_highest_cpu = i;
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{
- cpumask_t mask;
+ register unsigned long mask;
register int i;
- mask = cpumask_of_cpu(hard_smp4d_processor_id());
- cpus_andnot(mask, cpu_present_map, mask);
+ mask = (cpu_present_map & ~(1 << hard_smp4d_processor_id()));
for(i = 0; i <= high; i++) {
- if (cpu_isset(i, mask)) {
+ if(mask & (1 << i)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4d_send_ipi(i, IRQ_CROSS_CALL);
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
/* And set btfixup... */
- BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
+ init_idle();
+
/* Allow master to continue. */
swap((unsigned long *)&cpu_callin_map[cpuid], 1);
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
+extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
void __init sun4m_init_smp(void)
{
- BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
+ BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}
sun4_esp_physaddr=SUN4_400_ESP_PHYSADDR;
break;
default:
- ;
}
}
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.align 4
smp_do_cpu_idle:
+ call init_idle
+ nop
call cpu_idle
mov 0, %o0
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + (offset) + 0x00], %t0; \
- lduh [%src + (offset) + 0x02], %t1; \
- lduh [%src + (offset) + 0x04], %t2; \
- lduh [%src + (offset) + 0x06], %t3; \
- sth %t0, [%dst + (offset) + 0x00]; \
- sth %t1, [%dst + (offset) + 0x02]; \
- sth %t2, [%dst + (offset) + 0x04]; \
- sth %t3, [%dst + (offset) + 0x06];
+ lduh [%src + offset + 0x00], %t0; \
+ lduh [%src + offset + 0x02], %t1; \
+ lduh [%src + offset + 0x04], %t2; \
+ lduh [%src + offset + 0x06], %t3; \
+ sth %t0, [%dst + offset + 0x00]; \
+ sth %t1, [%dst + offset + 0x02]; \
+ sth %t2, [%dst + offset + 0x04]; \
+ sth %t3, [%dst + offset + 0x06];
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
.text
.align 4
#endif
/* Both these macros have to start with exactly the same insn */
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
-
-#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- std %t0, [%dst - (offset) - 0x10]; \
- std %t2, [%dst - (offset) - 0x08];
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ std %t0, [%dst - offset - 0x10]; \
+ std %t2, [%dst - offset - 0x08];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
/* Both these macros have to start with exactly the same insn */
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- st %t0, [%dst - (offset) - 0x20]; \
- st %t1, [%dst - (offset) - 0x1c]; \
- st %t2, [%dst - (offset) - 0x18]; \
- st %t3, [%dst - (offset) - 0x14]; \
- st %t4, [%dst - (offset) - 0x10]; \
- st %t5, [%dst - (offset) - 0x0c]; \
- st %t6, [%dst - (offset) - 0x08]; \
- st %t7, [%dst - (offset) - 0x04];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- std %t0, [%dst - (offset) - 0x20]; \
- std %t2, [%dst - (offset) - 0x18]; \
- std %t4, [%dst - (offset) - 0x10]; \
- std %t6, [%dst - (offset) - 0x08];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c];
-
-#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src + (offset) + 0x00], %t0; \
- ldub [%src + (offset) + 0x01], %t1; \
- stb %t0, [%dst + (offset) + 0x00]; \
- stb %t1, [%dst + (offset) + 0x01];
-
-#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t5; \
- srl %t1, shir, %t6; \
- sll %t0, shil, %t0; \
- or %t5, %prev, %t5; \
- sll %t1, shil, %prev; \
- or %t6, %t0, %t0; \
- srl %t2, shir, %t1; \
- srl %t3, shir, %t6; \
- sll %t2, shil, %t2; \
- or %t1, %prev, %t1; \
- std %t4, [%dst + (offset) + (offset2) - 0x04]; \
- std %t0, [%dst + (offset) + (offset2) + 0x04]; \
- sll %t3, shil, %prev; \
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - offset - 0x20], %t0; \
+ ldd [%src - offset - 0x18], %t2; \
+ ldd [%src - offset - 0x10], %t4; \
+ ldd [%src - offset - 0x08], %t6; \
+ st %t0, [%dst - offset - 0x20]; \
+ st %t1, [%dst - offset - 0x1c]; \
+ st %t2, [%dst - offset - 0x18]; \
+ st %t3, [%dst - offset - 0x14]; \
+ st %t4, [%dst - offset - 0x10]; \
+ st %t5, [%dst - offset - 0x0c]; \
+ st %t6, [%dst - offset - 0x08]; \
+ st %t7, [%dst - offset - 0x04];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - offset - 0x20], %t0; \
+ ldd [%src - offset - 0x18], %t2; \
+ ldd [%src - offset - 0x10], %t4; \
+ ldd [%src - offset - 0x08], %t6; \
+ std %t0, [%dst - offset - 0x20]; \
+ std %t2, [%dst - offset - 0x18]; \
+ std %t4, [%dst - offset - 0x10]; \
+ std %t6, [%dst - offset - 0x08];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c];
+
+#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src + offset + 0x00], %t0; \
+ ldub [%src + offset + 0x01], %t1; \
+ stb %t0, [%dst + offset + 0x00]; \
+ stb %t1, [%dst + offset + 0x01];
+
+#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ srl %t0, shir, %t5; \
+ srl %t1, shir, %t6; \
+ sll %t0, shil, %t0; \
+ or %t5, %prev, %t5; \
+ sll %t1, shil, %prev; \
+ or %t6, %t0, %t0; \
+ srl %t2, shir, %t1; \
+ srl %t3, shir, %t6; \
+ sll %t2, shil, %t2; \
+ or %t1, %prev, %t1; \
+ std %t4, [%dst + offset + offset2 - 0x04]; \
+ std %t0, [%dst + offset + offset2 + 0x04]; \
+ sll %t3, shil, %prev; \
or %t6, %t2, %t4;
-#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t4; \
- srl %t1, shir, %t5; \
- sll %t0, shil, %t6; \
- or %t4, %prev, %t0; \
- sll %t1, shil, %prev; \
- or %t5, %t6, %t1; \
- srl %t2, shir, %t4; \
- srl %t3, shir, %t5; \
- sll %t2, shil, %t6; \
- or %t4, %prev, %t2; \
- sll %t3, shil, %prev; \
- or %t5, %t6, %t3; \
- std %t0, [%dst + (offset) + (offset2) + 0x00]; \
- std %t2, [%dst + (offset) + (offset2) + 0x08];
+#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ srl %t0, shir, %t4; \
+ srl %t1, shir, %t5; \
+ sll %t0, shil, %t6; \
+ or %t4, %prev, %t0; \
+ sll %t1, shil, %prev; \
+ or %t5, %t6, %t1; \
+ srl %t2, shir, %t4; \
+ srl %t3, shir, %t5; \
+ sll %t2, shil, %t6; \
+ or %t4, %prev, %t2; \
+ sll %t3, shil, %prev; \
+ or %t5, %t6, %t3; \
+ std %t0, [%dst + offset + offset2 + 0x00]; \
+ std %t2, [%dst + offset + offset2 + 0x08];
.text
.align 4
static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- FLUSH_BEGIN(vma->vm_mm)
+ struct mm_struct *mm = vma->vm_mm;
+
+ FLUSH_BEGIN(mm)
flush_user_windows();
turbosparc_idflash_clear();
FLUSH_END
static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- FLUSH_BEGIN(vma->vm_mm)
+ struct mm_struct *mm = vma->vm_mm;
+
+ FLUSH_BEGIN(mm)
srmmu_flush_whole_tlb();
FLUSH_END
}
fly. Currently there are only sparc64 drivers for UltraSPARC-III
and UltraSPARC-IIe processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-III processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
- For details, take a look at <file:Documentation/cpu-freq>.
+ For details, take a look at linux/Documentation/cpu-freq.
If in doubt, say N.
config SUNOS_EMUL
bool "SunOS binary emulation"
- depends on BINFMT_AOUT32
help
This allows you to run most SunOS binaries. If you want to do this,
say Y here and place appropriate files in /usr/gnemul/sunos. See
config SOLARIS_EMUL
tristate "Solaris binary emulation (EXPERIMENTAL)"
- depends on SPARC32_COMPAT && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This is experimental code which will enable you to run (many)
Solaris binaries on your SPARC Linux machine.
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
# CONFIG_BINFMT_AOUT32 is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
+# CONFIG_SUNOS_EMUL is not set
CONFIG_SOLARIS_EMUL=m
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=m
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_FB_CIRRUS is not set
CONFIG_FB_PM2=y
# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
+# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_BW2 is not set
#
# Serial drivers
#
+# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
CONFIG_SUN_MOSTEK_RTC=y
CONFIG_OBP_FLASH=m
# CONFIG_SUN_BPP is not set
+# CONFIG_SUN_VIDEOPIX is not set
+# CONFIG_SUN_AURORA is not set
#
# Memory Technology Devices (MTD)
CONFIG_SCSI_SATA_SIS=m
CONFIG_SCSI_SATA_VIA=m
CONFIG_SCSI_SATA_VITESSE=m
+# CONFIG_SCSI_BUSLOGIC is not set
CONFIG_SCSI_DMX3191D=m
+# CONFIG_SCSI_EATA is not set
CONFIG_SCSI_EATA_PIO=m
# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
CONFIG_SCSI_IPS=m
CONFIG_SCSI_INIA100=m
CONFIG_SCSI_PPA=m
# CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_QLA6322 is not set
CONFIG_SCSI_DC395x=m
-# CONFIG_SCSI_DC390T is not set
+CONFIG_SCSI_DC390T=m
CONFIG_SCSI_DEBUG=m
CONFIG_SCSI_SUNESP=y
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-# CONFIG_NET_SCH_CLK_JIFFIES is not set
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-CONFIG_NET_SCH_CLK_CPU=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
#
# Wireless 802.11b ISA/PCI cards support
#
+CONFIG_AIRO=m
CONFIG_HERMES=m
CONFIG_PLX_HERMES=m
CONFIG_TMD_HERMES=m
#
CONFIG_I2C_SENSOR=m
CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_DS1621=m
CONFIG_SENSORS_FSCHER=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
CONFIG_SENSORS_LM80=m
CONFIG_SENSORS_LM83=m
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_XATTR is not set
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
# CONFIG_NCPFS_PACKET_SIGNING is not set
CONFIG_SND_SUN_AMD7930=m
CONFIG_SND_SUN_CS4231=m
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
#
# USB support
#
# CONFIG_USB_OV511 is not set
CONFIG_USB_PWC=m
# CONFIG_USB_SE401 is not set
-CONFIG_USB_SN9C102=m
# CONFIG_USB_STV680 is not set
CONFIG_USB_W9968CF=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
loff_t pos = fd_offset;
/* Fuck me plenty... */
error = do_brk(N_TXTADDR(ex), ex.a_text);
- bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
ex.a_text, &pos);
error = do_brk(N_DATADDR(ex), ex.a_data);
- bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex),
ex.a_data, &pos);
goto beyond_if;
}
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex) & PAGE_MASK,
ex.a_text+ex.a_data + PAGE_SIZE - 1);
- bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
} else {
static unsigned long error_time;
if (!bprm->file->f_op->mmap) {
loff_t pos = fd_offset;
do_brk(0, ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,
- (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
goto beyond_if;
}
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
*/
- .globl sunos_execv
+ .globl sunos_execv, sys_execve, sys32_execve
sys_execve:
sethi %hi(sparc_execve), %g1
ba,pt %xcc, execve_merge
or %g1, %lo(sparc_execve), %g1
-#ifdef CONFIG_COMPAT
- .globl sys_execve
sunos_execv:
stx %g0, [%sp + PTREGS_OFF + PT_V9_I2]
- .globl sys32_execve
sys32_execve:
sethi %hi(sparc32_execve), %g1
or %g1, %lo(sparc32_execve), %g1
-#endif
execve_merge:
flushw
jmpl %g1, %g0
add %sp, PTREGS_OFF, %o0
.globl sys_pipe, sys_sigpause, sys_nis_syscall
- .globl sys_sigsuspend, sys_rt_sigsuspend
+ .globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
.globl sys_rt_sigreturn
- .globl sys_ptrace
- .globl sys_sigaltstack
+ .globl sys32_sigreturn, sys32_rt_sigreturn
+ .globl sys32_execve, sys_ptrace
+ .globl sys_sigaltstack, sys32_sigaltstack
+ .globl sys32_sigstack
.align 32
sys_pipe: ba,pt %xcc, sparc_pipe
add %sp, PTREGS_OFF, %o0
add %sp, PTREGS_OFF, %o1
sys_sigaltstack:ba,pt %xcc, do_sigaltstack
add %i6, STACK_BIAS, %o2
-#ifdef CONFIG_COMPAT
- .globl sys32_sigstack
sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
mov %i6, %o2
- .globl sys32_sigaltstack
sys32_sigaltstack:
ba,pt %xcc, do_sys32_sigaltstack
mov %i6, %o2
-#endif
+
.align 32
sys_sigsuspend: add %sp, PTREGS_OFF, %o0
call do_sigsuspend
call do_rt_sigsuspend
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_rt_sigsuspend
sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
srl %o0, 0, %o0
add %sp, PTREGS_OFF, %o2
call do_rt_sigsuspend32
add %o7, 1f-.-4, %o7
-#endif
/* NOTE: %o0 has a correct value already */
sys_sigpause: add %sp, PTREGS_OFF, %o1
call do_sigpause
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_sigreturn
sys32_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_sigreturn32
add %o7, 1f-.-4, %o7
nop
-#endif
sys_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn
add %o7, 1f-.-4, %o7
nop
-#ifdef CONFIG_COMPAT
- .globl sys32_rt_sigreturn
sys32_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn32
add %o7, 1f-.-4, %o7
nop
-#endif
sys_ptrace: add %sp, PTREGS_OFF, %o0
call do_ptrace
add %o7, 1f-.-4, %o7
/* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
+ call cheetah_patch_pgcopyops
+ nop
call cheetah_patch_cachetlbops
nop
/* Use this to get at 32-bit user passed pointers.
* See sys_sparc32.c for description about it.
*/
-#define A(__x) compat_ptr(__x)
+#define A(__x) ((void __user *)(unsigned long)(__x))
static __inline__ void *alloc_user_space(long len)
{
static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcmap32 __user *argp = (void __user *)arg;
- struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
- u32 addr;
+ struct fbcmap f;
int ret;
+ char red[256], green[256], blue[256];
+ u32 r, g, b;
+ mm_segment_t old_fs = get_fs();
- ret = copy_in_user(p, argp, 2 * sizeof(int));
- ret |= get_user(addr, &argp->red);
- ret |= put_user(compat_ptr(addr), &p->red);
- ret |= get_user(addr, &argp->green);
- ret |= put_user(compat_ptr(addr), &p->green);
- ret |= get_user(addr, &argp->blue);
- ret |= put_user(compat_ptr(addr), &p->blue);
+ ret = get_user(f.index, &(((struct fbcmap32 __user *)arg)->index));
+ ret |= __get_user(f.count, &(((struct fbcmap32 __user *)arg)->count));
+ ret |= __get_user(r, &(((struct fbcmap32 __user *)arg)->red));
+ ret |= __get_user(g, &(((struct fbcmap32 __user *)arg)->green));
+ ret |= __get_user(b, &(((struct fbcmap32 __user *)arg)->blue));
if (ret)
return -EFAULT;
- return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
+ if ((f.index < 0) || (f.index > 255)) return -EINVAL;
+ if (f.index + f.count > 256)
+ f.count = 256 - f.index;
+ if (cmd == FBIOPUTCMAP32) {
+ ret = copy_from_user (red, A(r), f.count);
+ ret |= copy_from_user (green, A(g), f.count);
+ ret |= copy_from_user (blue, A(b), f.count);
+ if (ret)
+ return -EFAULT;
+ }
+ f.red = red; f.green = green; f.blue = blue;
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (long)&f);
+ set_fs (old_fs);
+ if (!ret && cmd == FBIOGETCMAP32) {
+ ret = copy_to_user (A(r), red, f.count);
+ ret |= copy_to_user (A(g), green, f.count);
+ ret |= copy_to_user (A(b), blue, f.count);
+ }
+ return ret ? -EFAULT : 0;
}
struct fbcursor32 {
static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
- struct fbcursor32 __user *argp = (void __user *)arg;
- compat_uptr_t addr;
+ struct fbcursor f;
int ret;
+ char red[2], green[2], blue[2];
+ char image[128], mask[128];
+ u32 r, g, b;
+ u32 m, i;
+ mm_segment_t old_fs = get_fs();
- ret = copy_in_user(p, argp,
+ ret = copy_from_user (&f, (struct fbcursor32 __user *) arg,
2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
- ret |= get_user(addr, &argp->cmap.red);
- ret |= put_user(compat_ptr(addr), &p->cmap.red);
- ret |= get_user(addr, &argp->cmap.green);
- ret |= put_user(compat_ptr(addr), &p->cmap.green);
- ret |= get_user(addr, &argp->cmap.blue);
- ret |= put_user(compat_ptr(addr), &p->cmap.blue);
- ret |= get_user(addr, &argp->mask);
- ret |= put_user(compat_ptr(addr), &p->mask);
- ret |= get_user(addr, &argp->image);
- ret |= put_user(compat_ptr(addr), &p->image);
+ ret |= __get_user(f.size.x,
+ &(((struct fbcursor32 __user *)arg)->size.x));
+ ret |= __get_user(f.size.y,
+ &(((struct fbcursor32 __user *)arg)->size.y));
+ ret |= __get_user(f.cmap.index,
+ &(((struct fbcursor32 __user *)arg)->cmap.index));
+ ret |= __get_user(f.cmap.count,
+ &(((struct fbcursor32 __user *)arg)->cmap.count));
+ ret |= __get_user(r, &(((struct fbcursor32 __user *)arg)->cmap.red));
+ ret |= __get_user(g, &(((struct fbcursor32 __user *)arg)->cmap.green));
+ ret |= __get_user(b, &(((struct fbcursor32 __user *)arg)->cmap.blue));
+ ret |= __get_user(m, &(((struct fbcursor32 __user *)arg)->mask));
+ ret |= __get_user(i, &(((struct fbcursor32 __user *)arg)->image));
if (ret)
return -EFAULT;
- return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
+ if (f.set & FB_CUR_SETSHAPE) {
+ if ((uint) f.size.y > 32)
+ return -EINVAL;
+ ret = copy_from_user (mask, A(m), f.size.y * 4);
+ ret |= copy_from_user (image, A(i), f.size.y * 4);
+ if (ret)
+ return -EFAULT;
+ f.image = image; f.mask = mask;
+ }
+ if (f.set & FB_CUR_SETCMAP) {
+ ret = copy_from_user (red, A(r), 2);
+ ret |= copy_from_user (green, A(g), 2);
+ ret |= copy_from_user (blue, A(b), 2);
+ if (ret)
+ return -EFAULT;
+ f.cmap.red = red; f.cmap.green = green; f.cmap.blue = blue;
+ }
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, FBIOSCURSOR, (long)&f);
+ set_fs (old_fs);
+ return ret;
}
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
- drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ char __user *name_ptr, *date_ptr, *desc_ptr;
+ u32 tmp1, tmp2, tmp3;
+ drm_version_t kversion;
+ mm_segment_t old_fs;
int ret;
- if (clear_user(p, 3 * sizeof(int)) ||
- get_user(n, &uversion->name_len) ||
- put_user(n, &p->name_len) ||
- get_user(addr, &uversion->name) ||
- put_user(compat_ptr(addr), &p->name) ||
- get_user(n, &uversion->date_len) ||
- put_user(n, &p->date_len) ||
- get_user(addr, &uversion->date) ||
- put_user(compat_ptr(addr), &p->date) ||
- get_user(n, &uversion->desc_len) ||
- put_user(n, &p->desc_len) ||
- get_user(addr, &uversion->desc) ||
- put_user(compat_ptr(addr), &p->desc))
+ memset(&kversion, 0, sizeof(kversion));
+ if (get_user(kversion.name_len, &uversion->name_len) ||
+ get_user(kversion.date_len, &uversion->date_len) ||
+ get_user(kversion.desc_len, &uversion->desc_len) ||
+ get_user(tmp1, &uversion->name) ||
+ get_user(tmp2, &uversion->date) ||
+ get_user(tmp3, &uversion->desc))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
- if (ret)
- return ret;
-
- if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
- get_user(n, &p->name_len) ||
- put_user(n, &uversion->name_len) ||
- get_user(n, &p->date_len) ||
- put_user(n, &uversion->date_len) ||
- get_user(n, &p->desc_len) ||
- put_user(n, &uversion->desc_len))
- return -EFAULT;
+ name_ptr = A(tmp1);
+ date_ptr = A(tmp2);
+ desc_ptr = A(tmp3);
- return 0;
+ ret = -ENOMEM;
+ if (kversion.name_len && name_ptr) {
+ kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
+ if (!kversion.name)
+ goto out;
+ }
+ if (kversion.date_len && date_ptr) {
+ kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
+ if (!kversion.date)
+ goto out;
+ }
+ if (kversion.desc_len && desc_ptr) {
+ kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
+ if (!kversion.desc)
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if ((kversion.name &&
+ copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
+ (kversion.date &&
+ copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
+ (kversion.desc &&
+ copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
+ ret = -EFAULT;
+ if (put_user(kversion.version_major, &uversion->version_major) ||
+ put_user(kversion.version_minor, &uversion->version_minor) ||
+ put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
+ put_user(kversion.name_len, &uversion->name_len) ||
+ put_user(kversion.date_len, &uversion->date_len) ||
+ put_user(kversion.desc_len, &uversion->desc_len))
+ ret = -EFAULT;
+ }
+
+out:
+ if (kversion.name)
+ kfree(kversion.name);
+ if (kversion.date)
+ kfree(kversion.date);
+ if (kversion.desc)
+ kfree(kversion.desc);
+ return ret;
}
typedef struct drm32_unique {
static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
- drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ drm_unique_t karg;
+ mm_segment_t old_fs;
+ char __user *uptr;
+ u32 tmp;
int ret;
- if (get_user(n, &uarg->unique_len) ||
- put_user(n, &p->unique_len) ||
- get_user(addr, &uarg->unique) ||
- put_user(compat_ptr(addr), &p->unique))
+ if (get_user(karg.unique_len, &uarg->unique_len))
return -EFAULT;
+ karg.unique = NULL;
+
+ if (get_user(tmp, &uarg->unique))
+ return -EFAULT;
+
+ uptr = A(tmp);
+ if (uptr) {
+ karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
+ if (!karg.unique)
+ return -ENOMEM;
+ if (cmd == DRM32_IOCTL_SET_UNIQUE &&
+ copy_from_user(karg.unique, uptr, karg.unique_len)) {
+ kfree(karg.unique);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
if (cmd == DRM32_IOCTL_GET_UNIQUE)
- ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
else
- ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
+ set_fs(old_fs);
- if (ret)
- return ret;
+ if (!ret) {
+ if (cmd == DRM32_IOCTL_GET_UNIQUE &&
+ uptr != NULL &&
+ copy_to_user(uptr, karg.unique, karg.unique_len))
+ ret = -EFAULT;
+ if (put_user(karg.unique_len, &uarg->unique_len))
+ ret = -EFAULT;
+ }
- if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
- return -EFAULT;
+ if (karg.unique != NULL)
+ kfree(karg.unique);
- return 0;
+ return ret;
}
typedef struct drm32_map {
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
- drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
- int ret;
+ drm_buf_desc_t __user *ulist;
+ drm_buf_info_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
- if (ret)
- return ret;
+ ulist = A(tmp);
- if (get_user(n, &p->count) || put_user(n, &uarg->count))
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
+ if (!karg.list)
- return -EFAULT;
+ return -ENOMEM;
- return 0;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (karg.count <= orig_count &&
+ (copy_to_user(ulist, karg.list,
+ karg.count * sizeof(drm_buf_desc_t))))
+ ret = -EFAULT;
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ kfree(karg.list);
+
+ return ret;
}
typedef struct drm32_buf_free {
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
- drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
+ drm_buf_free_t karg;
+ mm_segment_t old_fs;
+ int __user *ulist;
+ int ret;
+ u32 tmp;
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
return -EFAULT;
- return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
+ ulist = A(tmp);
+
+ karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
+ goto out;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+out:
+ kfree(karg.list);
+
+ return ret;
}
typedef struct drm32_buf_pub {
{
drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
drm32_buf_pub_t __user *ulist;
- drm_buf_map_t __user *arg64;
- drm_buf_pub_t __user *list;
+ drm_buf_map_t karg;
+ mm_segment_t old_fs;
int orig_count, ret, i;
- int n;
- compat_uptr_t addr;
+ u32 tmp1, tmp2;
- if (get_user(orig_count, &uarg->count))
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp1, &uarg->virtual) ||
+ get_user(tmp2, &uarg->list))
return -EFAULT;
- arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
- (size_t)orig_count * sizeof(drm_buf_pub_t));
- list = (void __user *)(arg64 + 1);
+ karg.virtual = (void *) (unsigned long) tmp1;
+ ulist = A(tmp2);
- if (put_user(orig_count, &arg64->count) ||
- put_user(list, &arg64->list) ||
- get_user(addr, &uarg->virtual) ||
- put_user(compat_ptr(addr), &arg64->virtual) ||
- get_user(addr, &uarg->list))
- return -EFAULT;
+ orig_count = karg.count;
- ulist = compat_ptr(addr);
-
- for (i = 0; i < orig_count; i++) {
- if (get_user(n, &ulist[i].idx) ||
- put_user(n, &list[i].idx) ||
- get_user(n, &ulist[i].total) ||
- put_user(n, &list[i].total) ||
- get_user(n, &ulist[i].used) ||
- put_user(n, &list[i].used) ||
- get_user(addr, &ulist[i].address) ||
- put_user(compat_ptr(addr), &list[i].address))
- return -EFAULT;
- }
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
- ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
- if (ret)
- return ret;
-
- for (i = 0; i < orig_count; i++) {
- void __user *p;
- if (get_user(n, &list[i].idx) ||
- put_user(n, &ulist[i].idx) ||
- get_user(n, &list[i].total) ||
- put_user(n, &ulist[i].total) ||
- get_user(n, &list[i].used) ||
- put_user(n, &ulist[i].used) ||
- get_user(p, &list[i].address) ||
- put_user((unsigned long)p, &ulist[i].address))
- return -EFAULT;
+ ret = -EFAULT;
+ for (i = 0; i < karg.count; i++) {
+ if (get_user(karg.list[i].idx, &ulist[i].idx) ||
+ get_user(karg.list[i].total, &ulist[i].total) ||
+ get_user(karg.list[i].used, &ulist[i].used) ||
+ get_user(tmp1, &ulist[i].address))
+ goto out;
+
+ karg.list[i].address = (void *) (unsigned long) tmp1;
}
- if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
- return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ for (i = 0; i < orig_count; i++) {
+ tmp1 = (u32) (long) karg.list[i].address;
+ if (put_user(karg.list[i].idx, &ulist[i].idx) ||
+ put_user(karg.list[i].total, &ulist[i].total) ||
+ put_user(karg.list[i].used, &ulist[i].used) ||
+ put_user(tmp1, &ulist[i].address)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
- return 0;
+out:
+ kfree(karg.list);
+ return ret;
}
typedef struct drm32_dma {
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
- drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
+ int __user *u_si, *u_ss, *u_ri, *u_rs;
+ drm_dma_t karg;
+ mm_segment_t old_fs;
int ret;
-
- if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
- get_user(addr, &uarg->send_indices) ||
- put_user(compat_ptr(addr), &p->send_indices) ||
- get_user(addr, &uarg->send_sizes) ||
- put_user(compat_ptr(addr), &p->send_sizes) ||
- copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
- copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
- get_user(addr, &uarg->request_indices) ||
- put_user(compat_ptr(addr), &p->request_indices) ||
- get_user(addr, &uarg->request_sizes) ||
- put_user(compat_ptr(addr), &p->request_sizes) ||
- copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
+ u32 tmp1, tmp2, tmp3, tmp4;
+
+ karg.send_indices = karg.send_sizes = NULL;
+ karg.request_indices = karg.request_sizes = NULL;
+
+ if (get_user(karg.context, &uarg->context) ||
+ get_user(karg.send_count, &uarg->send_count) ||
+ get_user(tmp1, &uarg->send_indices) ||
+ get_user(tmp2, &uarg->send_sizes) ||
+ get_user(karg.flags, &uarg->flags) ||
+ get_user(karg.request_count, &uarg->request_count) ||
+ get_user(karg.request_size, &uarg->request_size) ||
+ get_user(tmp3, &uarg->request_indices) ||
+ get_user(tmp4, &uarg->request_sizes) ||
+ get_user(karg.granted_count, &uarg->granted_count))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
- if (ret)
- return ret;
+ u_si = A(tmp1);
+ u_ss = A(tmp2);
+ u_ri = A(tmp3);
+ u_rs = A(tmp4);
+
+ if (karg.send_count) {
+ karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+ karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.send_indices || !karg.send_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.send_indices, u_si,
+ (karg.send_count * sizeof(int))) ||
+ copy_from_user(karg.send_sizes, u_ss,
+ (karg.send_count * sizeof(int))))
+ goto out;
+ }
- if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
- copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
- copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
- copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
- return -EFAULT;
+ if (karg.request_count) {
+ karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+ karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.request_indices || !karg.request_sizes)
+ goto out;
- return 0;
+ ret = -EFAULT;
+ if (copy_from_user(karg.request_indices, u_ri,
+ (karg.request_count * sizeof(int))) ||
+ copy_from_user(karg.request_sizes, u_rs,
+ (karg.request_count * sizeof(int))))
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (put_user(karg.context, &uarg->context) ||
+ put_user(karg.send_count, &uarg->send_count) ||
+ put_user(karg.flags, &uarg->flags) ||
+ put_user(karg.request_count, &uarg->request_count) ||
+ put_user(karg.request_size, &uarg->request_size) ||
+ put_user(karg.granted_count, &uarg->granted_count))
+ ret = -EFAULT;
+
+ if (karg.send_count) {
+ if (copy_to_user(u_si, karg.send_indices,
+ (karg.send_count * sizeof(int))) ||
+ copy_to_user(u_ss, karg.send_sizes,
+ (karg.send_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ if (karg.request_count) {
+ if (copy_to_user(u_ri, karg.request_indices,
+ (karg.request_count * sizeof(int))) ||
+ copy_to_user(u_rs, karg.request_sizes,
+ (karg.request_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ }
+
+out:
+ if (karg.send_indices)
+ kfree(karg.send_indices);
+ if (karg.send_sizes)
+ kfree(karg.send_sizes);
+ if (karg.request_indices)
+ kfree(karg.request_indices);
+ if (karg.request_sizes)
+ kfree(karg.request_sizes);
+
+ return ret;
}
typedef struct drm32_ctx_res {
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
- drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int ret;
+ drm_ctx_t __user *ulist;
+ drm_ctx_res_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
- if (copy_in_user(p, uarg, sizeof(int)) ||
- get_user(addr, &uarg->contexts) ||
- put_user(compat_ptr(addr), &p->contexts))
+ karg.contexts = NULL;
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->contexts))
return -EFAULT;
- ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
- if (ret)
- return ret;
+ ulist = A(tmp);
- if (copy_in_user(uarg, p, sizeof(int)))
- return -EFAULT;
+ orig_count = karg.count;
+ if (karg.count && ulist) {
+ karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
+ if (!karg.contexts)
+ return -ENOMEM;
+ if (copy_from_user(karg.contexts, ulist,
+ (karg.count * sizeof(drm_ctx_t)))) {
+ kfree(karg.contexts);
+ return -EFAULT;
+ }
+ }
- return 0;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (orig_count) {
+ if (copy_to_user(ulist, karg.contexts,
+ (orig_count * sizeof(drm_ctx_t))))
+ ret = -EFAULT;
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ if (karg.contexts)
+ kfree(karg.contexts);
+
+ return ret;
}
#endif
#include <stdarg.h>
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/config.h>
#include <linux/reboot.h>
#include <linux/delay.h>
-#include <linux/compat.h>
#include <linux/init.h>
#include <asm/oplib.h>
clone_flags &= ~CLONE_IDLETASK;
-#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
- } else
-#endif
- {
+ } else {
parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
}
.globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap_clr_l6: clr %l6
-rtrap:
- ldub [%g6 + TI_CPU], %l0
+rtrap: ldub [%g6 + TI_CPU], %l0
sethi %hi(irq_stat), %l2 ! &softirq_active
or %l2, %lo(irq_stat), %l2 ! &softirq_active
irqsz_patchme: sllx %l0, 0, %l0
*
* On SYSIO, using an 8K page size we have 1GB of SBUS
* DMA space mapped. We divide this space into equally
- * sized clusters. We allocate a DMA mapping from the
- * cluster that matches the order of the allocation, or
- * if the order is greater than the number of clusters,
- * we try to allocate from the last cluster.
+ * sized clusters. Currently we allow clusters up to a
+ * size of 1MB. If anything begins to generate DMA
+ * mapping requests larger than this we will need to
+ * increase things a bit.
*/
#define NCLUSTERS 8UL
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
- iopte_t *iopte, *limit, *first, *cluster;
- unsigned long cnum, ent, nent, flush_point, found;
+ iopte_t *iopte, *limit, *first;
+ unsigned long cnum, ent, flush_point;
cnum = 0;
- nent = 1;
while ((1UL << cnum) < npages)
cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
if (cnum == 0)
flush_point = iommu->alloc_info[cnum].flush;
first = iopte;
- cluster = NULL;
- found = 0;
for (;;) {
if (iopte_val(*iopte) == 0UL) {
- found++;
- if (!cluster)
- cluster = iopte;
- } else {
- /* Used cluster in the way */
- cluster = NULL;
- found = 0;
- }
-
- if (found == nent)
+ if ((iopte + (1 << cnum)) >= limit)
+ ent = 0;
+ else
+ ent = ent + 1;
+ iommu->alloc_info[cnum].next = ent;
+ if (ent == flush_point)
+ __iommu_flushall(iommu);
break;
-
+ }
iopte += (1 << cnum);
ent++;
if (iopte >= limit) {
iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
ent = 0;
-
- /* Multiple cluster allocations must not wrap */
- cluster = NULL;
- found = 0;
}
if (ent == flush_point)
__iommu_flushall(iommu);
goto bad;
}
- /* ent/iopte points to the last cluster entry we're going to use,
- * so save our place for the next allocation.
- */
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
-
/* I've got your streaming cluster right here buddy boy... */
- return cluster;
+ return iopte;
bad:
printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
- unsigned long cnum, ent, nent;
+ unsigned long cnum, ent;
iopte_t *iopte;
cnum = 0;
- nent = 1;
while ((1UL << cnum) < npages)
cnum++;
- if(cnum >= NCLUSTERS) {
- nent = 1UL << (cnum - NCLUSTERS);
- cnum = NCLUSTERS - 1;
- }
ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
- do {
- iopte_val(*iopte) = 0UL;
- iopte += 1 << cnum;
- } while(--nent);
+ iopte_val(*iopte) = 0UL;
/* If the global flush might not have caught this entry,
* adjust the flush point such that we will flush before
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
-#include <asm/tlb.h>
extern int linux_num_cpus;
extern void calibrate_delay(void);
}
}
+extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
int cpu = get_cpu();
if (atomic_read(&mm->mm_users) == 1) {
+ /* See smp_flush_tlb_page for info about this. */
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
}
}
}
-void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
+ start &= PAGE_MASK;
+ end = PAGE_ALIGN(end);
+
if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
- } else {
- /* This optimization is not valid. Normally
- * we will be holding the page_table_lock, but
- * there is an exception which is copy_page_range()
- * when forking. The lock is held during the individual
- * page table updates in the parent, but not at the
- * top level, which is where we are invoked.
- */
- if (0) {
- cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
- /* By virtue of running under the mm->page_table_lock,
- * and mmu_context.h:switch_mm doing the same, the
- * following operation is safe.
- */
- if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
- goto local_flush_and_out;
- }
}
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
+ smp_cross_call_masked(&xcall_flush_tlb_range,
+ ctx, start, end,
mm->cpu_vm_mask);
-local_flush_and_out:
- __flush_tlb_pending(ctx, nr, vaddrs);
+ local_flush_and_out:
+ __flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
+ end, PAGE_SIZE, (end-start));
put_cpu();
}
}
}
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
+{
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
+
+ page &= PAGE_MASK;
+ if (mm == current->active_mm &&
+ atomic_read(&mm->mm_users) == 1) {
+ /* By virtue of being the current address space, and
+ * having the only reference to it, the following
+ * operation is safe.
+ *
+ * It would not be a win to perform the xcall tlb
+ * flush in this case, because even if we switch back
+ * to one of the other processors in cpu_vm_mask it
+ * is almost certain that all TLB entries for this
+ * context will be replaced by the time that happens.
+ */
+ mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ goto local_flush_and_out;
+ } else {
+ cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+ /* By virtue of running under the mm->page_table_lock,
+ * and mmu_context.h:switch_mm doing the same, the
+ * following operation is safe.
+ */
+ if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+ goto local_flush_and_out;
+ }
+
+ /* OK, we have to actually perform the cross call. Most
+ * likely this is a cloned mm or kswapd is kicking out pages
+ * for a task which has run recently on another cpu.
+ */
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ ctx, page, 0,
+ mm->cpu_vm_mask);
+ if (!cpu_isset(cpu, mm->cpu_vm_mask))
+ return;
+
+ local_flush_and_out:
+ __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
+
+ put_cpu();
+ }
+}
+
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_raw_spin_lock_flags);
#endif
EXPORT_SYMBOL(synchronize_irq);
#if defined(CONFIG_MCOUNT)
-extern void _mcount(void);
-EXPORT_SYMBOL_NOVERS(_mcount);
+extern void mcount(void);
+EXPORT_SYMBOL_NOVERS(mcount);
#endif
/* CPU online map and active count. */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(__pte_alloc_one_kernel);
+EXPORT_SYMBOL(pte_alloc_one_kernel);
#ifndef CONFIG_SMP
EXPORT_SYMBOL(pgt_quicklists);
#endif
#endif
/* Special internal versions of library functions. */
+EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(csum_partial_copy_sparc64);
EXPORT_SYMBOL(ip_fast_csum);
-/* Moving data to/from/in userspace. */
+/* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_in_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__bzero_noasi);
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_unmapped_area(NULL, addr, len, pgoff, flags);
+ return get_unmapped_area(NULL, addr, len, pgoff, flags, 0);
}
flags &= ~MAP_SHARED;
align_goal = (64UL * 1024);
do {
- addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags, 0);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags, 0);
return addr;
}
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
return sys_ftruncate(fd, (high << 32) | low);
}
+/* readdir & getdents */
+
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
+
+struct old_linux_dirent32 {
+ u32 d_ino;
+ u32 d_offset;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct readdir_callback32 {
+ struct old_linux_dirent32 __user * dirent;
+ int count;
+};
+
+static int fillonedir(void * __buf, const char * name, int namlen,
+ loff_t offset, ino_t ino, unsigned int d_type)
+{
+ struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
+ struct old_linux_dirent32 __user * dirent;
+
+ if (buf->count)
+ return -EINVAL;
+ buf->count++;
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(offset, &dirent->d_offset);
+ put_user(namlen, &dirent->d_namlen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ return 0;
+}
+
+asmlinkage long old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
+{
+ int error = -EBADF;
+ struct file * file;
+ struct readdir_callback32 buf;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.count = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (error < 0)
+ goto out_putf;
+ error = buf.count;
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+struct linux_dirent32 {
+ u32 d_ino;
+ u32 d_off;
+ unsigned short d_reclen;
+ char d_name[1];
+};
+
+struct getdents_callback32 {
+ struct linux_dirent32 __user *current_dir;
+ struct linux_dirent32 __user *previous;
+ int count;
+ int error;
+};
+
+static int filldir(void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
+ unsigned int d_type)
+{
+ struct linux_dirent32 __user * dirent;
+ struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->current_dir;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ put_user(d_type, (char __user *) dirent + reclen - 1);
+ dirent = (void __user *) dirent + reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent, unsigned int count)
+{
+ struct file * file;
+ struct linux_dirent32 __user *lastdirent;
+ struct getdents_callback32 buf;
+ int error = -EBADF;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = count - buf.count;
+ }
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+/* end of readdir & getdents */
+
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
int err;
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags);
+ map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.text
.align 4
-#ifdef CONFIG_COMPAT
/* First, the 32-bit Linux native syscall table. */
.globl sys_call_table32
.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
-/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
.word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
/*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
.word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
-/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_old_readdir
+/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, old32_readdir
.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
.word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, sys_ni_syscall
/*280*/ .word sys_ni_syscall, sys_ni_syscall, sys_ni_syscall
-#endif /* CONFIG_COMPAT */
-
/* Now the 64-bit native Linux syscall table. */
.align 4
sys_call_table:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
+/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown, sys_mknod
/*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
#define TICK_SIZE (tick_nsec / 1000)
-static inline void timer_check_rtc(void)
+static __inline__ void timer_check_rtc(void)
{
/* last time the cmos clock got updated */
static long last_rtc_update;
void sparc64_do_profile(struct pt_regs *regs)
{
- unsigned long pc;
+ unsigned long pc = regs->tpc;
+ unsigned long o7 = regs->u_regs[UREG_RETPC];
profile_hook(regs);
if (!prof_buffer)
return;
- pc = regs->tpc;
-
- pc -= (unsigned long) _stext;
- pc >>= prof_shift;
-
- if(pc >= prof_len)
- pc = prof_len - 1;
- atomic_inc((atomic_t *)&prof_buffer[pc]);
+ {
+ extern int rwlock_impl_begin, rwlock_impl_end;
+ extern int atomic_impl_begin, atomic_impl_end;
+ extern int __memcpy_begin, __memcpy_end;
+ extern int __bzero_begin, __bzero_end;
+ extern int __bitops_begin, __bitops_end;
+
+ if ((pc >= (unsigned long) &atomic_impl_begin &&
+ pc < (unsigned long) &atomic_impl_end) ||
+ (pc >= (unsigned long) &rwlock_impl_begin &&
+ pc < (unsigned long) &rwlock_impl_end) ||
+ (pc >= (unsigned long) &__memcpy_begin &&
+ pc < (unsigned long) &__memcpy_end) ||
+ (pc >= (unsigned long) &__bzero_begin &&
+ pc < (unsigned long) &__bzero_end) ||
+ (pc >= (unsigned long) &__bitops_begin &&
+ pc < (unsigned long) &__bitops_end))
+ pc = o7;
+
+ pc -= (unsigned long) _stext;
+ pc >>= prof_shift;
+
+ if(pc >= prof_len)
+ pc = prof_len - 1;
+ atomic_inc((atomic_t *)&prof_buffer[pc]);
+ }
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
*/
void do_gettimeofday(struct timeval *tv)
{
+ unsigned long flags;
unsigned long seq;
unsigned long usec, sec;
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
unsigned long lost;
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
lost = jiffies - wall_jiffies;
usec += lost * tick_usec;
sec = xtime.tv_sec;
-
- /* Believe it or not, this divide shows up on
- * kernel profiles. The problem is that it is
- * both 64-bit and signed. Happily, 32-bits
- * of precision is all we really need and in
- * doing so gcc ends up emitting a cheap multiply.
- *
- * XXX Why is tv_nsec 'long' and 'signed' in
- * XXX the first place, can it even be negative?
- */
- usec += ((unsigned int) xtime.tv_nsec / 1000U);
- } while (read_seqretry(&xtime_lock, seq));
+ usec += (xtime.tv_nsec / 1000);
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
+lib-y := PeeCeeI.o blockops.o strlen.o strncmp.o \
memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
-/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+/* $Id: U3copy_from_user.S,v 1.4 2002/01/15 07:16:26 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized copy from userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-
-#define XCC xcc
-
-#define EXNV_RAW(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: ba U3cfu_fixup; \
- a, b, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: add %o1, %o3, %o0; \
+99: VISExitHalf; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV4(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 4, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV8(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 8, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EX2(x,y) x,y;
+#define EX3(x,y) x,y;
+#define EX4(x,y) x,y;
+#endif
/* Special/non-trivial issues of this code:
*
* of up to 2.4GB per second.
*/
- .globl U3copy_from_user
-U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ .globl U3copy_from_user
+U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_from_user_short_ret! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_from_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_from_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_from_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_from_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_from_user_short ! BR
+ stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
+
+U3copy_from_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-1:
+U3copy_from_user_enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV_RAW(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
-
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetcha [%o1 + 0x000] %asi, #one_read
- prefetcha [%o1 + 0x040] %asi, #one_read
- andn %o2, (0x40 - 1), %o4
- prefetcha [%o1 + 0x080] %asi, #one_read
- prefetcha [%o1 + 0x0c0] %asi, #one_read
- EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0)
- prefetcha [%o1 + 0x100] %asi, #one_read
- EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0)
- prefetcha [%o1 + 0x140] %asi, #one_read
- EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f0, %f2, %f16
- EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0)
- faligndata %f2, %f4, %f18
- EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0)
- faligndata %f4, %f6, %f20
- EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0)
- faligndata %f6, %f8, %f22
-
- EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0)
- faligndata %f8, %f10, %f24
- EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0)
- faligndata %f10, %f12, %f26
- EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0)
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_from_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
.align 64
-1:
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
-
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
-
- EX3(ldda [%o1 + 0x040] %asi, %f0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_from_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_1_6
+U3copy_from_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
+ prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
+1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
+1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_from_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_from_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_from_user_loop1:
+ EX2(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ EX2(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX2(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ EX2(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ EX2(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ EX2(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ EX2(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ EX2(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_from_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+	/* This loop performs the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_from_user_loop2:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ EX3(ldda [%o1 + 0x040] %asi, %f0) ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- EX4(ldda [%o1 + 0x040] %asi, %f0)
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_from_user_loopfini:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ stda %f16, [%o0] ASI_BLK_P ! MS Group20
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ EX4(ldda [%o1 + 0x040] %asi, %f0) ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_2_3
+U3copy_from_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_from_user_toosmall processing.
*/
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 10f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 10f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0)
-
-1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- std %f8, [%o0 + 0x00]
- be,pn %XCC, 10f
- add %o0, 0x8, %o0
- EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- std %f8, [%o0 + 0x00]
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_from_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_from_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-10:
+U3copy_from_user_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_from_user_short_ret
nop
- EXNV(ldxa [%o1] %asi, %o5, add %o2, %g0)
- stx %o5, [%o1 + %o3]
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_from_user_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduwa [%o1] %asi, %o5, and %o2, 0x7)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_from_user_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduha [%o1] %asi, %o5, and %o2, 0x3)
- sth %o5, [%o1 + %o3]
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- EXNV(lduba [%o1] %asi, %o5, and %o2, 0x1)
- ba,pt %xcc, 85f
- stb %o5, [%o1 + %o3]
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- EXNV8(ldxa [%o1] %asi, %o5, add %o2, %o4)
- stx %o5, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- EXNV4(lduwa [%o1] %asi, %o5, add %o2, %g0)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
+
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-1:
- subcc %o2, 4, %o2
- EXNV(lduwa [%o1] %asi, %g1, add %o2, %g0)
- stw %g1, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-85: retl
- clr %o0
+2: VISEntryHalf ! MS+MS
- .align 32
-90:
- subcc %o2, 1, %o2
- EXNV(lduba [%o1] %asi, %g1, add %o2, %g0)
- stb %g1, [%o1 + %o3]
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_from_user_short_ret! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_from_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_from_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ stb %o3, [%o0 + -1]
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: EXNV(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ stx %o3, [%o0 + -8]
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_from_user_short
+ nop
+ ba,a,pt %xcc, U3copy_from_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
+#ifdef __KERNEL__
+ .globl U3cfu_fixup
U3cfu_fixup:
/* Since this is copy_from_user(), zero out the rest of the
* kernel buffer.
2: retl
mov %o1, %o0
+#endif
-/* U3copy_in_user.S: UltraSparc-III optimized memcpy.
+/* $Id: U3copy_in_user.S,v 1.4 2001/03/21 05:58:47 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized copy within userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
-#include <asm/dcu.h>
-#include <asm/spitfire.h>
-
-#define XCC xcc
-
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV1(x,y,a,b) \
+#define EXNV2(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV4(x,y,a,b) \
+#define EXNV3(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
- add %o0, 4, %o0; \
+ add %o0, 8, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV8(x,y,a,b) \
+#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: a, b, %o0; \
+99: VISExitHalf; \
retl; \
- add %o0, 8, %o0; \
+ a, b, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
+#define EXBLK1(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ add %o4, 0x1c0, %o1; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o1, %o2, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK2(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ sll %o3, 6, %o3; \
+ and %o2, (0x40 - 1), %o2; \
+ add %o3, 0x80, %o1; \
+ retl; \
+ add %o1, %o2, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK3(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o2, 0x80, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXBLK4(x,y) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: VISExitHalf; \
+ and %o2, (0x40 - 1), %o2; \
+ retl; \
+ add %o2, 0x40, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#else
+#define ASI_AIUS 0x80
+#define ASI_BLK_AIUS 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EXNV2(x,y,a,b) x,y;
+#define EXNV3(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EXBLK1(x,y) x,y;
+#define EXBLK2(x,y) x,y;
+#define EXBLK3(x,y) x,y;
+#define EXBLK4(x,y) x,y;
+#endif
- .register %g2,#scratch
- .register %g3,#scratch
+ /* Special/non-trivial issues of this code:
+ *
+ * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+ * 2) Only low 32 FPU registers are used so that only the
+ * lower half of the FPU register set is dirtied by this
+ * code. This is especially important in the kernel.
+ * 3) This code never prefetches cachelines past the end
+ * of the source buffer.
+ *
+ * XXX Actually, Cheetah can buffer up to 8 concurrent
+ * XXX prefetches, revisit this...
+ */
.text
.align 32
- /* Don't try to get too fancy here, just nice and
- * simple. This is predominantly used for well aligned
- * small copies in the compat layer. It is also used
- * to copy register windows around during thread cloning.
+ /* The cheetah's flexible spine, oversized liver, enlarged heart,
+ * slender muscular body, and claws make it the swiftest hunter
+ * in Africa and the fastest animal on land. Can reach speeds
+ * of up to 2.4GB per second.
*/
- .globl U3copy_in_user
-U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_in_user
+U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, U3memcpy_user_stub
- nop
+ rd %asi, %g1 ! MS Group (4 cycles)
+ cmp %g1, ASI_AIUS ! A0 Group
+ bne U3memcpy ! BR
+ nop ! A1
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_in_user_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_in_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_in_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_in_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+ .align 32
+U3copy_in_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_in_user_short ! BR
+ EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
+
+U3copy_in_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
+ * of "andcc %o0, 0x7, %g2", done by caller.
+ */
+ .align 64
+U3copy_in_user_enter:
+ /* Is 'dst' already aligned on an 64-byte boundary? */
+ be,pt %xcc, 2f ! BR
+
+ /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
+ * of bytes to copy to make 'dst' 64-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
+
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
+
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_in_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
+
+ .align 64
+U3copy_in_user_begin:
+ prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
+ prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
+1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
+1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_in_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_in_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_in_user_loop1:
+ EXBLK1(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ EXBLK1(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ EXBLK1(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ EXBLK1(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ EXBLK1(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ EXBLK1(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ EXBLK1(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ EXBLK1(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_in_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_in_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop performs on the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_in_user_loop2:
+ EXBLK2(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ EXBLK2(ldda [%o1 + 0x010] %asi, %f4) ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ EXBLK2(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ EXBLK2(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ EXBLK2(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ EXBLK2(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ EXBLK2(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ EXBLK2(ldda [%o1 + 0x040] %asi, %f0) ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_in_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+ /* Finally we copy the last full 64-byte block. */
+U3copy_in_user_loopfini:
+ EXBLK3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ EXBLK3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
+ EXBLK4(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ EXBLK4(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ EXBLK4(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ EXBLK4(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ EXBLK4(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ EXBLK4(ldda [%o1 + 0x040] %asi, %f0) ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+ membar #Sync ! MS Group26 (7-cycle stall)
+
+ /* Now we copy the (len modulo 64) bytes at the end.
+ * Note how we borrow the %f0 loaded above.
+ *
+ * Also notice how this code is careful not to perform a
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_in_user_toosmall processing.
+ */
+U3copy_in_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_in_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_in_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_in_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
+
+ /* If anything is left, we copy it one byte at a time.
+ * Note that %g1 is (src & 0x3) saved above before the
+ * alignaddr was performed.
+ */
+U3copy_in_user_endcruft:
cmp %o2, 0
- be,pn %XCC, out
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, small_copy
- or %o3, %o2, %o3
-
-medium_copy: /* 16 < len <= 64 */
- andcc %o3, 0x7, %g0
- bne,pn %XCC, small_copy_unaligned
- sub %o0, %o1, %o3
-
-medium_copy_aligned:
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- EXNV8(ldxa [%o1] %asi, %o5, add %o4, %o2)
- EXNV8(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- EXNV4(lduwa [%o1] %asi, %o5, add %o4, %o2)
- EXNV4(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, out
- nop
- ba,pt %xcc, small_copy_unaligned
+ add %o1, %g1, %o1
+ VISExitHalf
+ be,pn %icc, U3copy_in_user_short_ret
nop
+ ba,a,pt %xcc, U3copy_in_user_short
-small_copy: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, small_copy_unaligned
- sub %o0, %o1, %o3
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_in_user_toosmall:
-small_copy_aligned:
- subcc %o2, 4, %o2
- EXNV4(lduwa [%o1] %asi, %g1, add %o2, %g0)
- EXNV4(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, small_copy_aligned
- add %o1, 4, %o1
+#ifdef SMALL_COPY_USES_FPU
-out: retl
- clr %o0
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
- .align 32
-small_copy_unaligned:
- subcc %o2, 1, %o2
- EXNV1(lduba [%o1] %asi, %g1, add %o2, %g0)
- EXNV1(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, small_copy_unaligned
- add %o1, 1, %o1
- retl
- clr %o0
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
+
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
+
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+
+2: VISEntryHalf ! MS+MS
+
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_in_user_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_in_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_in_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: EXNV3(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_in_user_short
+ nop
+ ba,a,pt %xcc, U3copy_in_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
-/* U3copy_to_user.S: UltraSparc-III optimized memcpy.
+/* $Id: U3copy_to_user.S,v 1.3 2000/11/01 09:29:19 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized copy to userspace.
 *
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
 */
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-
-#define XCC xcc
-
+#undef SMALL_COPY_USES_FPU /* kernel build takes the non-FPU small-copy path (see #ifdef below) */
#define EXNV(x,y,a,b) \
98: x,y; \
 .section .fixup; \
 .text; \
 .align 4;
#define EXNV3(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: a, b, %o0; \
- retl; \
- add %o0, 4, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV4(x,y,a,b) \
98: x,y; \
 .section .fixup; \
 .align 4; \
 .word 98b, 99b; \
 .text; \
 .align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_AIUS 0x80
+#define ASI_BLK_AIUS 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EXNV2(x,y,a,b) x,y;
+#define EXNV3(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EXBLK1(x,y) x,y;
+#define EXBLK2(x,y) x,y;
+#define EXBLK3(x,y) x,y;
+#define EXBLK4(x,y) x,y; /* outside the kernel the EX* fixup wrappers degenerate to the bare instruction */
+#endif
/* Special/non-trivial issues of this code:
 *
 * of up to 2.4GB per second.
 */
- .globl U3copy_to_user
-U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_to_user
+U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
 /* Writing to %asi is _expensive_ so we hardcode it.
 * Reading %asi to check for KERNEL_DS is comparatively
 * cheap.
 */
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, U3memcpy_user_stub
- nop
-
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ rd %asi, %g1 ! MS Group (4 cycles)
+ cmp %g1, ASI_AIUS ! A0 Group
+ bne U3memcpy ! BR (%asi != ASI_AIUS: not a user-space copy, use plain memcpy)
+ nop ! A1
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_to_user_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_to_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_to_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_to_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_to_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ ldub [%o1 + 0x00], %o3 ! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_to_user_short ! BR
+ EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
+
+U3copy_to_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0 (kernel API: return 0 bytes un-copied on success)
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0 (memcpy API: return original dst)
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
 * of "andcc %o0, 0x7, %g2", done by caller.
 */
 .align 64
-1:
+U3copy_to_user_enter:
 /* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
 * subtract this from 'len'.
 */
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
 /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
-
- bg,pt %XCC, 1b
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+1: ldub [%o1 + 0x00], %o3 ! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetch [%o1 + 0x000], #one_read
- prefetch [%o1 + 0x040], #one_read
- andn %o2, (0x40 - 1), %o4
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x018], %f6
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x020], %f8
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x028], %f10
- faligndata %f6, %f8, %f22
-
- ldd [%o1 + 0x030], %f12
- faligndata %f8, %f10, %f24
- ldd [%o1 + 0x038], %f14
- faligndata %f10, %f12, %f26
- ldd [%o1 + 0x040], %f0
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS (enable FP: faligndata/%f regs used below)
+ and %o1, 0x7, %g1 ! A1 (%g1 = src & 7, tested again at loopfini/endcruft)
+ ba,pt %xcc, U3copy_to_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
 .align 64
-1:
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
-
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
-
- ldd [%o1 + 0x040], %f0
- prefetch [%o1 + 0x180], #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_to_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_1_6
+U3copy_to_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetch [%o1 + 0x000], #one_read ! MS Group1
+ prefetch [%o1 + 0x040], #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetch [%o1 + 0x080], #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group4
+ ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x100], #one_read ! MS Group6
+1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetch [%o1 + 0x140], #one_read ! MS Group7
+1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x180], #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_to_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_to_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_to_user_loop1:
+ ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
+ prefetch [%o1 + 0x180], #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_to_user_loop2_enter:
+ mov 5, %o3 ! A1 (loop2 always runs the remaining 5 64-byte blocks; see (5 * 64) note above)
+
+ /* This loop performs on the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_to_user_loop2:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ ldd [%o1 + 0x010], %f4 ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ ldd [%o1 + 0x040], %f0 ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
 /* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- ldd [%o1 + 0x040], %f0
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_to_user_loopfini:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ ldd [%o1 + 0x010], %f4 ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0 (src 8-byte aligned? then skip the extra ldd below)
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ ldd [%o1 + 0x040], %f0 ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_2_3
+U3copy_to_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache (restore %g3 saved at nop_1_6)
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
 /* Now we copy the (len modulo 64) bytes at the end.
 * Note how we borrow the %f0 loaded above.
 *
 * Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_to_user_toosmall processing.
 */
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 2f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 2f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- ldd [%o1 + 0x00], %f0
-
-1: ldd [%o1 + 0x08], %f2
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- be,pn %XCC, 2f
- add %o0, 0x8, %o0
- ldd [%o1 + 0x08], %f0
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_to_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_to_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ ldd [%o1 + 0x00], %f0 ! MS
+
+1: ldd [%o1 + 0x08], %f2 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ ldd [%o1 + 0x08], %f0 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
 /* If anything is left, we copy it one byte at a time.
 * Note that %g1 is (src & 0x3) saved above before the
 * alignaddr was performed.
 */
-2:
+U3copy_to_user_endcruft:
 cmp %o2, 0
 add %o1, %g1, %o1
 VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_to_user_short_ret
 nop
- ldx [%o1], %o5
- EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_to_user_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- lduw [%o1], %o5
- EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_to_user_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- lduh [%o1], %o5
- EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- ldub [%o1], %o5
- ba,pt %xcc, 85f
- EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- ldx [%o1], %o5
- EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- lduw [%o1], %o5
- EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-1:
- subcc %o2, 4, %o2
- lduw [%o1], %g1
- EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-85: retl
- clr %o0
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
- .align 32
-90:
- subcc %o2, 1, %o2
- ldub [%o1], %g1
- EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+2: VISEntryHalf ! MS+MS (enable FP for the faligndata loop below)
+
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: ldd [%g1 + 0x00], %f2 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ ldd [%g1 + 0x00], %f0 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_to_user_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_to_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_to_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: ldx [%o1 + 0x00], %o3
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_to_user_short
+ nop
+ ba,a,pt %xcc, U3copy_to_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
-/* U3memcpy.S: UltraSparc-III optimized memcpy.
+/* $Id: U3memcpy.S,v 1.2 2000/11/01 09:29:19 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized memcpy.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
#ifdef __KERNEL__
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
+#undef SMALL_COPY_USES_FPU
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
#endif
-#ifndef XCC
-#define XCC xcc
-#endif
-
- .register %g2,#scratch
- .register %g3,#scratch
-
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* of up to 2.4GB per second.
*/
- .globl U3memcpy
-U3memcpy: /* %o0=dst, %o1=src, %o2=len */
- mov %o0, %g5
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 70f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 80f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
+ .globl U3memcpy
+U3memcpy: /* %o0=dst, %o1=src, %o2=len */
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3memcpy_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3memcpy_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3memcpy_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3memcpy_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3memcpy_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ ldub [%o1 + 0x00], %o3 ! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3memcpy_short ! BR
+ stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
+
+U3memcpy_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
- /* Here len >= 256 and condition codes reflect execution
+ /* Here len >= (6 * 64) and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-1:
+U3memcpy_enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
+1: ldub [%o1 + 0x00], %o3 ! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
-
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetch [%o1 + 0x000], #one_read
- prefetch [%o1 + 0x040], #one_read
- andn %o2, (0x40 - 1), %o4
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x018], %f6
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x020], %f8
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x028], %f10
- faligndata %f6, %f8, %f22
-
- ldd [%o1 + 0x030], %f12
- faligndata %f8, %f10, %f24
- ldd [%o1 + 0x038], %f14
- faligndata %f10, %f12, %f26
- ldd [%o1 + 0x040], %f0
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3memcpy_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
.align 64
-1:
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
-
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
-
- ldd [%o1 + 0x040], %f0
- prefetch [%o1 + 0x180], #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3memcpy_begin:
+#ifdef __KERNEL__
+ .globl U3memcpy_nop_1_6
+U3memcpy_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetch [%o1 + 0x000], #one_read ! MS Group1
+ prefetch [%o1 + 0x040], #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetch [%o1 + 0x080], #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group4
+ ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x100], #one_read ! MS Group6
+1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetch [%o1 + 0x140], #one_read ! MS Group7
+1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x180], #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3memcpy_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3memcpy_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3memcpy_loop1:
+ ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
+ prefetch [%o1 + 0x180], #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3memcpy_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3memcpy_loop2_enter:
+ mov 5, %o3 ! A1
+
+	/* This loop performs only the copy; no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3memcpy_loop2:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ ldd [%o1 + 0x010], %f4 ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ ldd [%o1 + 0x040], %f0 ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3memcpy_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- ldd [%o1 + 0x040], %f0
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
- membar #Sync
+U3memcpy_loopfini:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ ldd [%o1 + 0x010], %f4 ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ stda %f16, [%o0] ASI_BLK_P ! MS Group20
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ ldd [%o1 + 0x040], %f0 ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3memcpy_nop_2_3
+U3memcpy_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3memcpy_toosmall processing.
*/
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 2f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 2f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- ldd [%o1 + 0x00], %f0
-
-1: ldd [%o1 + 0x08], %f2
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- std %f8, [%o0 + 0x00]
- be,pn %XCC, 2f
- add %o0, 0x8, %o0
- ldd [%o1 + 0x08], %f0
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- std %f8, [%o0 + 0x00]
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3memcpy_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3memcpy_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3memcpy_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ ldd [%o1 + 0x00], %f0 ! MS
+
+1: ldd [%o1 + 0x08], %f2 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3memcpy_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ ldd [%o1 + 0x08], %f0 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-2:
+U3memcpy_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3memcpy_short_ret
nop
- ldx [%o1], %o5
- stx %o5, [%o1 + %o3]
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3memcpy_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- lduw [%o1], %o5
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3memcpy_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- lduh [%o1], %o5
- sth %o5, [%o1 + %o3]
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- ldub [%o1], %o5
- ba,pt %xcc, 85f
- stb %o5, [%o1 + %o3]
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- ldx [%o1], %o5
- stx %o5, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- lduw [%o1], %o5
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
+
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-1:
- subcc %o2, 4, %o2
- lduw [%o1], %g1
- stw %g1, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-85: retl
- mov %g5, %o0
+2: VISEntryHalf ! MS+MS
- .align 32
-90:
- subcc %o2, 1, %o2
- ldub [%o1], %g1
- stb %g1, [%o1 + %o3]
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- mov %g5, %o0
-
- /* Act like copy_{to,in}_user(), ie. return zero instead
- * of original destination pointer. This is invoked when
- * copy_{to,in}_user() finds that %asi is kernel space.
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
*/
- .globl U3memcpy_user_stub
-U3memcpy_user_stub:
- save %sp, -192, %sp
- mov %i0, %o0
- mov %i1, %o1
- call U3memcpy
- mov %i2, %o2
- ret
- restore %g0, %g0, %o0
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: ldd [%g1 + 0x00], %f2 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ ldd [%g1 + 0x00], %f0 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3memcpy_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3memcpy_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3memcpy_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ stb %o3, [%o0 + -1]
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: ldx [%o1 + 0x00], %o3
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ stx %o3, [%o0 + -8]
+
+ cmp %o2, 0
+ bne,pn %icc, U3memcpy_short
+ nop
+ ba,a,pt %xcc, U3memcpy_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
.text
.align 32
#ifdef __KERNEL__
+ .globl __bzero_begin
+__bzero_begin:
.globl __bzero, __bzero_noasi
__bzero_noasi:
rd %asi, %g5
ba,pt %xcc, VISbzerofixup_ret0
sub %o1, %g2, %o0
#endif
+ .globl __bzero_end
+__bzero_end:
.type bcopy,@function
#ifdef __KERNEL__
+ .globl __memcpy_begin
+__memcpy_begin:
+
+ .globl __memcpy
+ .type __memcpy,@function
+
memcpy_private:
+__memcpy:
memcpy: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
+#define ULTRA3_PCACHE_DO_NOP(symbol) \
+	sethi	%hi(symbol##_nop_1_6), %g1; \
+	or	%g1, %lo(symbol##_nop_1_6), %g1; \
+	sethi	%hi(NOP), %g2; \
+	stw	%g2, [%g1 + 0x00]; \
+	stw	%g2, [%g1 + 0x04]; \
+	flush	%g1 + 0x00; \
+	stw	%g2, [%g1 + 0x08]; \
+	stw	%g2, [%g1 + 0x0c]; \
+	flush	%g1 + 0x08; \
+	stw	%g2, [%g1 + 0x10]; \
+	stw	%g2, [%g1 + 0x14]; /* was 0x04: sixth insn of _nop_1_6 was never nopped */ \
+	flush	%g1 + 0x10; \
+	sethi	%hi(symbol##_nop_2_3), %g1; \
+	or	%g1, %lo(symbol##_nop_2_3), %g1; \
+	stw	%g2, [%g1 + 0x00]; \
+	stw	%g2, [%g1 + 0x04]; \
+	flush	%g1 + 0x00; \
+	stw	%g2, [%g1 + 0x08]; \
+	flush	%g1 + 0x08;
+
+#include <asm/dcu.h>
.globl cheetah_patch_copyops
cheetah_patch_copyops:
ULTRA3_DO_PATCH(__copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(__copy_to_user, U3copy_to_user)
ULTRA3_DO_PATCH(__copy_in_user, U3copy_in_user)
+#if 0 /* Causes data corruption, nop out the optimization
+ * for now -DaveM
+ */
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ andcc %g3, %o3, %g0
+ be,pn %xcc, pcache_disabled
+ nop
+#endif
+ ULTRA3_PCACHE_DO_NOP(U3memcpy)
+ ULTRA3_PCACHE_DO_NOP(U3copy_from_user)
+ ULTRA3_PCACHE_DO_NOP(U3copy_to_user)
+ ULTRA3_PCACHE_DO_NOP(cheetah_copy_user_page)
+#if 0
+pcache_disabled:
+#endif
retl
nop
#undef BRANCH_ALWAYS
FPU_RETL
#ifdef __KERNEL__
+ .globl __memcpy_end
+__memcpy_end:
+
.section .fixup
.align 4
VIScopyfixup_reto2:
.text
.align 64
+ .globl atomic_impl_begin, atomic_impl_end
+
.globl __atomic_add
+atomic_impl_begin:
__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
lduw [%o1], %g5
add %g5, %o0, %g7
retl
sub %g7, %o0, %o0
+atomic_impl_end:
.text
.align 64
+ .globl __bitops_begin
+__bitops_begin:
+
.globl ___test_and_set_bit
___test_and_set_bit: /* %o0=nr, %o1=addr */
srlx %o0, 6, %g1
lduwa [%o1] ASI_PL, %g7
2: retl
membar #StoreLoad | #StoreStore
+
+ .globl __bitops_end
+__bitops_end:
--- /dev/null
+/* $Id: blockops.S,v 1.42 2002/02/09 19:49:30 davem Exp $
+ * blockops.S: UltraSparc block zero optimized routines.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include "VIS.h"
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+#include <asm/pgtable.h>
+
+#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
+ fmovd %reg0, %f48; fmovd %reg1, %f50; \
+ fmovd %reg2, %f52; fmovd %reg3, %f54; \
+ fmovd %reg4, %f56; fmovd %reg5, %f58; \
+ fmovd %reg6, %f60; fmovd %reg7, %f62;
+
+#define DCACHE_SIZE (PAGE_SIZE * 2)
+#define TLBTEMP_ENT1 (60 << 3)
+#define TLBTEMP_ENT2 (61 << 3)
+#define TLBTEMP_ENTSZ (1 << 3)
+
+#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
+#define PAGE_SIZE_REM 0x80
+#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+#define PAGE_SIZE_REM 0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+ .text
+
+ .align 32
+ .globl copy_user_page
+ .type copy_user_page,@function
+copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
+ VISEntry
+ sethi %hi(PAGE_SIZE), %g3
+ sethi %uhi(PAGE_OFFSET), %g2
+ sllx %g2, 32, %g2
+ sub %o0, %g2, %g1
+ and %o2, %g3, %o0
+ sethi %hi(TLBTEMP_BASE), %o3
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
+ sub %o1, %g2, %g2
+ sllx %g3, 32, %g3
+ mov TLB_TAG_ACCESS, %o2
+ or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
+ sethi %hi(DCACHE_SIZE), %o1
+ or %g1, %g3, %g1
+ or %g2, %g3, %g2
+ add %o0, %o3, %o0
+ add %o0, %o1, %o1
+#define FIX_INSN_1 0x96102060 /* mov (12 << 3), %o3 */
+cheetah_patch_1:
+ mov TLBTEMP_ENT1, %o3
+ rdpr %pstate, %g3
+ wrpr %g3, PSTATE_IE, %pstate
+
+ /* Do this now, before loading the fixed TLB entries for copying,
+ * so we do not risk a multiple TLB match condition later when
+ * restoring those entries.
+ */
+ ldx [%g6 + TI_FLAGS], %g3
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %o4
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %o5
+ stxa %g0, [%o5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %o5
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g1, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ add %o3, (TLBTEMP_ENTSZ), %o3
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g5
+ stxa %g0, [%g5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %g5
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g7
+ stxa %o1, [%o2] ASI_DMMU
+ stxa %g2, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ andcc %g3, _TIF_BLKCOMMIT, %g0
+ bne,pn %xcc, copy_page_using_blkcommit
+ nop
+
+ BRANCH_IF_ANY_CHEETAH(g3,o2,cheetah_copy_user_page)
+ ba,pt %xcc, spitfire_copy_user_page
+ nop
+
+cheetah_copy_user_page:
+ .globl cheetah_copy_user_page_nop_1_6
+cheetah_copy_user_page_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o2
+ sllx %o2, 32, %o2
+ or %g3, %o2, %o2
+ stxa %o2, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+
+ sethi %hi((PAGE_SIZE/64)-7), %o2 ! A0 Group
+ prefetch [%o1 + 0x000], #one_read ! MS
+ or %o2, %lo((PAGE_SIZE/64)-7), %o2 ! A1 Group
+ prefetch [%o1 + 0x040], #one_read ! MS
+ prefetch [%o1 + 0x080], #one_read ! MS Group
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group
+ ldd [%o1 + 0x000], %f0 ! MS Group
+ prefetch [%o1 + 0x100], #one_read ! MS Group
+ ldd [%o1 + 0x008], %f2 ! AX
+ prefetch [%o1 + 0x140], #one_read ! MS Group
+ ldd [%o1 + 0x010], %f4 ! AX
+ prefetch [%o1 + 0x180], #one_read ! MS Group
+ fmovd %f0, %f32 ! FGA Group
+ ldd [%o1 + 0x018], %f6 ! AX
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x020], %f8 ! MS
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x028], %f10 ! AX
+ membar #StoreStore ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x030], %f12 ! MS
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x038], %f14 ! AX
+ fmovd %f10, %f42 ! FGA Group
+ ldd [%o1 + 0x040], %f16 ! MS
+1: ldd [%o1 + 0x048], %f2 ! AX (Group)
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x080], %f16 ! AX
+ prefetch [%o1 + 0x180], #one_read ! MS
+ fmovd %f10, %f42 ! FGA Group
+ subcc %o2, 1, %o2 ! A0
+ add %o0, 0x40, %o0 ! A1
+ bne,pt %xcc, 1b ! BR
+ add %o1, 0x40, %o1 ! A0 Group
+
+ mov 5, %o2 ! A0 Group
+1: ldd [%o1 + 0x048], %f2 ! AX
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ ldd [%o1 + 0x080], %f16 ! MS
+ fmovd %f10, %f42 ! FGA Group
+ subcc %o2, 1, %o2 ! A0
+ add %o0, 0x40, %o0 ! A1
+ bne,pt %xcc, 1b ! BR
+ add %o1, 0x40, %o1 ! A0 Group
+
+ ldd [%o1 + 0x048], %f2 ! AX
+ fmovd %f12, %f44 ! FGA
+ ldd [%o1 + 0x050], %f4 ! MS
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ ldd [%o1 + 0x058], %f6 ! AX
+ fmovd %f16, %f32 ! FGA Group (8-cycle stall)
+ ldd [%o1 + 0x060], %f8 ! MS
+ fmovd %f2, %f34 ! FGA Group
+ ldd [%o1 + 0x068], %f10 ! AX
+ fmovd %f4, %f36 ! FGA Group
+ ldd [%o1 + 0x070], %f12 ! MS
+ fmovd %f6, %f38 ! FGA Group
+ add %o0, 0x40, %o0 ! A0
+ ldd [%o1 + 0x078], %f14 ! AX
+ fmovd %f8, %f40 ! FGA Group
+ fmovd %f10, %f42 ! FGA Group
+ fmovd %f12, %f44 ! FGA Group
+ fmovd %f14, %f46 ! FGA Group
+ stda %f32, [%o0] ASI_BLK_P ! MS
+ .globl cheetah_copy_user_page_nop_2_3
+cheetah_copy_user_page_nop_2_3:
+ mov PRIMARY_CONTEXT, %o2
+ stxa %g0, [%o2] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+ ba,a,pt %xcc, copy_user_page_continue
+
+spitfire_copy_user_page:
+ ldda [%o1] ASI_BLK_P, %f0
+ add %o1, 0x40, %o1
+ ldda [%o1] ASI_BLK_P, %f16
+ add %o1, 0x40, %o1
+ sethi %hi(PAGE_SIZE), %o2
+1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+ ldda [%o1] ASI_BLK_P, %f16
+ stda %f48, [%o0] ASI_BLK_P
+ sub %o2, 0x40, %o2
+ add %o1, 0x40, %o1
+ cmp %o2, PAGE_SIZE_REM
+ bne,pt %xcc, 1b
+ add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f0, [%o0] ASI_BLK_P
+#else
+ membar #Sync
+ stda %f0, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f16, [%o0] ASI_BLK_P
+#endif
+copy_user_page_continue:
+ membar #Sync
+ VISExit
+
+ mov TLB_TAG_ACCESS, %o2
+ stxa %g5, [%o2] ASI_DMMU
+ stxa %g7, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ sub %o3, (TLBTEMP_ENTSZ), %o3
+ stxa %o4, [%o2] ASI_DMMU
+ stxa %o5, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ rdpr %pstate, %g3
+ jmpl %o7 + 0x8, %g0
+ wrpr %g3, PSTATE_IE, %pstate
+
+copy_page_using_blkcommit:
+ membar #LoadStore | #StoreStore | #StoreLoad
+ ldda [%o1] ASI_BLK_P, %f0
+ add %o1, 0x40, %o1
+ ldda [%o1] ASI_BLK_P, %f16
+ add %o1, 0x40, %o1
+ sethi %hi(PAGE_SIZE), %o2
+1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+ ldda [%o1] ASI_BLK_P, %f16
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ sub %o2, 0x40, %o2
+ add %o1, 0x40, %o1
+ cmp %o2, PAGE_SIZE_REM
+ bne,pt %xcc, 1b
+ add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_COMMIT_P
+ add %o0, 0x40, %o0
+ ba,pt %xcc, copy_user_page_continue
+ stda %f0, [%o0] ASI_BLK_COMMIT_P
+#else
+ membar #Sync
+ stda %f0, [%o0] ASI_BLK_COMMIT_P
+ add %o0, 0x40, %o0
+ ba,pt %xcc, copy_user_page_continue
+ stda %f16, [%o0] ASI_BLK_COMMIT_P
+#endif
+
+ .align 32
+ .globl _clear_page
+ .type _clear_page,@function
+_clear_page: /* %o0=dest */
+ VISEntryHalf
+ ba,pt %xcc, clear_page_common
+ clr %o4
+
+ .align 32
+ .globl clear_user_page
+ .type clear_user_page,@function
+clear_user_page: /* %o0=dest, %o1=vaddr */
+ VISEntryHalf
+ sethi %hi(PAGE_SIZE), %g3
+ sethi %uhi(PAGE_OFFSET), %g2
+ sllx %g2, 32, %g2
+ sub %o0, %g2, %g1
+ and %o1, %g3, %o0
+ mov TLB_TAG_ACCESS, %o2
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
+ sethi %hi(TLBTEMP_BASE), %o3
+ sllx %g3, 32, %g3
+ or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
+ or %g1, %g3, %g1
+ add %o0, %o3, %o0
+#define FIX_INSN_2 0x96102068 /* mov (13 << 3), %o3 */
+cheetah_patch_2:
+ mov TLBTEMP_ENT2, %o3
+ rdpr %pstate, %g3
+ wrpr %g3, PSTATE_IE, %pstate
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g5
+ stxa %g0, [%g5] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_TAG_READ, %g5
+
+ /* Spitfire Errata #32 workaround */
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%o3] ASI_DTLB_DATA_ACCESS, %g7
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g1, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ mov 1, %o4
+
+clear_page_common:
+ membar #StoreLoad | #StoreStore | #LoadStore ! LSU Group
+ fzero %f0 ! FPA Group
+ sethi %hi(PAGE_SIZE/256), %o1 ! IEU0
+ fzero %f2 ! FPA Group
+ or %o1, %lo(PAGE_SIZE/256), %o1 ! IEU0
+ faddd %f0, %f2, %f4 ! FPA Group
+ fmuld %f0, %f2, %f6 ! FPM
+ faddd %f0, %f2, %f8 ! FPA Group
+ fmuld %f0, %f2, %f10 ! FPM
+
+ faddd %f0, %f2, %f12 ! FPA Group
+ fmuld %f0, %f2, %f14 ! FPM
+1: stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ add %o0, 0x40, %o0 ! IEU0
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ add %o0, 0x40, %o0 ! IEU0
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+
+ add %o0, 0x40, %o0 ! IEU0 Group
+ stda %f0, [%o0 + %g0] ASI_BLK_P ! Store Group
+ subcc %o1, 1, %o1 ! IEU1
+ bne,pt %icc, 1b ! CTI
+ add %o0, 0x40, %o0 ! IEU0 Group
+ membar #Sync ! LSU Group
+ VISExitHalf
+
+ brnz,pt %o4, 1f
+ nop
+
+ retl
+ nop
+
+1:
+ stxa %g5, [%o2] ASI_DMMU
+ stxa %g7, [%o3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ jmpl %o7 + 0x8, %g0
+ wrpr %g3, 0x0, %pstate
+
+ .globl cheetah_patch_pgcopyops
+cheetah_patch_pgcopyops:
+ sethi %hi(FIX_INSN_1), %g1
+ or %g1, %lo(FIX_INSN_1), %g1
+ sethi %hi(cheetah_patch_1), %g2
+ or %g2, %lo(cheetah_patch_1), %g2
+ stw %g1, [%g2]
+ flush %g2
+ sethi %hi(FIX_INSN_2), %g1
+ or %g1, %lo(FIX_INSN_2), %g1
+ sethi %hi(cheetah_patch_2), %g2
+ or %g2, %lo(cheetah_patch_2), %g2
+ stw %g1, [%g2]
+ flush %g2
+ retl
+ nop
+
+#undef FIX_INSN_1	/* was FIX_INSN1: did not match the macro defined above */
+#undef FIX_INSN_2	/* was FIX_INSN2: same mismatch; macro leaked past this file */
+#undef PAGE_SIZE_REM
+++ /dev/null
-/* clear_page.S: UltraSparc optimized clear page.
- *
- * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <asm/visasm.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/spitfire.h>
-
- /* What we used to do was lock a TLB entry into a specific
- * TLB slot, clear the page with interrupts disabled, then
- * restore the original TLB entry. This was great for
- * disturbing the TLB as little as possible, but it meant
- * we had to keep interrupts disabled for a long time.
- *
- * Now, we simply use the normal TLB loading mechanism,
- * and this makes the cpu choose a slot all by itself.
- * Then we do a normal TLB flush on exit. We need only
- * disable preemption during the clear.
- */
-
-#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
- .text
-
- .globl _clear_page
-_clear_page: /* %o0=dest */
- ba,pt %xcc, clear_page_common
- clr %o4
-
- /* This thing is pretty important, it shows up
- * on the profiles via do_anonymous_page().
- */
- .align 32
- .globl clear_user_page
-clear_user_page: /* %o0=dest, %o1=vaddr */
- lduw [%g6 + TI_PRE_COUNT], %o2
- sethi %uhi(PAGE_OFFSET), %g2
- sethi %hi(PAGE_SIZE), %o4
-
- sllx %g2, 32, %g2
- sethi %uhi(TTE_BITS_TOP), %g3
-
- sllx %g3, 32, %g3
- sub %o0, %g2, %g1 ! paddr
-
- or %g3, TTE_BITS_BOTTOM, %g3
- and %o1, %o4, %o0 ! vaddr D-cache alias bit
-
- or %g1, %g3, %g1 ! TTE data
- sethi %hi(TLBTEMP_BASE), %o3
-
- add %o2, 1, %o4
- add %o0, %o3, %o0 ! TTE vaddr
-
- /* Disable preemption. */
- mov TLB_TAG_ACCESS, %g3
- stw %o4, [%g6 + TI_PRE_COUNT]
-
- /* Load TLB entry. */
- rdpr %pstate, %o4
- wrpr %o4, PSTATE_IE, %pstate
- stxa %o0, [%g3] ASI_DMMU
- stxa %g1, [%g0] ASI_DTLB_DATA_IN
- flush %g6
- wrpr %o4, 0x0, %pstate
-
- mov 1, %o4
-
-clear_page_common:
- VISEntryHalf
- membar #StoreLoad | #StoreStore | #LoadStore
- fzero %f0
- sethi %hi(PAGE_SIZE/64), %o1
- mov %o0, %g1 ! remember vaddr for tlbflush
- fzero %f2
- or %o1, %lo(PAGE_SIZE/64), %o1
- faddd %f0, %f2, %f4
- fmuld %f0, %f2, %f6
- faddd %f0, %f2, %f8
- fmuld %f0, %f2, %f10
-
- faddd %f0, %f2, %f12
- fmuld %f0, %f2, %f14
-1: stda %f0, [%o0 + %g0] ASI_BLK_P
- subcc %o1, 1, %o1
- bne,pt %icc, 1b
- add %o0, 0x40, %o0
- membar #Sync
- VISExitHalf
-
- brz,pn %o4, out
- nop
-
- stxa %g0, [%g1] ASI_DMMU_DEMAP
- membar #Sync
- stw %o2, [%g6 + TI_PRE_COUNT]
-
-out: retl
- nop
-
+++ /dev/null
-/* clear_page.S: UltraSparc optimized copy page.
- *
- * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
- * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <asm/visasm.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/spitfire.h>
-#include <asm/head.h>
-
- /* What we used to do was lock a TLB entry into a specific
- * TLB slot, clear the page with interrupts disabled, then
- * restore the original TLB entry. This was great for
- * disturbing the TLB as little as possible, but it meant
- * we had to keep interrupts disabled for a long time.
- *
- * Now, we simply use the normal TLB loading mechanism,
- * and this makes the cpu choose a slot all by itself.
- * Then we do a normal TLB flush on exit. We need only
- * disable preemption during the clear.
- */
-
-#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-#define DCACHE_SIZE (PAGE_SIZE * 2)
-
-#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
-#define PAGE_SIZE_REM 0x80
-#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
-#define PAGE_SIZE_REM 0x100
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
- fmovd %reg0, %f48; fmovd %reg1, %f50; \
- fmovd %reg2, %f52; fmovd %reg3, %f54; \
- fmovd %reg4, %f56; fmovd %reg5, %f58; \
- fmovd %reg6, %f60; fmovd %reg7, %f62;
-
- .text
-
- .align 32
- .globl copy_user_page
-copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
- lduw [%g6 + TI_PRE_COUNT], %o4
- sethi %uhi(PAGE_OFFSET), %g2
- sethi %hi(PAGE_SIZE), %o3
-
- sllx %g2, 32, %g2
- sethi %uhi(TTE_BITS_TOP), %g3
-
- sllx %g3, 32, %g3
- sub %o0, %g2, %g1 ! dest paddr
-
- sub %o1, %g2, %g2 ! src paddr
- or %g3, TTE_BITS_BOTTOM, %g3
-
- and %o2, %o3, %o0 ! vaddr D-cache alias bit
- or %g1, %g3, %g1 ! dest TTE data
-
- or %g2, %g3, %g2 ! src TTE data
- sethi %hi(TLBTEMP_BASE), %o3
-
- sethi %hi(DCACHE_SIZE), %o1
- add %o0, %o3, %o0 ! dest TTE vaddr
-
- add %o4, 1, %o2
- add %o0, %o1, %o1 ! src TTE vaddr
-
- /* Disable preemption. */
- mov TLB_TAG_ACCESS, %g3
- stw %o2, [%g6 + TI_PRE_COUNT]
-
- /* Load TLB entries. */
- rdpr %pstate, %o2
- wrpr %o2, PSTATE_IE, %pstate
- stxa %o0, [%g3] ASI_DMMU
- stxa %g1, [%g0] ASI_DTLB_DATA_IN
- membar #Sync
- stxa %o1, [%g3] ASI_DMMU
- stxa %g2, [%g0] ASI_DTLB_DATA_IN
- membar #Sync
- wrpr %o2, 0x0, %pstate
-
- BRANCH_IF_ANY_CHEETAH(g3,o2,1f)
- ba,pt %xcc, 9f
- nop
-
-1:
- VISEntryHalf
- membar #StoreLoad | #StoreStore | #LoadStore
- sethi %hi((PAGE_SIZE/64)-2), %o2
- mov %o0, %g1
- prefetch [%o1 + 0x000], #one_read
- or %o2, %lo((PAGE_SIZE/64)-2), %o2
- prefetch [%o1 + 0x040], #one_read
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- fmovd %f0, %f16
- ldd [%o1 + 0x018], %f6
- fmovd %f2, %f18
- ldd [%o1 + 0x020], %f8
- fmovd %f4, %f20
- ldd [%o1 + 0x028], %f10
- fmovd %f6, %f22
- ldd [%o1 + 0x030], %f12
- fmovd %f8, %f24
- ldd [%o1 + 0x038], %f14
- fmovd %f10, %f26
- ldd [%o1 + 0x040], %f0
-1: ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
- ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
- ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
- ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
- ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
- ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
- ldd [%o1 + 0x080], %f0
- prefetch [%o1 + 0x180], #one_read
- fmovd %f10, %f26
- subcc %o2, 1, %o2
- add %o0, 0x40, %o0
- bne,pt %xcc, 1b
- add %o1, 0x40, %o1
-
- ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
- ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
- ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
- ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
- ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
- add %o0, 0x40, %o0
- ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
- fmovd %f10, %f26
- fmovd %f12, %f28
- fmovd %f14, %f30
- stda %f16, [%o0] ASI_BLK_P
- membar #Sync
- VISExitHalf
- ba,pt %xcc, 5f
- nop
-
-9:
- VISEntry
- ldub [%g6 + TI_FAULT_CODE], %g3
- mov %o0, %g1
- cmp %g3, 0
- rd %asi, %g3
- be,a,pt %icc, 1f
- wr %g0, ASI_BLK_P, %asi
- wr %g0, ASI_BLK_COMMIT_P, %asi
-1: ldda [%o1] ASI_BLK_P, %f0
- add %o1, 0x40, %o1
- ldda [%o1] ASI_BLK_P, %f16
- add %o1, 0x40, %o1
- sethi %hi(PAGE_SIZE), %o2
-1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
- ldda [%o1] ASI_BLK_P, %f32
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
- ldda [%o1] ASI_BLK_P, %f0
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
- ldda [%o1] ASI_BLK_P, %f16
- stda %f48, [%o0] %asi
- sub %o2, 0x40, %o2
- add %o1, 0x40, %o1
- cmp %o2, PAGE_SIZE_REM
- bne,pt %xcc, 1b
- add %o0, 0x40, %o0
-#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
- TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
- ldda [%o1] ASI_BLK_P, %f32
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
- ldda [%o1] ASI_BLK_P, %f0
- stda %f48, [%o0] %asi
- add %o1, 0x40, %o1
- sub %o2, 0x40, %o2
- add %o0, 0x40, %o0
- membar #Sync
- stda %f32, [%o0] %asi
- add %o0, 0x40, %o0
- stda %f0, [%o0] %asi
-#else
- membar #Sync
- stda %f0, [%o0] %asi
- add %o0, 0x40, %o0
- stda %f16, [%o0] %asi
-#endif
- membar #Sync
- wr %g3, 0x0, %asi
- VISExit
-
-5:
- stxa %g0, [%g1] ASI_DMMU_DEMAP
- membar #Sync
-
- sethi %hi(DCACHE_SIZE), %g2
- stxa %g0, [%g1 + %g2] ASI_DMMU_DEMAP
- membar #Sync
-
- retl
- stw %o4, [%g6 + TI_PRE_COUNT]
.text
.align 64
+ .globl rwlock_impl_begin, rwlock_impl_end
+
/* The non-contention read lock usage is 2 cache lines. */
.globl __read_lock, __read_unlock
+rwlock_impl_begin:
__read_lock: /* %o0 = lock_ptr */
ldsw [%o0], %g5
brlz,pn %g5, __read_wait_for_writer
__write_trylock_fail:
retl
mov 0, %o0
+rwlock_impl_end:
.text
.align 64
- .globl _raw_spin_lock
-_raw_spin_lock: /* %o0 = lock_ptr */
-1: ldstub [%o0], %g7
- brnz,pn %g7, 2f
- membar #StoreLoad | #StoreStore
- retl
- nop
-2: ldub [%o0], %g7
- brnz,pt %g7, 2b
- membar #LoadLoad
- ba,a,pt %xcc, 1b
-
.globl _raw_spin_lock_flags
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y := ultra.o fault.o init.o generic.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
- set_thread_fault_code(fault_code |
- FAULT_CODE_BLKCOMMIT);
+ set_thread_flag(TIF_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
fault_done:
/* These values are no longer needed, clear them. */
set_thread_fault_code(0);
+ clear_thread_flag(TIF_BLKCOMMIT);
current_thread_info()->fault_address = 0;
}
#include <asm/spitfire.h>
#include <asm/sections.h>
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
extern void device_scan(void);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
put_cpu();
}
+/* When shared+writable mmaps of files go away, we lose all dirty
+ * page state, so we have to deal with D-cache aliasing here.
+ *
+ * This code relies on the fact that flush_cache_range() is always
+ * called for an area composed by a single VMA. It also assumes that
+ * the MM's page_table_lock is held.
+ */
+static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
+{
+	unsigned long offset;
+	pte_t *ptep;
+
+	if (pmd_none(*pmd))
+		return;
+	ptep = pte_offset_map(pmd, address);
+	/* Clamp the span so the scan never walks past this pmd's PTE table. */
+	offset = address & ~PMD_MASK;
+	if (offset + size > PMD_SIZE)
+		size = PMD_SIZE - offset;
+	size &= PAGE_MASK;
+	for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
+		pte_t pte = *ptep;
+
+		if (pte_none(pte))
+			continue;
+
+		if (pte_present(pte) && pte_dirty(pte)) {
+			struct page *page;
+			unsigned long pgaddr, uaddr;
+			unsigned long pfn = pte_pfn(pte);
+
+			if (!pfn_valid(pfn))
+				continue;
+			page = pfn_to_page(pfn);
+			/* Only dirty, file-backed pages can leave stale
+			 * D-cache alias state behind.
+			 */
+			if (PageReserved(page) || !page_mapping(page))
+				continue;
+			pgaddr = (unsigned long) page_address(page);
+			uaddr = address + offset;
+			/* Flush only when kernel and user addresses differ in
+			 * bit 13, the D-cache alias bit (presumably PAGE_SHIFT
+			 * on this platform -- see the header comment above).
+			 */
+			if ((pgaddr ^ uaddr) & (1 << 13))
+				flush_dcache_page_all(mm, page);
+		}
+	}
+	pte_unmap(ptep - 1);
+}
+
+static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
+{
+	pmd_t *pmd;
+	unsigned long end;
+
+	if (pgd_none(*dir))
+		return;
+	pmd = pmd_offset(dir, address);
+	end = address + size;
+	/* Do not walk past the end of this pgd's pmd table. */
+	if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+		end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+	do {
+		flush_cache_pte_range(mm, pmd, address, end - address);
+		address = (address + PMD_SIZE) & PMD_MASK;
+		pmd++;
+	} while (address < end);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t *dir = pgd_offset(mm, start);
+
+	/* Flush the current task's user register windows to memory first. */
+	if (mm == current->mm)
+		flushw_user();
+
+	/* Only shared+writable file mappings can carry the dirty alias
+	 * state this function exists to clean up (see comment above).
+	 */
+	if (vma->vm_file == NULL ||
+	    ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
+		return;
+
+	do {
+		flush_cache_pmd_range(mm, dir, start, end - start);
+		start = (start + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	} while (start && (start < end));
+}
+
void flush_icache_range(unsigned long start, unsigned long end)
{
/* Cheetah has coherent I-cache. */
#else
#define DC_ALIAS_SHIFT 0
#endif
-pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
struct page *page;
unsigned long color;
+++ /dev/null
-/* arch/sparc64/mm/tlb.c
- *
- * Copyright (C) 2004 David S. Miller <davem@redhat.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/percpu.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-#include <asm/tlb.h>
-
-/* Heavily inspired by the ppc64 code. */
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
- { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
-
-void flush_tlb_pending(void)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
-
- if (mp->tlb_nr) {
- unsigned long context = mp->mm->context;
-
- if (CTX_VALID(context)) {
-#ifdef CONFIG_SMP
- smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
- &mp->vaddrs[0]);
-#else
- __flush_tlb_pending(CTX_HWBITS(context), mp->tlb_nr,
- &mp->vaddrs[0]);
-#endif
- }
- mp->tlb_nr = 0;
- }
-}
-
-void tlb_batch_add(pte_t *ptep, pte_t orig)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
- struct page *ptepage;
- struct mm_struct *mm;
- unsigned long vaddr, nr;
-
- ptepage = virt_to_page(ptep);
- mm = (struct mm_struct *) ptepage->mapping;
-
- /* It is more efficient to let flush_tlb_kernel_range()
- * handle these cases.
- */
- if (mm == &init_mm)
- return;
-
- vaddr = ptepage->index +
- (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
- if (pte_exec(orig))
- vaddr |= 0x1UL;
-
- if (pte_dirty(orig)) {
- unsigned long paddr, pfn = pte_pfn(orig);
- struct address_space *mapping;
- struct page *page;
-
- if (!pfn_valid(pfn))
- goto no_cache_flush;
-
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- goto no_cache_flush;
-
- /* A real file page? */
- mapping = page_mapping(page);
- if (!mapping)
- goto no_cache_flush;
-
- paddr = (unsigned long) page_address(page);
- if ((paddr ^ vaddr) & (1 << 13))
- flush_dcache_page_all(mm, page);
- }
-
-no_cache_flush:
- if (mp->tlb_frozen)
- return;
-
- nr = mp->tlb_nr;
-
- if (unlikely(nr != 0 && mm != mp->mm)) {
- flush_tlb_pending();
- nr = 0;
- }
-
- if (nr == 0)
- mp->mm = mm;
-
- mp->vaddrs[nr] = vaddr;
- mp->tlb_nr = ++nr;
- if (nr >= TLB_BATCH_NR)
- flush_tlb_pending();
-}
-
-void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
- unsigned long nr = mp->tlb_nr;
- long s = start, e = end, vpte_base;
-
- if (mp->tlb_frozen)
- return;
-
- /* Nobody should call us with start below VM hole and end above.
- * See if it is really true.
- */
- BUG_ON(s > e);
-
-#if 0
- /* Currently free_pgtables guarantees this. */
- s &= PMD_MASK;
- e = (e + PMD_SIZE - 1) & PMD_MASK;
-#endif
- vpte_base = (tlb_type == spitfire ?
- VPTE_BASE_SPITFIRE :
- VPTE_BASE_CHEETAH);
-
- if (unlikely(nr != 0 && mm != mp->mm)) {
- flush_tlb_pending();
- nr = 0;
- }
-
- if (nr == 0)
- mp->mm = mm;
-
- start = vpte_base + (s >> (PAGE_SHIFT - 3));
- end = vpte_base + (e >> (PAGE_SHIFT - 3));
- while (start < end) {
- mp->vaddrs[nr] = start;
- mp->tlb_nr = ++nr;
- if (nr >= TLB_BATCH_NR) {
- flush_tlb_pending();
- nr = 0;
- }
- start += PAGE_SIZE;
- }
- if (nr)
- flush_tlb_pending();
-}
-
-unsigned long __ptrs_per_pmd(void)
-{
- if (test_thread_flag(TIF_32BIT))
- return (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT));
- return REAL_PTRS_PER_PMD;
-}
*/
.text
.align 32
- .globl __flush_tlb_mm
+ .globl __flush_tlb_page, __flush_tlb_mm, __flush_tlb_range
+__flush_tlb_page: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
+ ldxa [%o2] ASI_DMMU, %g2
+ cmp %g2, %o0
+ bne,pn %icc, __spitfire_flush_tlb_page_slow
+ or %o1, 0x10, %g3
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ retl
+ flush %g6
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
ldxa [%o1] ASI_DMMU, %g2
cmp %g2, %o0
nop
nop
- .align 32
- .globl __flush_tlb_pending
-__flush_tlb_pending:
- /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
- rdpr %pstate, %g5
- sllx %o1, 3, %o1
- andn %g5, PSTATE_IE, %g2
- wrpr %g2, %pstate
- mov SECONDARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- stxa %o0, [%o4] ASI_DMMU
-1: sub %o1, (1 << 3), %o1
- ldx [%o2 + %o1], %o3
- andcc %o3, 1, %g0
- andn %o3, 1, %o3
- be,pn %icc, 2f
- or %o3, 0x10, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-2: stxa %g0, [%o3] ASI_DMMU_DEMAP
- membar #Sync
- brnz,pt %o1, 1b
- nop
- stxa %g2, [%o4] ASI_DMMU
+__flush_tlb_range: /* %o0=(ctx&TAG_CONTEXT_BITS), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
+ * %o3=end&PAGE_MASK, %o4=PAGE_SIZE, %o5=(end - start)
+ */
+#define TLB_MAGIC 207 /* Students, do you know how I calculated this? -DaveM */
+ cmp %o5, %o4
+ bleu,pt %xcc, __flush_tlb_page
+ srlx %o5, PAGE_SHIFT, %g5
+ cmp %g5, TLB_MAGIC
+ bgeu,pn %icc, __spitfire_flush_tlb_range_constant_time
+ or %o1, 0x10, %g5
+ ldxa [%o2] ASI_DMMU, %g2
+ cmp %g2, %o0
+__spitfire_flush_tlb_range_page_by_page:
+ bne,pn %icc, __spitfire_flush_tlb_range_pbp_slow
+ sub %o5, %o4, %o5
+1: stxa %g0, [%g5 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%g5 + %o5] ASI_IMMU_DEMAP
+ brnz,pt %o5, 1b
+ sub %o5, %o4, %o5
+ retl
+ flush %g6
+__spitfire_flush_tlb_range_constant_time: /* %o0=ctx, %o1=start, %o3=end */
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ mov TLB_TAG_ACCESS, %g3
+ mov ((SPITFIRE_HIGHEST_LOCKED_TLBENT-1) << 3), %g2
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
flush %g6
+
+1: ldxa [%g2] ASI_ITLB_TAG_READ, %o4
+ and %o4, TAG_CONTEXT_BITS, %o5
+ cmp %o5, %o0
+ bne,pt %icc, 2f
+ andn %o4, TAG_CONTEXT_BITS, %o4
+ cmp %o4, %o1
+ blu,pt %xcc, 2f
+ cmp %o4, %o3
+ blu,pn %xcc, 4f
+2: ldxa [%g2] ASI_DTLB_TAG_READ, %o4
+ and %o4, TAG_CONTEXT_BITS, %o5
+ cmp %o5, %o0
+ andn %o4, TAG_CONTEXT_BITS, %o4
+ bne,pt %icc, 3f
+ cmp %o4, %o1
+ blu,pt %xcc, 3f
+ cmp %o4, %o3
+ blu,pn %xcc, 5f
+ nop
+3: brnz,pt %g2, 1b
+ sub %g2, (1 << 3), %g2
retl
- wrpr %g5, 0x0, %pstate
+ wrpr %g1, 0x0, %pstate
+4: stxa %g0, [%g3] ASI_IMMU
+ stxa %g0, [%g2] ASI_ITLB_DATA_ACCESS
+ flush %g6
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ flush %g6
+
+ ba,pt %xcc, 2b
+ nop
+
+5: stxa %g0, [%g3] ASI_DMMU
+ stxa %g0, [%g2] ASI_DTLB_DATA_ACCESS
+ flush %g6
+
+ /* Spitfire Errata #32 workaround. */
+ mov 0x8, %o4
+ stxa %g0, [%o4] ASI_DMMU
+ flush %g6
+
+ ba,pt %xcc, 3b
+ nop
.align 32
.globl __flush_tlb_kernel_range
retl
wrpr %g1, 0, %pstate
+__spitfire_flush_tlb_page_slow:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ flush %g6
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g1, 0, %pstate
+
+__spitfire_flush_tlb_range_pbp_slow:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ stxa %o0, [%o2] ASI_DMMU
+
+2: stxa %g0, [%g5 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%g5 + %o5] ASI_IMMU_DEMAP
+ brnz,pt %o5, 2b
+ sub %o5, %o4, %o5
+ flush %g6
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g1, 0x0, %pstate
+
/*
* The following code flushes one page_size worth.
*/
ba,a,pt %xcc, __prefill_itlb
/* Cheetah specific versions, patched at boot time. */
+__cheetah_flush_tlb_page: /* 14 insns */
+ rdpr %pstate, %g5
+ andn %g5, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o2
+ ldxa [%o2] ASI_DMMU, %g2
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g0, [%o1] ASI_DMMU_DEMAP
+ stxa %g0, [%o1] ASI_IMMU_DEMAP
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g5, 0x0, %pstate
+
__cheetah_flush_tlb_mm: /* 15 insns */
rdpr %pstate, %g5
andn %g5, PSTATE_IE, %g2
retl
wrpr %g5, 0x0, %pstate
-__cheetah_flush_tlb_pending: /* 22 insns */
- /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
- rdpr %pstate, %g5
- sllx %o1, 3, %o1
+__cheetah_flush_tlb_range: /* 20 insns */
+ cmp %o5, %o4
+ blu,pt %xcc, 9f
+ rdpr %pstate, %g5
andn %g5, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
wrpr %g0, 1, %tl
- mov PRIMARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- stxa %o0, [%o4] ASI_DMMU
-1: sub %o1, (1 << 3), %o1
- ldx [%o2 + %o1], %o3
- andcc %o3, 1, %g0
- be,pn %icc, 2f
- andn %o3, 1, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-2: stxa %g0, [%o3] ASI_DMMU_DEMAP
- brnz,pt %o1, 1b
- membar #Sync
- stxa %g2, [%o4] ASI_DMMU
+ mov PRIMARY_CONTEXT, %o2
+ sub %o5, %o4, %o5
+ ldxa [%o2] ASI_DMMU, %g2
+ stxa %o0, [%o2] ASI_DMMU
+1: stxa %g0, [%o1 + %o5] ASI_DMMU_DEMAP
+ stxa %g0, [%o1 + %o5] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o5, 1b
+ sub %o5, %o4, %o5
+ stxa %g2, [%o2] ASI_DMMU
flush %g6
wrpr %g0, 0, %tl
- retl
+9: retl
wrpr %g5, 0x0, %pstate
flush_dcpage_cheetah: /* 11 insns */
cheetah_patch_cachetlbops:
save %sp, -128, %sp
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call cheetah_patch_one
+ mov 14, %o2
+
sethi %hi(__flush_tlb_mm), %o0
or %o0, %lo(__flush_tlb_mm), %o0
sethi %hi(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one
mov 15, %o2
- sethi %hi(__flush_tlb_pending), %o0
- or %o0, %lo(__flush_tlb_pending), %o0
- sethi %hi(__cheetah_flush_tlb_pending), %o1
- or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ sethi %hi(__flush_tlb_range), %o0
+ or %o0, %lo(__flush_tlb_range), %o0
+ sethi %hi(__cheetah_flush_tlb_range), %o1
+ or %o1, %lo(__cheetah_flush_tlb_range), %o1
call cheetah_patch_one
- mov 22, %o2
+ mov 20, %o2
sethi %hi(__flush_dcache_page), %o0
or %o0, %lo(__flush_dcache_page), %o0
* TODO: Make xcall TLB range flushes use the tricks above... -DaveM
*/
.align 32
- .globl xcall_flush_tlb_mm
+ .globl xcall_flush_tlb_page, xcall_flush_tlb_mm, xcall_flush_tlb_range
+xcall_flush_tlb_page:
+ mov PRIMARY_CONTEXT, %g2
+ ldxa [%g2] ASI_DMMU, %g3
+ stxa %g5, [%g2] ASI_DMMU
+ stxa %g0, [%g1] ASI_DMMU_DEMAP
+ stxa %g0, [%g1] ASI_IMMU_DEMAP
+ stxa %g3, [%g2] ASI_DMMU
+ retry
+ nop
+
xcall_flush_tlb_mm:
mov PRIMARY_CONTEXT, %g2
mov 0x40, %g4
stxa %g3, [%g2] ASI_DMMU
retry
- .globl xcall_flush_tlb_pending
-xcall_flush_tlb_pending:
- /* %g5=context, %g1=nr, %g7=vaddrs[] */
- sllx %g1, 3, %g1
- mov PRIMARY_CONTEXT, %g4
- ldxa [%g4] ASI_DMMU, %g2
+xcall_flush_tlb_range:
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ add %g2, 1, %g2
+ srlx %g3, PAGE_SHIFT, %g4
+ cmp %g4, 96
+
+ bgu,pn %icc, xcall_flush_tlb_mm
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g7
+ sub %g3, %g2, %g3
stxa %g5, [%g4] ASI_DMMU
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %g5
- andcc %g5, 0x1, %g0
- be,pn %icc, 2f
-
- andn %g5, 0x1, %g5
- stxa %g0, [%g5] ASI_IMMU_DEMAP
-2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ nop
+ nop
+ nop
+
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
membar #Sync
- brnz,pt %g1, 1b
- nop
- stxa %g2, [%g4] ASI_DMMU
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ stxa %g7, [%g4] ASI_DMMU
retry
+ nop
+ nop
.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
retry
nop
nop
+ nop
/* This runs in a very controlled environment, so we do
* not need to worry about BH races etc.
__asm__ ("srl %0, 0, %0" \
: "=r" (__ret) \
: "0" (__x)); \
- (void __user *)__ret; \
+ __ret; \
})
extern unsigned sys_call_table[];
#define UFSMAGIC (((unsigned)'u'<<24)||((unsigned)'f'<<16)||((unsigned)'s'<<8))
-static inline int putstat(struct sol_stat __user *ubuf, struct kstat *kbuf)
+static inline int putstat(struct sol_stat *ubuf, struct kstat *kbuf)
{
if (kbuf->size > MAX_NON_LFS ||
!sysv_valid_dev(kbuf->dev) ||
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
-static inline int putstat64(struct sol_stat64 __user *ubuf, struct kstat *kbuf)
+static inline int putstat64(struct sol_stat64 *ubuf, struct kstat *kbuf)
{
if (!sysv_valid_dev(kbuf->dev) || !sysv_valid_dev(kbuf->rdev))
return -EOVERFLOW;
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
asmlinkage int solaris_stat(u32 filename, u32 statbuf)
{
+	int ret;
	struct kstat s;
-	int ret = vfs_stat(A(filename), &s);
-	if (!ret)
-		return putstat(A(statbuf), &s);
+	char *filenam;
+	mm_segment_t old_fs = get_fs();
+
+	filenam = getname ((char *)A(filename));
+	ret = PTR_ERR(filenam);
+	if (!IS_ERR(filenam)) {
+		set_fs (KERNEL_DS);
+		ret = vfs_stat(filenam, &s);
+		set_fs (old_fs);
+		putname (filenam);
+		/* Copy out only on success: on failure s is uninitialized
+		 * and the vfs_stat() error must reach the caller.
+		 */
+		if (!ret)
+			ret = putstat((struct sol_stat *)A(statbuf), &s);
+	}
	return ret;
}
asmlinkage int solaris_stat64(u32 filename, u32 statbuf)
{
+	int ret;
	struct kstat s;
-	int ret = vfs_stat(A(filename), &s);
-	if (!ret)
-		return putstat64(A(statbuf), &s);
+	char *filenam;
+	mm_segment_t old_fs = get_fs();
+
+	filenam = getname ((char *)A(filename));
+	ret = PTR_ERR(filenam);
+	if (!IS_ERR(filenam)) {
+		set_fs (KERNEL_DS);
+		ret = vfs_stat(filenam, &s);
+		set_fs (old_fs);
+		putname (filenam);
+		/* Copy out only on success: on failure s is uninitialized
+		 * and the vfs_stat() error must reach the caller.
+		 */
+		if (!ret)
+			ret = putstat64((struct sol_stat64 *)A(statbuf), &s);
+	}
	return ret;
}
asmlinkage int solaris_lstat(u32 filename, u32 statbuf)
{
+	int ret;
	struct kstat s;
-	int ret = vfs_lstat(A(filename), &s);
-	if (!ret)
-		return putstat(A(statbuf), &s);
+	char *filenam;
+	mm_segment_t old_fs = get_fs();
+
+	filenam = getname ((char *)A(filename));
+	ret = PTR_ERR(filenam);
+	if (!IS_ERR(filenam)) {
+		set_fs (KERNEL_DS);
+		ret = vfs_lstat(filenam, &s);
+		set_fs (old_fs);
+		putname (filenam);
+		/* Copy out only on success: on failure s is uninitialized
+		 * and the vfs_lstat() error must reach the caller.
+		 */
+		if (!ret)
+			ret = putstat((struct sol_stat *)A(statbuf), &s);
+	}
	return ret;
}
asmlinkage int solaris_lstat64(u32 filename, u32 statbuf)
{
+	int ret;
	struct kstat s;
-	int ret = vfs_lstat(A(filename), &s);
-	if (!ret)
-		return putstat64(A(statbuf), &s);
+	char *filenam;
+	mm_segment_t old_fs = get_fs();
+
+	filenam = getname ((char *)A(filename));
+	ret = PTR_ERR(filenam);
+	if (!IS_ERR(filenam)) {
+		set_fs (KERNEL_DS);
+		ret = vfs_lstat(filenam, &s);
+		set_fs (old_fs);
+		putname (filenam);
+		/* Copy out only on success: on failure s is uninitialized
+		 * and the vfs_lstat() error must reach the caller.
+		 */
+		if (!ret)
+			ret = putstat64((struct sol_stat64 *)A(statbuf), &s);
+	}
	return ret;
}
asmlinkage int solaris_fstat(unsigned int fd, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_fstat(fd, &s);
+ ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat(A(statbuf), &s);
+ return putstat((struct sol_stat *)A(statbuf), &s);
return ret;
}
asmlinkage int solaris_fstat64(unsigned int fd, u32 statbuf)
{
+ int ret;
struct kstat s;
- int ret = vfs_fstat(fd, &s);
+
+ ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat64(A(statbuf), &s);
+ return putstat64((struct sol_stat64 *)A(statbuf), &s);
return ret;
}
asmlinkage int solaris_mknod(u32 path, u32 mode, s32 dev)
{
- int (*sys_mknod)(const char __user *,int,unsigned) =
- (int (*)(const char __user *,int,unsigned))SYS(mknod);
+ int (*sys_mknod)(const char *,int,unsigned) =
+ (int (*)(const char *,int,unsigned))SYS(mknod);
int major = sysv_major(dev);
int minor = sysv_minor(dev);
/* minor is guaranteed to be OK for MKDEV, major might be not */
if (major > 0xfff)
return -EINVAL;
- return sys_mknod(A(path), mode, new_encode_dev(MKDEV(major,minor)));
+ return sys_mknod((const char *)A(path), mode,
+ new_encode_dev(MKDEV(major,minor)));
}
asmlinkage int solaris_xmknod(int vers, u32 path, u32 mode, s32 dev)
return solaris_mknod(path, mode, dev);
}
-asmlinkage int solaris_getdents64(unsigned int fd, void __user *dirent, unsigned int count)
+asmlinkage int solaris_getdents64(unsigned int fd, void *dirent, unsigned int count)
{
- int (*sys_getdents)(unsigned int, void __user *, unsigned int) =
- (int (*)(unsigned int, void __user *, unsigned int))SYS(getdents);
+ int (*sys_getdents)(unsigned int, void *, unsigned int) =
+ (int (*)(unsigned int, void *, unsigned int))SYS(getdents);
return sys_getdents(fd, dirent, count);
}
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_statfs)(const char __user *,struct statfs __user *) =
- (int (*)(const char __user *,struct statfs __user *))SYS(statfs);
- struct sol_statfs __user *ss = A(buf);
+ int (*sys_statfs)(const char *,struct statfs *) =
+ (int (*)(const char *,struct statfs *))SYS(statfs);
+ struct sol_statfs *ss = (struct sol_statfs *)A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
- /* FIXME: mixing userland and kernel pointers */
set_fs (KERNEL_DS);
- ret = sys_statfs(A(path), &s);
+ ret = sys_statfs((const char *)A(path), &s);
set_fs (old_fs);
if (!ret) {
if (put_user (s.f_type, &ss->f_type) ||
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_fstatfs)(unsigned,struct statfs __user *) =
- (int (*)(unsigned,struct statfs __user *))SYS(fstatfs);
- struct sol_statfs __user *ss = A(buf);
+ int (*sys_fstatfs)(unsigned,struct statfs *) =
+ (int (*)(unsigned,struct statfs *))SYS(fstatfs);
+ struct sol_statfs *ss = (struct sol_statfs *)A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
{
struct kstatfs s;
int error;
- struct sol_statvfs __user *ss = A(buf);
+ struct sol_statvfs *ss = (struct sol_statvfs *)A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode) || (mnt && MNT_IS_RDONLY(mnt))) i = 1;
+ if (IS_RDONLY(inode)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char __user *)&ss->f_basetype[j]) ||
+ __put_user (0, (char *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
{
struct kstatfs s;
int error;
- struct sol_statvfs64 __user *ss = A(buf);
+ struct sol_statvfs64 *ss = (struct sol_statvfs64 *)A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode) || (mnt && MNT_IS_RDONLY(mnt))) i = 1;
+ if (IS_RDONLY(inode)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char __user *)&ss->f_basetype[j]) ||
+ __put_user (0, (char *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
struct nameidata nd;
int error;
- error = user_path_walk(A(path),&nd);
+ error = user_path_walk((const char *)A(path),&nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs(nd.mnt, inode, buf);
int error;
lock_kernel();
- error = user_path_walk(A(path), &nd);
+ error = user_path_walk((const char *)A(path), &nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs64(nd.mnt, inode, buf);
case SOL_F_SETLKW:
{
struct flock f;
- struct sol_flock __user *p = A(arg);
mm_segment_t old_fs = get_fs();
switch (cmd) {
case SOL_F_SETLKW: cmd = F_SETLKW; break;
}
- if (get_user (f.l_type, &p->l_type) ||
- __get_user (f.l_whence, &p->l_whence) ||
- __get_user (f.l_start, &p->l_start) ||
- __get_user (f.l_len, &p->l_len) ||
- __get_user (f.l_pid, &p->l_sysid))
+ if (get_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
+ __get_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
+ __get_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
+ __get_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
+ __get_user (f.l_pid, &((struct sol_flock *)A(arg))->l_sysid))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_fcntl(fd, cmd, (unsigned long)&f);
set_fs(old_fs);
- if (__put_user (f.l_type, &p->l_type) ||
- __put_user (f.l_whence, &p->l_whence) ||
- __put_user (f.l_start, &p->l_start) ||
- __put_user (f.l_len, &p->l_len) ||
- __put_user (f.l_pid, &p->l_pid) ||
- __put_user (0, &p->l_sysid))
+ if (__put_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
+ __put_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
+ __put_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
+ __put_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
+ __put_user (f.l_pid, &((struct sol_flock *)A(arg))->l_pid) ||
+ __put_user (0, &((struct sol_flock *)A(arg))->l_sysid))
return -EFAULT;
return ret;
int (*sys_newftruncate)(unsigned int, unsigned long)=
(int (*)(unsigned int, unsigned long))SYS(ftruncate);
- if (get_user(length, &((struct sol_flock __user *)A(arg))->l_start))
+ if (get_user(length, &((struct sol_flock*)A(arg))->l_start))
return -EFAULT;
return sys_newftruncate(fd, length);
return -ENOSYS;
}
-asmlinkage int solaris_pread(unsigned int fd, char __user *buf, u32 count, u32 pos)
+asmlinkage int solaris_pread(unsigned int fd, char *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pread64)(unsigned int, char __user *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pread64);
+ ssize_t (*sys_pread64)(unsigned int, char *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pread64);
return sys_pread64(fd, buf, count, (loff_t)pos);
}
-asmlinkage int solaris_pwrite(unsigned int fd, char __user *buf, u32 count, u32 pos)
+asmlinkage int solaris_pwrite(unsigned int fd, char *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pwrite64)(unsigned int, char __user *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pwrite64);
+ ssize_t (*sys_pwrite64)(unsigned int, char *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pwrite64);
return sys_pwrite64(fd, buf, count, (loff_t)pos);
}
/* solaris_llseek returns long long - quite difficult */
asmlinkage long solaris_llseek(struct pt_regs *regs, u32 off_hi, u32 off_lo, int whence)
{
- int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int) =
- (int (*)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int))SYS(_llseek);
+ int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) =
+ (int (*)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int))SYS(_llseek);
int ret;
mm_segment_t old_fs = get_fs();
loff_t retval;
/* Have to mask out all but lower 3 bits */
asmlinkage int solaris_access(u32 filename, long mode)
{
- int (*sys_access)(const char __user *, int) =
- (int (*)(const char __user *, int))SYS(access);
+ int (*sys_access)(const char *, int) =
+ (int (*)(const char *, int))SYS(access);
- return sys_access(A(filename), mode & 7);
+ return sys_access((const char *)A(filename), mode & 7);
}
u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-extern int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
- char __user *data_buf, int data_len, int flags);
-extern int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, int __user *ctl_len,
- char __user *data_buf, int data_maxlen, int __user *data_len, int *flags);
+extern int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
+ char *data_buf, int data_len, int flags);
+extern int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, int *ctl_len,
+ char *data_buf, int data_maxlen, int *data_len, int *flags);
/* termio* stuff {{{ */
static inline int linux_to_solaris_termio(unsigned int fd, unsigned int cmd, u32 arg)
{
- struct solaris_termio __user *p = A(arg);
int ret;
- ret = sys_ioctl(fd, cmd, (unsigned long)p);
+ ret = sys_ioctl(fd, cmd, A(arg));
if (!ret) {
u32 cflag;
- if (__get_user (cflag, &p->c_cflag))
+ if (__get_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
return -EFAULT;
cflag = linux_to_solaris_cflag(cflag);
- if (__put_user (cflag, &p->c_cflag))
+ if (__put_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
return -EFAULT;
}
return ret;
struct solaris_termio s;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&s, (struct solaris_termio __user *)A(arg), sizeof(struct solaris_termio)))
+ if (copy_from_user (&s, (struct solaris_termio *)A(arg), sizeof(struct solaris_termio)))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, cmd, (unsigned long)&s);
set_fs(old_fs);
if (!ret) {
- struct solaris_termios __user *p = A(arg);
- if (put_user (s.c_iflag, &p->c_iflag) ||
- __put_user (s.c_oflag, &p->c_oflag) ||
- __put_user (linux_to_solaris_cflag(s.c_cflag), &p->c_cflag) ||
- __put_user (s.c_lflag, &p->c_lflag) ||
- __copy_to_user (p->c_cc, s.c_cc, 16) ||
- __clear_user (p->c_cc + 16, 2))
+ if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
+ __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
+ __put_user (linux_to_solaris_cflag(s.c_cflag), &((struct solaris_termios *)A(arg))->c_cflag) ||
+ __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
+ __copy_to_user (((struct solaris_termios *)A(arg))->c_cc, s.c_cc, 16) ||
+ __clear_user (((struct solaris_termios *)A(arg))->c_cc + 16, 2))
return -EFAULT;
}
return ret;
{
int ret;
struct solaris_termios s;
- struct solaris_termios __user *p = A(arg);
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, TCGETS, (unsigned long)&s);
set_fs(old_fs);
if (ret) return ret;
- if (put_user (s.c_iflag, &p->c_iflag) ||
- __put_user (s.c_oflag, &p->c_oflag) ||
- __put_user (s.c_cflag, &p->c_cflag) ||
- __put_user (s.c_lflag, &p->c_lflag) ||
- __copy_from_user (s.c_cc, p->c_cc, 16))
+ if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
+ __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
+ __put_user (s.c_cflag, &((struct solaris_termios *)A(arg))->c_cflag) ||
+ __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
+ __copy_from_user (s.c_cc, ((struct solaris_termios *)A(arg))->c_cc, 16))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
case 109: /* SI_SOCKPARAMS */
{
struct solaris_si_sockparams si;
- if (copy_from_user (&si, A(arg), sizeof(si)))
+ if (copy_from_user (&si, (struct solaris_si_sockparams *) A(arg), sizeof(si)))
return (EFAULT << 8) | TSYSERR;
/* Should we modify socket ino->socket_i.ops and type? */
case 110: /* SI_GETUDATA */
{
int etsdusize, servtype;
- struct solaris_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &p->tidusize) ||
- __put_user(sizeof(struct sockaddr), &p->addrsize) ||
- __put_user(-1, &p->optsize) ||
- __put_user(etsdusize, &p->etsdusize) ||
- __put_user(servtype, &p->servtype) ||
- __put_user(0, &p->so_state) ||
- __put_user(0, &p->so_options) ||
- __put_user(16384, &p->tsdusize) ||
- __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
- __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
- __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
+ if (put_user(16384, &((struct solaris_si_udata *)A(arg))->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &((struct solaris_si_udata *)A(arg))->addrsize) ||
+ __put_user(-1, &((struct solaris_si_udata *)A(arg))->optsize) ||
+ __put_user(etsdusize, &((struct solaris_si_udata *)A(arg))->etsdusize) ||
+ __put_user(servtype, &((struct solaris_si_udata *)A(arg))->servtype) ||
+ __put_user(0, &((struct solaris_si_udata *)A(arg))->so_state) ||
+ __put_user(0, &((struct solaris_si_udata *)A(arg))->so_options) ||
+ __put_user(16384, &((struct solaris_si_udata *)A(arg))->tsdusize) ||
+ __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_family) ||
+ __put_user(SOCKET_I(ino)->type, &((struct solaris_si_udata *)A(arg))->sockparams.sp_type) ||
+ __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_protocol))
return (EFAULT << 8) | TSYSERR;
return 0;
}
case 101: /* O_SI_GETUDATA */
{
int etsdusize, servtype;
- struct solaris_o_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &p->tidusize) ||
- __put_user(sizeof(struct sockaddr), &p->addrsize) ||
- __put_user(-1, &p->optsize) ||
- __put_user(etsdusize, &p->etsdusize) ||
- __put_user(servtype, &p->servtype) ||
- __put_user(0, &p->so_state) ||
- __put_user(0, &p->so_options) ||
- __put_user(16384, &p->tsdusize))
+ if (put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &((struct solaris_o_si_udata *)A(arg))->addrsize) ||
+ __put_user(-1, &((struct solaris_o_si_udata *)A(arg))->optsize) ||
+ __put_user(etsdusize, &((struct solaris_o_si_udata *)A(arg))->etsdusize) ||
+ __put_user(servtype, &((struct solaris_o_si_udata *)A(arg))->servtype) ||
+ __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_state) ||
+ __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_options) ||
+ __put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tsdusize))
return (EFAULT << 8) | TSYSERR;
return 0;
}
}
static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
- int len, int __user *len_p)
+ int len, int *len_p)
{
int ret;
int i;
u32 prim;
SOLD("TI_OPMGMT entry");
- ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 __user *)A(arg)))
+ if (get_user(prim, (u32 *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 __user *)A(arg)+3) ||
- get_user(tmp2, (u32 __user *)A(arg)+2))
+ if (get_user(tmp, (u32 *)A(arg)+3) ||
+ get_user(tmp2, (u32 *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
int i;
u32 prim;
SOLD("TI_BIND entry");
- ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
len = 1024; /* Solaris allows arbitrary return size */
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 __user *)A(arg)))
+ if (get_user(prim, (u32 *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 __user *)A(arg)+3) ||
- get_user(tmp2, (u32 __user *)A(arg)+2))
+ if (get_user(tmp, (u32 *)A(arg)+3) ||
+ get_user(tmp2, (u32 *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
SOLD("OK_ACK requested");
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
return -ENOSYS;
case 2: /* I_PUSH */
{
- p = getname (A(arg));
+ p = getname ((char *)A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = -EINVAL;
const char *p;
if (sock->modcount <= 0) return -EINVAL;
p = module_table[(unsigned)sock->module[sock->modcount]].name;
- if (copy_to_user (A(arg), p, strlen(p)))
+ if (copy_to_user ((char *)A(arg), p, strlen(p)))
return -EFAULT;
return 0;
}
case 5: /* I_FLUSH */
return 0;
case 8: /* I_STR */
- if (copy_from_user(&si, A(arg), sizeof(struct strioctl)))
+ if (copy_from_user(&si, (struct strioctl *)A(arg), sizeof(struct strioctl)))
return -EFAULT;
/* We ignore what module is actually at the top of stack. */
switch ((si.cmd >> 8) & 0xff) {
return solaris_sockmod(fd, si.cmd, si.data);
case 'T':
return solaris_timod(fd, si.cmd, si.data, si.len,
- &((struct strioctl __user *)A(arg))->len);
+ &((struct strioctl*)A(arg))->len);
default:
return solaris_ioctl(fd, si.cmd, si.data);
}
case 11: /* I_FIND */
{
int i;
- p = getname (A(arg));
+ p = getname ((char *)A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = 0;
return 0; /* We don't support them */
case 1: /* SIOCGHIWAT */
case 3: /* SIOCGLOWAT */
- if (put_user (0, (u32 __user *)A(arg)))
+ if (put_user (0, (u32 *)A(arg)))
return -EFAULT;
return 0; /* Lie */
case 7: /* SIOCATMARK */
args);
set_fs(old_fs);
if (ret >= 0) {
- if (copy_to_user(A(arg), &uaddr, uaddr_len))
+ if (copy_to_user((char *)A(arg), &uaddr, uaddr_len))
return -EFAULT;
}
return ret;
for (d = dev_base; d; d = d->next) i++;
read_unlock_bh(&dev_base_lock);
- if (put_user (i, (int __user *)A(arg)))
+ if (put_user (i, (int *)A(arg)))
return -EFAULT;
return 0;
}
asmlinkage long solaris_shmsys(int cmd, u32 arg1, u32 arg2, u32 arg3)
{
- int (*sys_ipc)(unsigned,int,int,unsigned long,void __user *,long) =
- (int (*)(unsigned,int,int,unsigned long,void __user *,long))SYS(ipc);
+ int (*sys_ipc)(unsigned,int,int,unsigned long,void *,long) =
+ (int (*)(unsigned,int,int,unsigned long,void *,long))SYS(ipc);
mm_segment_t old_fs;
unsigned long raddr;
int ret;
case 0: /* shmat */
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, A(arg2), 0);
+ ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, (void *)A(arg2), 0);
set_fs(old_fs);
if (ret >= 0) return (u32)raddr;
else return ret;
case 11: /* IPC_SET */
{
struct shmid_ds s;
- struct solaris_shmid_ds __user *p = A(arg3);
- if (get_user (s.shm_perm.uid, &p->shm_perm.uid) ||
- __get_user (s.shm_perm.gid, &p->shm_perm.gid) ||
- __get_user (s.shm_perm.mode, &p->shm_perm.mode))
+ if (get_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
+ __get_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
+ __get_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
case 12: /* IPC_STAT */
{
struct shmid_ds s;
- struct solaris_shmid_ds __user *p = A(arg3);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
set_fs(old_fs);
- if (put_user (s.shm_perm.uid, &(p->shm_perm.uid)) ||
- __put_user (s.shm_perm.gid, &(p->shm_perm.gid)) ||
- __put_user (s.shm_perm.cuid, &(p->shm_perm.cuid)) ||
- __put_user (s.shm_perm.cgid, &(p->shm_perm.cgid)) ||
- __put_user (s.shm_perm.mode, &(p->shm_perm.mode)) ||
- __put_user (s.shm_perm.seq, &(p->shm_perm.seq)) ||
- __put_user (s.shm_perm.key, &(p->shm_perm.key)) ||
- __put_user (s.shm_segsz, &(p->shm_segsz)) ||
- __put_user (s.shm_lpid, &(p->shm_lpid)) ||
- __put_user (s.shm_cpid, &(p->shm_cpid)) ||
- __put_user (s.shm_nattch, &(p->shm_nattch)) ||
- __put_user (s.shm_atime, &(p->shm_atime)) ||
- __put_user (s.shm_dtime, &(p->shm_dtime)) ||
- __put_user (s.shm_ctime, &(p->shm_ctime)))
+ if (put_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
+ __put_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
+ __put_user (s.shm_perm.cuid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cuid)) ||
+ __put_user (s.shm_perm.cgid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cgid)) ||
+ __put_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)) ||
+ __put_user (s.shm_perm.seq, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.seq)) ||
+ __put_user (s.shm_perm.key, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.key)) ||
+ __put_user (s.shm_segsz, &(((struct solaris_shmid_ds *)A(arg3))->shm_segsz)) ||
+ __put_user (s.shm_lpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_lpid)) ||
+ __put_user (s.shm_cpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_cpid)) ||
+ __put_user (s.shm_nattch, &(((struct solaris_shmid_ds *)A(arg3))->shm_nattch)) ||
+ __put_user (s.shm_atime, &(((struct solaris_shmid_ds *)A(arg3))->shm_atime)) ||
+ __put_user (s.shm_dtime, &(((struct solaris_shmid_ds *)A(arg3))->shm_dtime)) ||
+ __put_user (s.shm_ctime, &(((struct solaris_shmid_ds *)A(arg3))->shm_ctime)))
return -EFAULT;
return ret;
}
default: return -EINVAL;
}
case 2: /* shmdt */
- return sys_ipc(SHMDT, 0, 0, 0, A(arg1), 0);
+ return sys_ipc(SHMDT, 0, 0, 0, (void *)A(arg1), 0);
case 3: /* shmget */
return sys_ipc(SHMGET, arg1, arg2, arg3, NULL, 0);
}
u32 offlo;
if (regs->u_regs[UREG_G1]) {
- if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
+ if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
return -EFAULT;
} else {
- if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
+ if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
return -EFAULT;
}
return do_solaris_mmap((u32)regs->u_regs[UREG_I0], len, prot, flags, fd, (((u64)offhi)<<32)|offlo);
for (p=from,i=0; *p && *p != '.' && --len; p++,i++); \
else \
i = len - 1; \
- if (__put_user('\0', (char __user *)((to)+i))) \
+ if (__put_user('\0', (char *)((to)+i))) \
return -EFAULT; \
}
asmlinkage int solaris_utssys(u32 buf, u32 flags, int which, u32 buf2)
{
- struct sol_uname __user *v = A(buf);
switch (which) {
case 0: /* old uname */
/* Let's cheat */
- set_utsfield(v->sysname, "SunOS", 1, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->sysname,
+ "SunOS", 1, 0);
down_read(&uts_sem);
- set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
+ set_utsfield(((struct sol_uname *)A(buf))->nodename,
+ system_utsname.nodename, 1, 1);
up_read(&uts_sem);
- set_utsfield(v->release, "2.6", 0, 0);
- set_utsfield(v->version, "Generic", 0, 0);
- set_utsfield(v->machine, machine(), 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->release,
+ "2.6", 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->version,
+ "Generic", 0, 0);
+ set_utsfield(((struct sol_uname *)A(buf))->machine,
+ machine(), 0, 0);
return 0;
case 2: /* ustat */
return -ENOSYS;
asmlinkage int solaris_utsname(u32 buf)
{
- struct sol_utsname __user *v = A(buf);
/* Why should we not lie a bit? */
down_read(&uts_sem);
- set_utsfield(v->sysname, "SunOS", 0, 0);
- set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
- set_utsfield(v->release, "5.6", 0, 0);
- set_utsfield(v->version, "Generic", 0, 0);
- set_utsfield(v->machine, machine(), 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->sysname,
+ "SunOS", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->nodename,
+ system_utsname.nodename, 1, 1);
+ set_utsfield(((struct sol_utsname *)A(buf))->release,
+ "5.6", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->version,
+ "Generic", 0, 0);
+ set_utsfield(((struct sol_utsname *)A(buf))->machine,
+ machine(), 0, 0);
up_read(&uts_sem);
return 0;
}
}
len = strlen(r) + 1;
if (count < len) {
- if (copy_to_user(A(buf), r, count - 1) ||
- __put_user(0, (char __user *)A(buf) + count - 1))
+ if (copy_to_user((char *)A(buf), r, count - 1) ||
+ __put_user(0, (char *)A(buf) + count - 1))
return -EFAULT;
} else {
- if (copy_to_user(A(buf), r, len))
+ if (copy_to_user((char *)A(buf), r, len))
return -EFAULT;
}
return len;
u32 rlim_max;
};
-asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r;
int ret;
return ret;
}
-asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit __user *rlim)
+asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit *rlim)
{
struct rlimit r;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit __user *rlim)
+asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
- (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit *) =
+ (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
s32 stbcnt;
};
-asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval __user *ntp)
+asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval *ntp)
{
- int (*sys_adjtimex)(struct timex __user *) =
- (int (*)(struct timex __user *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex *) =
+ (int (*)(struct timex *))SYS(adjtimex);
struct timex t;
int ret;
mm_segment_t old_fs = get_fs();
return ret;
}
-asmlinkage int solaris_ntp_adjtime(struct sol_timex __user *txp)
+asmlinkage int solaris_ntp_adjtime(struct sol_timex *txp)
{
- int (*sys_adjtimex)(struct timex __user *) =
- (int (*)(struct timex __user *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex *) =
+ (int (*)(struct timex *))SYS(adjtimex);
struct timex t;
int ret, err;
mm_segment_t old_fs = get_fs();
struct sigaction sa, old;
int ret;
mm_segment_t old_fs = get_fs();
- int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
- (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+ int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
+ (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
sigemptyset(&sa.sa_mask);
sa.sa_restorer = NULL;
sa.sa_flags = 0;
if (one_shot) sa.sa_flags = SA_ONESHOT | SA_NOMASK;
set_fs (KERNEL_DS);
- ret = sys_sigaction(sig, (void __user *)&sa, (void __user *)&old);
+ ret = sys_sigaction(sig, &sa, &old);
set_fs (old_fs);
if (ret < 0) return ret;
- return (u32)(unsigned long)old.sa_handler;
+ return (u32)(long)old.sa_handler;
}
static inline long solaris_signal(int sig, u32 arg)
static inline long solaris_sigignore(int sig)
{
- return sig_handler(sig, (u32)(unsigned long)SIG_IGN, 0);
+ return sig_handler (sig, (u32)(long)SIG_IGN, 0);
}
static inline long solaris_sigpause(int sig)
sigset_t in_s, *ins, out_s, *outs;
mm_segment_t old_fs = get_fs();
int ret;
- int (*sys_sigprocmask)(int,sigset_t __user *,sigset_t __user *) =
- (int (*)(int,sigset_t __user *,sigset_t __user *))SYS(sigprocmask);
+ int (*sys_sigprocmask)(int,sigset_t *,sigset_t *) =
+ (int (*)(int,sigset_t *,sigset_t *))SYS(sigprocmask);
ins = NULL; outs = NULL;
if (in) {
u32 tmp[2];
- if (copy_from_user (tmp, (void __user *)A(in), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (sol_sigset_t *)A(in), 2*sizeof(u32)))
return -EFAULT;
ins = &in_s;
if (mapin (tmp, ins)) return -EINVAL;
}
if (out) outs = &out_s;
set_fs (KERNEL_DS);
- ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how,
- (void __user *)ins, (void __user *)outs);
+ ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how, ins, outs);
set_fs (old_fs);
if (ret) return ret;
if (out) {
tmp[2] = 0; tmp[3] = 0;
if (mapout (outs, tmp)) return -EINVAL;
- if (copy_to_user((void __user *)A(out), tmp, 4*sizeof(u32)))
+ if (copy_to_user((sol_sigset_t *)A(out), tmp, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
sigset_t s;
u32 tmp[2];
- if (copy_from_user (tmp, (sol_sigset_t __user *)A(mask), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (sol_sigset_t *)A(mask), 2*sizeof(u32)))
return -EFAULT;
if (mapin (tmp, &s)) return -EINVAL;
return (long)s.sig[0];
struct sigaction s, s2;
int ret;
mm_segment_t old_fs = get_fs();
- struct sol_sigaction __user *p = (void __user *)A(old);
- int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
- (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+ int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
+ (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
sig = mapsig(sig);
if (sig < 0) {
/* We cheat a little bit for Solaris only signals */
- if (old && clear_user(p, sizeof(struct sol_sigaction)))
+ if (old && clear_user((struct sol_sigaction *)A(old), sizeof(struct sol_sigaction)))
return -EFAULT;
return 0;
}
if (act) {
- if (get_user (tmp, &p->sa_flags))
+ if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_flags))
return -EFAULT;
s.sa_flags = 0;
if (tmp & SOLARIS_SA_ONSTACK) s.sa_flags |= SA_STACK;
if (tmp & SOLARIS_SA_NODEFER) s.sa_flags |= SA_NOMASK;
if (tmp & SOLARIS_SA_RESETHAND) s.sa_flags |= SA_ONESHOT;
if (tmp & SOLARIS_SA_NOCLDSTOP) s.sa_flags |= SA_NOCLDSTOP;
- if (get_user (tmp, &p->sa_handler) ||
- copy_from_user (tmp2, &p->sa_mask, 2*sizeof(u32)))
+ if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_handler) ||
+ copy_from_user (tmp2, &((struct sol_sigaction *)A(act))->sa_mask, 2*sizeof(u32)))
return -EFAULT;
s.sa_handler = (__sighandler_t)A(tmp);
if (mapin (tmp2, &s.sa_mask)) return -EINVAL;
- s.sa_restorer = NULL;
+ s.sa_restorer = 0;
}
set_fs(KERNEL_DS);
- ret = sys_sigaction(sig, act ? (void __user *)&s : NULL,
- old ? (void __user *)&s2 : NULL);
+ ret = sys_sigaction(sig, act ? &s : NULL, old ? &s2 : NULL);
set_fs(old_fs);
if (ret) return ret;
if (old) {
if (s2.sa_flags & SA_NOMASK) tmp |= SOLARIS_SA_NODEFER;
if (s2.sa_flags & SA_ONESHOT) tmp |= SOLARIS_SA_RESETHAND;
if (s2.sa_flags & SA_NOCLDSTOP) tmp |= SOLARIS_SA_NOCLDSTOP;
- if (put_user (tmp, &p->sa_flags) ||
- __put_user ((u32)(unsigned long)s2.sa_handler, &p->sa_handler) ||
- copy_to_user (&p->sa_mask, tmp2, 4*sizeof(u32)))
+ if (put_user (tmp, &((struct sol_sigaction *)A(old))->sa_flags) ||
+ __put_user ((u32)(long)s2.sa_handler, &((struct sol_sigaction *)A(old))->sa_handler) ||
+ copy_to_user (&((struct sol_sigaction *)A(old))->sa_mask, tmp2, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
}
if (mapout (&s, tmp)) return -EINVAL;
tmp[2] = 0; tmp[3] = 0;
- if (copy_to_user ((u32 __user *)A(set), tmp, sizeof(tmp)))
+ if (copy_to_user ((u32 *)A(set), tmp, sizeof(tmp)))
return -EFAULT;
return 0;
}
asmlinkage int solaris_wait(u32 stat_loc)
{
- unsigned __user *p = (unsigned __user *)A(stat_loc);
- int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
- (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+ int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
+ (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
int ret, status;
- ret = sys_wait4(-1, p, WUNTRACED, NULL);
+ ret = sys_wait4(-1, (unsigned int *)A(stat_loc), WUNTRACED, NULL);
if (ret >= 0 && stat_loc) {
- if (get_user (status, p))
+ if (get_user (status, (unsigned int *)A(stat_loc)))
return -EFAULT;
if (((status - 1) & 0xffff) < 0xff)
status = linux_to_solaris_signals[status & 0x7f] & 0x7f;
else if ((status & 0xff) == 0x7f)
status = (linux_to_solaris_signals[(status >> 8) & 0xff] << 8) | 0x7f;
- if (__put_user (status, p))
+ if (__put_user (status, (unsigned int *)A(stat_loc)))
return -EFAULT;
}
return ret;
asmlinkage int solaris_waitid(int idtype, s32 pid, u32 info, int options)
{
- int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
- (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+ int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
+ (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
int opts, status, ret;
switch (idtype) {
if (options & SOLARIS_WUNTRACED) opts |= WUNTRACED;
if (options & SOLARIS_WNOHANG) opts |= WNOHANG;
current->state = TASK_RUNNING;
- ret = sys_wait4(pid, (unsigned int __user *)A(info), opts, NULL);
+ ret = sys_wait4(pid, (unsigned int *)A(info), opts, NULL);
if (ret < 0) return ret;
if (info) {
- struct sol_siginfo __user *s = (void __user *)A(info);
+ struct sol_siginfo *s = (struct sol_siginfo *)A(info);
- if (get_user (status, (unsigned int __user *)A(info)))
+ if (get_user (status, (unsigned int *)A(info)))
return -EFAULT;
if (__put_user (SOLARIS_SIGCLD, &s->si_signo) ||
return sunos_getsockopt(fd, level, optname, optval, optlen);
}
-asmlinkage int solaris_connect(int fd, struct sockaddr __user *addr, int addrlen)
+asmlinkage int solaris_connect(int fd, struct sockaddr *addr, int addrlen)
{
- int (*sys_connect)(int, struct sockaddr __user *, int) =
- (int (*)(int, struct sockaddr __user *, int))SYS(connect);
+ int (*sys_connect)(int, struct sockaddr *, int) =
+ (int (*)(int, struct sockaddr *, int))SYS(connect);
return sys_connect(fd, addr, addrlen);
}
-asmlinkage int solaris_accept(int fd, struct sockaddr __user *addr, int __user *addrlen)
+asmlinkage int solaris_accept(int fd, struct sockaddr *addr, int *addrlen)
{
- int (*sys_accept)(int, struct sockaddr __user *, int __user *) =
- (int (*)(int, struct sockaddr __user *, int __user *))SYS(accept);
+ int (*sys_accept)(int, struct sockaddr *, int *) =
+ (int (*)(int, struct sockaddr *, int *))SYS(accept);
return sys_accept(fd, addr, addrlen);
}
return fl;
}
-asmlinkage int solaris_recvfrom(int s, char __user *buf, int len, int flags, u32 from, u32 fromlen)
+asmlinkage int solaris_recvfrom(int s, char *buf, int len, int flags, u32 from, u32 fromlen)
{
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
- return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), A(from), A(fromlen));
+ return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(from), (int *)A(fromlen));
}
-asmlinkage int solaris_recv(int s, char __user *buf, int len, int flags)
+asmlinkage int solaris_recv(int s, char *buf, int len, int flags)
{
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
}
-asmlinkage int solaris_sendto(int s, char __user *buf, int len, int flags, u32 to, u32 tolen)
+asmlinkage int solaris_sendto(int s, char *buf, int len, int flags, u32 to, u32 tolen)
{
- int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(sendto);
+ int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(sendto);
- return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), A(to), A(tolen));
+ return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(to), (int *)A(tolen));
}
asmlinkage int solaris_send(int s, char *buf, int len, int flags)
};
static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
- struct sol_nmsghdr __user *umsg)
+ struct sol_nmsghdr *umsg)
{
u32 tmp1, tmp2, tmp3;
int err;
if (err)
return -EFAULT;
- kmsg->msg_name = A(tmp1);
- kmsg->msg_iov = A(tmp2);
- kmsg->msg_control = A(tmp3);
+ kmsg->msg_name = (void *)A(tmp1);
+ kmsg->msg_iov = (struct iovec *)A(tmp2);
+ kmsg->msg_control = (void *)A(tmp3);
err = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
return err;
}
-asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned user_flags)
+asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr *user_msg, unsigned user_flags)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
total_len = err;
if(kern_msg.msg_controllen) {
- struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
+ struct sol_cmsghdr *ucmsg = (struct sol_cmsghdr *)kern_msg.msg_control;
unsigned long *kcmsg;
compat_size_t cmlen;
return err;
}
-asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
+asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr *user_msg, unsigned int user_flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct msghdr kern_msg;
char addr[MAX_SOCK_ADDR];
struct socket *sock;
struct iovec *iov = iovstack;
- struct sockaddr __user *uaddr;
- int __user *uaddr_len;
+ struct sockaddr *uaddr;
+ int *uaddr_len;
unsigned long cmsg_ptr;
int err, total_len, len = 0;
SOLD("done");
}
-static int timod_optmgmt(unsigned int fd, int flag, char __user *opt_buf, int opt_len, int do_ret)
+static int timod_optmgmt(unsigned int fd, int flag, char *opt_buf, int opt_len, int do_ret)
{
int error, failed;
int ret_space, ret_len;
return 0;
}
-int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
- char __user *data_buf, int data_len, int flags)
+int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
+ char *data_buf, int data_len, int flags)
{
int ret, error, terror;
char *buf;
struct sol_socket_struct *sock;
mm_segment_t old_fs = get_fs();
long args[6];
- int (*sys_socketcall)(int, unsigned long __user *) =
- (int (*)(int, unsigned long __user *))SYS(socketcall);
- int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int) =
- (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int))SYS(sendto);
+ int (*sys_socketcall)(int, unsigned long *) =
+ (int (*)(int, unsigned long *))SYS(socketcall);
+ int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) =
+ (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto);
filp = current->files->fd[fd];
ino = filp->f_dentry->d_inode;
sock = (struct sol_socket_struct *)filp->private_data;
SOLD("entry");
- if (get_user(ret, (int __user *)A(ctl_buf)))
+ if (get_user(ret, (int *)A(ctl_buf)))
return -EFAULT;
switch (ret) {
case T_BIND_REQ:
printk("\n");
}
#endif
- err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr __user *)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
+ err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr*)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
if (err == data_len)
return 0;
if(err >= 0) {
return -EINVAL;
}
-int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, s32 __user *ctl_len,
- char __user *data_buf, int data_maxlen, s32 __user *data_len, int *flags_p)
+int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
+ char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
{
int error;
int oldflags;
struct T_unitdata_ind udi;
mm_segment_t old_fs = get_fs();
long args[6];
- char __user *tmpbuf;
+ char *tmpbuf;
int tmplen;
- int (*sys_socketcall)(int, unsigned long __user *) =
- (int (*)(int, unsigned long __user *))SYS(socketcall);
- int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *);
+ int (*sys_socketcall)(int, unsigned long *) =
+ (int (*)(int, unsigned long *))SYS(socketcall);
+ int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);
SOLD("entry");
SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
oldflags = filp->f_flags;
filp->f_flags |= O_NONBLOCK;
SOLD("calling recvfrom");
- sys_recvfrom = (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
- error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr __user *)tmpbuf, ctl_len);
+ sys_recvfrom = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
+ error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr*)tmpbuf, ctl_len);
filp->f_flags = oldflags;
if (error < 0)
return error;
{
struct file *filp;
struct inode *ino;
- struct strbuf __user *ctlptr;
- struct strbuf __user *datptr;
+ struct strbuf *ctlptr, *datptr;
struct strbuf ctl, dat;
- int __user *flgptr;
+ int *flgptr;
int flags;
int error = -EBADF;
if (!ino->i_sock)
goto out;
- ctlptr = (struct strbuf __user *)A(arg1);
- datptr = (struct strbuf __user *)A(arg2);
- flgptr = (int __user *)A(arg3);
+ ctlptr = (struct strbuf *)A(arg1);
+ datptr = (struct strbuf *)A(arg2);
+ flgptr = (int *)A(arg3);
error = -EFAULT;
goto out;
}
- error = timod_getmsg(fd,A(ctl.buf),ctl.maxlen,&ctlptr->len,
- A(dat.buf),dat.maxlen,&datptr->len,&flags);
+ error = timod_getmsg(fd,(char*)A(ctl.buf),ctl.maxlen,&ctlptr->len,
+ (char*)A(dat.buf),dat.maxlen,&datptr->len,&flags);
if (!error && put_user(flags,flgptr))
error = -EFAULT;
{
struct file *filp;
struct inode *ino;
- struct strbuf __user *ctlptr;
- struct strbuf __user *datptr;
+ struct strbuf *ctlptr, *datptr;
struct strbuf ctl, dat;
int flags = (int) arg3;
int error = -EBADF;
(imajor(ino) != 30 || iminor(ino) != 1))
goto out;
- ctlptr = A(arg1);
- datptr = A(arg2);
+ ctlptr = (struct strbuf *)A(arg1);
+ datptr = (struct strbuf *)A(arg2);
error = -EFAULT;
dat.buf = 0;
}
- error = timod_putmsg(fd,A(ctl.buf),ctl.len,
- A(dat.buf),dat.len,flags);
+ error = timod_putmsg(fd,(char*)A(ctl.buf),ctl.len,
+ (char*)A(dat.buf),dat.len,flags);
out:
unlock_kernel();
SOLD("done");
include $(ARCH_DIR)/Makefile-$(SUBARCH)
include $(ARCH_DIR)/Makefile-os-$(OS)
-EXTRAVERSION := $(EXTRAVERSION)-1um
+EXTRAVERSION := $(EXTRAVERSION)-2um
ARCH_INCLUDE = -I$(ARCH_DIR)/include
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
timer_alive = 1;
unlock_kernel();
- return nonseekable_open(inode, file);
+ return 0;
}
extern void stop_watchdog(int in_fd, int out_fd);
static ssize_t harddog_write(struct file *file, const char *data, size_t len,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
+++ /dev/null
-/*
- * Copyright (C) 2002 Steve Schmidtke
- * Licensed under the GPL
- */
-
-#include <sys/types.h>
-#include <unistd.h>
-#include <errno.h>
-#include "hostaudio.h"
-#include "user_util.h"
-#include "kern_util.h"
-#include "user.h"
-#include "os.h"
-
-/* /dev/dsp file operations */
-
-ssize_t hostaudio_read_user(struct hostaudio_state *state, char *buffer,
- size_t count, loff_t *ppos)
-{
-#ifdef DEBUG
- printk("hostaudio: read_user called, count = %d\n", count);
-#endif
-
- return(os_read_file(state->fd, buffer, count));
-}
-
-ssize_t hostaudio_write_user(struct hostaudio_state *state, const char *buffer,
- size_t count, loff_t *ppos)
-{
-#ifdef DEBUG
- printk("hostaudio: write_user called, count = %d\n", count);
-#endif
-
- return(os_write_file(state->fd, buffer, count));
-}
-
-int hostaudio_ioctl_user(struct hostaudio_state *state, unsigned int cmd,
- unsigned long arg)
-{
-#ifdef DEBUG
- printk("hostaudio: ioctl_user called, cmd = %u\n", cmd);
-#endif
-
- return(os_ioctl_generic(state->fd, cmd, arg));
-}
-
-int hostaudio_open_user(struct hostaudio_state *state, int r, int w, char *dsp)
-{
-#ifdef DEBUG
- printk("hostaudio: open_user called\n");
-#endif
-
- state->fd = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
-
- if(state->fd < 0) {
- printk("hostaudio_open_user failed to open '%s', err = %d\n",
- dsp, -state->fd);
- return(state->fd);
- }
-
- return(0);
-}
-
-int hostaudio_release_user(struct hostaudio_state *state)
-{
-#ifdef DEBUG
- printk("hostaudio: release called\n");
-#endif
- if(state->fd >= 0){
- os_close_file(state->fd);
- state->fd = -1;
- }
-
- return(0);
-}
-
-/* /dev/mixer file operations */
-
-int hostmixer_ioctl_mixdev_user(struct hostmixer_state *state,
- unsigned int cmd, unsigned long arg)
-{
-#ifdef DEBUG
- printk("hostmixer: ioctl_user called cmd = %u\n",cmd);
-#endif
-
- return(os_ioctl_generic(state->fd, cmd, arg));
-}
-
-int hostmixer_open_mixdev_user(struct hostmixer_state *state, int r, int w,
- char *mixer)
-{
-#ifdef DEBUG
- printk("hostmixer: open_user called\n");
-#endif
-
- state->fd = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
-
- if(state->fd < 0) {
- printk("hostaudio_open_mixdev_user failed to open '%s', "
- "err = %d\n", mixer, state->fd);
- return(state->fd);
- }
-
- return(0);
-}
-
-int hostmixer_release_mixdev_user(struct hostmixer_state *state)
-{
-#ifdef DEBUG
- printk("hostmixer: release_user called\n");
-#endif
-
- if(state->fd >= 0){
- os_close_file(state->fd);
- state->fd = -1;
- }
-
- return 0;
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
late_initcall(stdio_init);
-static void uml_console_write(struct console *console, const char *string,
- unsigned len)
+static void stdio_console_write(struct console *console, const char *string,
+ unsigned len)
{
struct line *line = &vts[console->index];
up(&line->sem);
}
-static struct tty_driver *uml_console_device(struct console *c, int *index)
+static struct tty_driver *stdio_console_device(struct console *c, int *index)
{
*index = c->index;
return console_driver;
}
-static int uml_console_setup(struct console *co, char *options)
+static int stdio_console_setup(struct console *co, char *options)
{
return(0);
}
static struct console stdiocons = {
name: "tty",
- write: uml_console_write,
- device: uml_console_device,
- setup: uml_console_setup,
+ write: stdio_console_write,
+ device: stdio_console_device,
+ setup: stdio_console_setup,
flags: CON_PRINTBUFFER,
index: -1,
};
+++ /dev/null
-/*
- * Copyright (C) 2002 Steve Schmidtke
- * Licensed under the GPL
- */
-
-#ifndef HOSTAUDIO_H
-#define HOSTAUDIO_H
-
-#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
-#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"
-
-struct hostaudio_state {
- int fd;
-};
-
-struct hostmixer_state {
- int fd;
-};
-
-/* UML user-side protoypes */
-extern ssize_t hostaudio_read_user(struct hostaudio_state *state, char *buffer,
- size_t count, loff_t *ppos);
-extern ssize_t hostaudio_write_user(struct hostaudio_state *state,
- const char *buffer, size_t count,
- loff_t *ppos);
-extern int hostaudio_ioctl_user(struct hostaudio_state *state,
- unsigned int cmd, unsigned long arg);
-extern int hostaudio_open_user(struct hostaudio_state *state, int r, int w,
- char *dsp);
-extern int hostaudio_release_user(struct hostaudio_state *state);
-extern int hostmixer_ioctl_mixdev_user(struct hostmixer_state *state,
- unsigned int cmd, unsigned long arg);
-extern int hostmixer_open_mixdev_user(struct hostmixer_state *state, int r,
- int w, char *mixer);
-extern int hostmixer_release_mixdev_user(struct hostmixer_state *state);
-
-#endif /* HOSTAUDIO_H */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H
-#include "linux/in6.h"
#include "linux/string.h"
+#include "linux/in6.h"
/*
* computes the checksum of a memory block at buff, length len,
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/init_task.h"
+#include "linux/version.h"
#include "linux/mqueue.h"
#include "asm/uaccess.h"
#include "asm/pgtable.h"
+++ /dev/null
-#ifndef __MPROT_H__
-#define __MPROT_H__
-
-extern void no_access(unsigned long addr, unsigned int len);
-
-#endif
extern syscall_handler_t old_select;
extern syscall_handler_t sys_modify_ldt;
extern syscall_handler_t sys_rt_sigsuspend;
-
extern syscall_handler_t sys_vserver;
syscall_handler_t *sys_call_table[] = {
-obj-y = bitops.o bugs.o checksum.o fault.o ksyms.o ldt.o ptrace.o \
- ptrace_user.o semaphore.o sigcontext.o syscalls.o sysrq.o
+obj-y = bugs.o checksum.o fault.o ksyms.o ldt.o ptrace.o ptrace_user.o \
+ semaphore.o bitops.o sigcontext.o syscalls.o sysrq.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_MODULES) += module.o
USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
-SYMLINKS = bitops.c semaphore.c highmem.c module.c
+SYMLINKS = semaphore.c highmem.c module.c bitops.c
SYMLINKS := $(foreach f,$(SYMLINKS),$(src)/$f)
clean-files := $(SYMLINKS)
-bitops.c-dir = lib
semaphore.c-dir = kernel
highmem.c-dir = mm
module.c-dir = kernel
+bitops.c-dir = lib
define make_link
-rm -f $1
+++ /dev/null
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for nonzero in the first 32 bits:
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (*p >> bit));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No set bit yet, search remaining full words for a bit
- */
- res = find_first_bit (p, size - 32 * (p - addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
- unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for zero in the first 32 bits.
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (~(*p >> bit)));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No zero yet, search remaining full bytes for a zero
- */
- res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
oldvalp = (void *) A(a32.oldval);
newvalp = (void *) A(a32.newval);
- if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
+ if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
|| !access_ok(VERIFY_WRITE, namep, 0)
|| !access_ok(VERIFY_WRITE, oldvalp, 0)
|| !access_ok(VERIFY_WRITE, newvalp, 0))
unlock_kernel();
set_fs(old_fs);
- if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
+ if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
return -EFAULT;
return ret;
long
sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
{
- struct sigevent __user *p = NULL;
+ struct sigevent se;
+ mm_segment_t oldfs;
+ long err;
+
if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
memset(&se, 0, sizeof(struct sigevent));
if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
__get_user(se.sigev_signo, &se32->sigev_signo) ||
__get_user(se.sigev_notify, &se32->sigev_notify) ||
__copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)) ||
- copy_to_user(p, &se, sizeof(se)))
+ sizeof(se32->payload)))
return -EFAULT;
}
- return sys_timer_create(clock, p, timer_id);
+ if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
+ return -EFAULT;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_create(clock, se32 ? &se : NULL, timer_id);
+ set_fs(oldfs);
+
+ return err;
}
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
BUILD_14_IRQS(0xe)
#endif
IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
IRQLIST_16(0xc), IRQLIST_16(0xd)
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
, IRQLIST_14(0xe)
#endif
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+ panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
}
/*
int apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
+cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = CPU_MASK_ALL };
int mp_current_pci_id = 0;
/* I/O APIC entries */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
-static u32 gart_unmapped_entry;
-
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
static void flush_gart(struct pci_dev *dev)
{
unsigned long flags;
+ int bus = dev ? dev->bus->number : -1;
+ cpumask_t bus_cpumask = pcibus_to_cpumask(bus);
int flushed = 0;
int i;
u32 w;
if (!northbridges[i])
continue;
+ if (bus >= 0 && !(cpu_isset(i, bus_cpumask)))
+ continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
/* Make sure the hardware actually executed the flush. */
flushed++;
}
if (!flushed)
- printk("nothing to flush?\n");
+ printk("nothing to flush? %d\n", bus);
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
unsigned long pages = 0;
int need = 0, nextneed;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb)
- return swiotlb_map_sg(&dev->dev,sg,nents,dir);
-#endif
-
BUG_ON(dir == PCI_DMA_NONE);
if (nents == 0)
return 0;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
+ iommu_gatt_base[iommu_page + i] = 0;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
unsigned long aper_size;
unsigned long iommu_start;
struct pci_dev *dev;
- unsigned long scratch;
- long i;
+
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
return -1;
}
}
-
+
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
*/
clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
- /*
- * Try to workaround a bug (thanks to BenH)
- * Set unmapped entries to a scratch page instead of 0.
- * Any prefetches that hit unmapped entries won't get an bus abort
- * then.
- */
- scratch = get_zeroed_page(GFP_KERNEL);
- if (!scratch)
- panic("Cannot allocate iommu scratch page");
- gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
- for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
- iommu_gatt_base[i] = gart_unmapped_entry;
-
for_all_nb(dev) {
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (me->used_math) {
fp = get_stack(ka, regs, sizeof(struct _fpstate));
- frame = (void __user *)round_down((unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
+ frame = (void __user *)round_down((u64)fp - sizeof(struct rt_sigframe), 16) - 8;
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
goto give_sigsegv;
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
- if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
+ if (*((volatile unsigned char *)phys_to_virt(8192))
== 0xA5)
/* trampoline started but...? */
printk("Stuck ??\n");
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
cpucount--;
}
+
+ /* mark "stuck" area as not stuck */
+ *((volatile unsigned *)phys_to_virt(8192)) = 0;
}
cycles_t cacheflush_time;
# Makefile for the linux x86_64-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extable.o pageattr.o mmap.o
+obj-y := init.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
__flush_tlb_all();
}
-static inline int page_is_ram (unsigned long pagenr)
+int page_is_ram (unsigned long pagenr)
{
int i;
return 0;
}
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
- * valid. The argument is a physical page number.
- *
- *
- * On x86-64, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
- if (pagenr <= 256)
- return 1;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
-}
-
-
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
+++ /dev/null
-/*
- * linux/arch/x86-64/mm/mmap.c
- *
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave an at least ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline unsigned long mmap_base(void)
-{
- unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return TASK_SIZE - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
- /*
- * Force standard allocation for 64 bit programs.
- */
- if (!test_thread_flag(TIF_IA32))
- return 1;
-
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-}
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_MSI=y
+# CONFIG_PCI_USE_VECTOR is not set
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
#
# User Modules And Translation Layers
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
#
# User Modules And Translation Layers
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
#
# Class Based Kernel Resource Management
#
-CONFIG_CKRM=y
-CONFIG_RCFS_FS=y
-CONFIG_CKRM_TYPE_TASKCLASS=y
-CONFIG_CKRM_RES_NUMTASKS=y
-CONFIG_CKRM_CPU_SCHEDULE=y
-CONFIG_CKRM_RES_BLKIO=y
-# CONFIG_CKRM_RES_MEM is not set
-# CONFIG_CKRM_TYPE_SOCKETCLASS is not set
-CONFIG_CKRM_RBCE=y
+# CONFIG_CKRM is not set
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_HOTPLUG is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_OOM_PANIC=y
# CONFIG_EMBEDDED is not set
# CONFIG_DELAY_ACCT is not set
CONFIG_KALLSYMS=y
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
# CONFIG_NET_SCH_CBQ is not set
CONFIG_NET_SCH_HTB=m
# CONFIG_NET_SCH_HFSC is not set
# CONFIG_NET_SCH_NETEM is not set
# CONFIG_NET_SCH_INGRESS is not set
# CONFIG_NET_QOS is not set
-CONFIG_NET_CLS=y
-# CONFIG_NET_CLS_TCINDEX is not set
-# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS is not set
CONFIG_NET_CLS_ROUTE=y
-CONFIG_NET_CLS_FW=m
-# CONFIG_NET_CLS_U32 is not set
-# CONFIG_NET_CLS_IND is not set
#
# Network testing
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
# CONFIG_RAW_DRIVER is not set
-CONFIG_HANGCHECK_TIMER=y
+CONFIG_HANGCHECK_TIMER=m
#
# I2C support
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
#
# Kernel hacking
#
-# CONFIG_CRASH_DUMP is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_CRASH_DUMP_BLOCKDEV=y
+# CONFIG_CRASH_DUMP_NETDEV is not set
+# CONFIG_CRASH_DUMP_MEMDEV is not set
+# CONFIG_CRASH_DUMP_COMPRESS_RLE is not set
+# CONFIG_CRASH_DUMP_COMPRESS_GZIP is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# Linux VServer
#
CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
-# CONFIG_VSERVER_HARDCPU is not set
+# CONFIG_PROC_SECURE is not set
+CONFIG_VSERVER_HARDCPU=y
# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-# CONFIG_VSERVER_DEBUG is not set
+CONFIG_INOXID_GID24=y
+# CONFIG_INOXID_GID32 is not set
+# CONFIG_INOXID_MAGIC is not set
#
# Security options
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_MSI=y
+# CONFIG_PCI_USE_VECTOR is not set
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
#
# User Modules And Translation Layers
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
#
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
# CONFIG_NET_SCH_CBQ is not set
CONFIG_NET_SCH_HTB=m
# CONFIG_NET_SCH_HFSC is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
-# CONFIG_TUX is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# Ethernet (10 or 100Mbit)
#
# CONFIG_NET_ETHERNET is not set
+# CONFIG_NE2000 is not set
#
# Ethernet (1000 Mbit)
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# Linux VServer
#
CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
+# CONFIG_PROC_SECURE is not set
CONFIG_VSERVER_HARDCPU=y
# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-CONFIG_VSERVER_DEBUG=y
+CONFIG_INOXID_GID24=y
+# CONFIG_INOXID_GID32 is not set
+# CONFIG_INOXID_MAGIC is not set
#
# Security options
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
-CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
#
# User Modules And Translation Layers
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
+# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
CONFIG_NET_DIVERT=y
# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
# CONFIG_NET_HW_FLOWCONTROL is not set
#
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_CLS_IND=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_ACT is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_CLS_POLICE is not set
#
# Network testing
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
-# CONFIG_TUX is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
-CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_DEBUG_KERNEL=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_SLAB=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES_586=m
+CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
+++ /dev/null
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_X86=y
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_GENERIC_ISA_DMA=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-
-#
-# Class Based Kernel Resource Management
-#
-CONFIG_CKRM=y
-CONFIG_RCFS_FS=y
-CONFIG_CKRM_TYPE_TASKCLASS=y
-CONFIG_CKRM_RES_NUMTASKS=y
-CONFIG_CKRM_CPU_SCHEDULE=y
-CONFIG_CKRM_RES_BLKIO=y
-CONFIG_CKRM_RES_MEM=y
-# CONFIG_CKRM_MEM_LRUORDER_CHANGE is not set
-# CONFIG_CKRM_TYPE_SOCKETCLASS is not set
-CONFIG_CKRM_RBCE=y
-CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_HOTPLUG is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_OOM_PANIC=y
-# CONFIG_EMBEDDED is not set
-# CONFIG_DELAY_ACCT is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_KALLSYMS_EXTRA_PASS=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SIG is not set
-CONFIG_KMOD=y
-
-#
-# Processor type and features
-#
-CONFIG_X86_PC=y
-# CONFIG_X86_ELAN is not set
-# CONFIG_X86_VOYAGER is not set
-# CONFIG_X86_NUMAQ is not set
-# CONFIG_X86_SUMMIT is not set
-# CONFIG_X86_BIGSMP is not set
-# CONFIG_X86_VISWS is not set
-# CONFIG_X86_GENERICARCH is not set
-# CONFIG_X86_ES7000 is not set
-# CONFIG_M386 is not set
-# CONFIG_M486 is not set
-# CONFIG_M586 is not set
-# CONFIG_M586TSC is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-CONFIG_MPENTIUMIII=y
-# CONFIG_MPENTIUMM is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-CONFIG_X86_GENERIC=y
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_XADD=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_GOOD_APIC=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_4G=y
-CONFIG_X86_SWITCH_PAGETABLES=y
-CONFIG_X86_4G_VM_LAYOUT=y
-CONFIG_X86_UACCESS_INDIRECT=y
-CONFIG_X86_HIGH_ENTRY=y
-CONFIG_HPET_TIMER=y
-CONFIG_HPET_EMULATE_RTC=y
-# CONFIG_SMP is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_PREEMPT_VOLUNTARY is not set
-# CONFIG_X86_UP_APIC is not set
-CONFIG_X86_TSC=y
-CONFIG_X86_MCE=y
-# CONFIG_X86_MCE_NONFATAL is not set
-CONFIG_TOSHIBA=m
-CONFIG_I8K=m
-CONFIG_MICROCODE=m
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
-
-#
-# Firmware Drivers
-#
-CONFIG_EDD=m
-# CONFIG_NOHIGHMEM is not set
-CONFIG_HIGHMEM4G=y
-# CONFIG_HIGHMEM64G is not set
-CONFIG_HIGHMEM=y
-CONFIG_HIGHPTE=y
-# CONFIG_MATH_EMULATION is not set
-CONFIG_MTRR=y
-# CONFIG_EFI is not set
-CONFIG_REGPARM=y
-
-#
-# Power management options (ACPI, APM)
-#
-CONFIG_PM=y
-# CONFIG_SOFTWARE_SUSPEND is not set
-# CONFIG_PM_DISK is not set
-
-#
-# ACPI (Advanced Configuration and Power Interface) Support
-#
-CONFIG_ACPI=y
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
-CONFIG_ACPI_SLEEP=y
-CONFIG_ACPI_SLEEP_PROC_FS=y
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_FAN=y
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_ASUS=m
-CONFIG_ACPI_TOSHIBA=m
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
-CONFIG_ACPI_EC=y
-CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
-CONFIG_ACPI_SYSTEM=y
-CONFIG_X86_PM_TIMER=y
-
-#
-# APM (Advanced Power Management) BIOS Support
-#
-CONFIG_APM=m
-# CONFIG_APM_IGNORE_USER_SUSPEND is not set
-# CONFIG_APM_DO_ENABLE is not set
-CONFIG_APM_CPU_IDLE=y
-# CONFIG_APM_DISPLAY_BLANK is not set
-CONFIG_APM_RTC_IS_GMT=y
-# CONFIG_APM_ALLOW_INTS is not set
-# CONFIG_APM_REAL_MODE_POWER_OFF is not set
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_PROC_INTF is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-# CONFIG_CPU_FREQ_24_API is not set
-CONFIG_CPU_FREQ_TABLE=y
-
-#
-# CPUFreq processor drivers
-#
-CONFIG_X86_ACPI_CPUFREQ=m
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
-CONFIG_X86_POWERNOW_K6=m
-CONFIG_X86_POWERNOW_K7=m
-CONFIG_X86_POWERNOW_K8=m
-# CONFIG_X86_GX_SUSPMOD is not set
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
-CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI=y
-CONFIG_X86_SPEEDSTEP_ICH=m
-CONFIG_X86_SPEEDSTEP_SMI=m
-CONFIG_X86_P4_CLOCKMOD=m
-CONFIG_X86_SPEEDSTEP_LIB=m
-# CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK is not set
-CONFIG_X86_LONGRUN=m
-CONFIG_X86_LONGHAUL=m
-
-#
-# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-#
-CONFIG_PCI=y
-# CONFIG_PCI_GOBIOS is not set
-# CONFIG_PCI_GOMMCONFIG is not set
-# CONFIG_PCI_GODIRECT is not set
-CONFIG_PCI_GOANY=y
-CONFIG_PCI_BIOS=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_LEGACY_PROC=y
-# CONFIG_PCI_NAMES is not set
-CONFIG_ISA=y
-# CONFIG_EISA is not set
-# CONFIG_MCA is not set
-# CONFIG_SCx200 is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-CONFIG_BINFMT_MISC=y
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-CONFIG_MTD=m
-# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_REDBOOT_PARTS=m
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-CONFIG_MTD_CMDLINE_PARTS=y
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-
-#
-# RAM/ROM/Flash chip drivers
-#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_AMDSTD_RETRY=3
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-
-#
-# Mapping drivers for chip access
-#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-# CONFIG_MTD_PHYSMAP is not set
-# CONFIG_MTD_PNC2000 is not set
-CONFIG_MTD_SC520CDP=m
-CONFIG_MTD_NETSC520=m
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_ELAN_104NC=m
-CONFIG_MTD_SCx200_DOCFLASH=m
-# CONFIG_MTD_AMD76XROM is not set
-# CONFIG_MTD_ICHXROM is not set
-CONFIG_MTD_SCB2_FLASH=m
-# CONFIG_MTD_NETtel is not set
-# CONFIG_MTD_DILNETPC is not set
-# CONFIG_MTD_L440GX is not set
-CONFIG_MTD_PCI=m
-
-#
-# Self-contained MTD device drivers
-#
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-# CONFIG_MTD_BLKMTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-CONFIG_MTD_DOC2000=m
-# CONFIG_MTD_DOC2001 is not set
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-CONFIG_MTD_DOCPROBE_ADDRESS=0
-
-#
-# NAND Flash Device Drivers
-#
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-CONFIG_MTD_NAND_IDS=m
-# CONFIG_MTD_NAND_DISKONCHIP is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
-
-#
-# Protocols
-#
-CONFIG_ISAPNP=y
-# CONFIG_PNPBIOS is not set
-
-#
-# Block devices
-#
-CONFIG_BLK_DEV_FD=m
-# CONFIG_BLK_DEV_XD is not set
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_LBD=y
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-# CONFIG_BLK_DEV_HD_IDE is not set
-CONFIG_BLK_DEV_IDEDISK=y
-CONFIG_IDEDISK_MULTI_MODE=y
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
-CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=m
-# CONFIG_IDE_TASK_IOCTL is not set
-# CONFIG_IDE_TASKFILE_IO is not set
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=y
-# CONFIG_BLK_DEV_CMD640 is not set
-CONFIG_BLK_DEV_IDEPNP=y
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=y
-# CONFIG_BLK_DEV_OPTI621 is not set
-CONFIG_BLK_DEV_RZ1000=y
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_ADMA=y
-CONFIG_BLK_DEV_AEC62XX=y
-CONFIG_BLK_DEV_ALI15X3=y
-# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_ATIIXP=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_TRIFLEX=y
-CONFIG_BLK_DEV_CY82C693=y
-CONFIG_BLK_DEV_CS5520=y
-CONFIG_BLK_DEV_CS5530=y
-CONFIG_BLK_DEV_HPT34X=y
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=y
-# CONFIG_BLK_DEV_SC1200 is not set
-CONFIG_BLK_DEV_PIIX=y
-# CONFIG_BLK_DEV_NS87415 is not set
-CONFIG_BLK_DEV_PDC202XX_OLD=y
-# CONFIG_PDC202XX_BURST is not set
-CONFIG_BLK_DEV_PDC202XX_NEW=y
-CONFIG_PDC202XX_FORCE=y
-CONFIG_BLK_DEV_SVWKS=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_SIS5513=y
-CONFIG_BLK_DEV_SLC90E66=y
-# CONFIG_BLK_DEV_TRM290 is not set
-CONFIG_BLK_DEV_VIA82CXXX=y
-# CONFIG_IDE_ARM is not set
-# CONFIG_IDE_CHIPSETS is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
-# CONFIG_BLK_DEV_HD is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI=m
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-
-#
-# SCSI Transport Attributes
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=m
-
-#
-# SCSI low-level drivers
-#
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-# CONFIG_SCSI_7000FASST is not set
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AHA152X=m
-CONFIG_SCSI_AHA1542=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC7XXX_OLD=m
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-# CONFIG_SCSI_DPT_I2O is not set
-CONFIG_SCSI_IN2000=m
-CONFIG_SCSI_MEGARAID=m
-CONFIG_SCSI_SATA=y
-CONFIG_SCSI_SATA_SVW=m
-CONFIG_SCSI_ATA_PIIX=m
-CONFIG_SCSI_SATA_NV=m
-CONFIG_SCSI_SATA_PROMISE=m
-CONFIG_SCSI_SATA_SX4=m
-CONFIG_SCSI_SATA_SIL=m
-CONFIG_SCSI_SATA_SIS=m
-CONFIG_SCSI_SATA_VIA=m
-CONFIG_SCSI_SATA_VITESSE=m
-CONFIG_SCSI_BUSLOGIC=m
-# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_GDTH=m
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INIA100=m
-# CONFIG_SCSI_NCR53C406A is not set
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_PSI240I is not set
-CONFIG_SCSI_QLOGIC_FAS=m
-CONFIG_SCSI_QLOGIC_ISP=m
-# CONFIG_SCSI_QLOGIC_FC is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA2XXX=m
-CONFIG_SCSI_QLA21XX=m
-CONFIG_SCSI_QLA22XX=m
-CONFIG_SCSI_QLA2300=m
-CONFIG_SCSI_QLA2322=m
-CONFIG_SCSI_QLA6312=m
-CONFIG_SCSI_QLA6322=m
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC395x is not set
-CONFIG_SCSI_DC390T=m
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Old CD-ROM drivers (not SCSI, not IDE)
-#
-# CONFIG_CD_NO_IDESCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_RAID6=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_CRYPT is not set
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=m
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_ISENSE is not set
-CONFIG_FUSION_CTL=m
-
-#
-# IEEE 1394 (FireWire) support
-#
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-CONFIG_IEEE1394_OUI_DB=y
-# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
-
-#
-# Device Drivers
-#
-# CONFIG_IEEE1394_PCILYNX is not set
-CONFIG_IEEE1394_OHCI1394=m
-
-#
-# Protocol Drivers
-#
-# CONFIG_IEEE1394_VIDEO1394 is not set
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-# CONFIG_IEEE1394_ETH1394 is not set
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_CMP=m
-CONFIG_IEEE1394_AMDTP=m
-
-#
-# I2O device support
-#
-CONFIG_I2O=m
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_ACCEPT_QUEUES is not set
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
-CONFIG_ICMP_IPOD=y
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_LOCAL=y
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
-# CONFIG_IP_NF_COMPAT_IPFWADM is not set
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_REALM=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_HW_FLOWCONTROL is not set
-
-#
-# QoS and/or fair queueing
-#
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CLK_JIFFIES=y
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-# CONFIG_NET_SCH_CLK_CPU is not set
-# CONFIG_NET_SCH_CBQ is not set
-CONFIG_NET_SCH_HTB=m
-# CONFIG_NET_SCH_HFSC is not set
-# CONFIG_NET_SCH_PRIO is not set
-# CONFIG_NET_SCH_RED is not set
-# CONFIG_NET_SCH_SFQ is not set
-# CONFIG_NET_SCH_TEQL is not set
-# CONFIG_NET_SCH_TBF is not set
-# CONFIG_NET_SCH_GRED is not set
-# CONFIG_NET_SCH_DSMARK is not set
-# CONFIG_NET_SCH_NETEM is not set
-# CONFIG_NET_SCH_INGRESS is not set
-# CONFIG_NET_QOS is not set
-CONFIG_NET_CLS=y
-# CONFIG_NET_CLS_TCINDEX is not set
-# CONFIG_NET_CLS_ROUTE4 is not set
-CONFIG_NET_CLS_ROUTE=y
-CONFIG_NET_CLS_FW=m
-# CONFIG_NET_CLS_U32 is not set
-# CONFIG_NET_CLS_IND is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_TUX is not set
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
-# CONFIG_NET_SB1000 is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_EL1=m
-CONFIG_EL2=m
-CONFIG_ELPLUS=m
-CONFIG_EL16=m
-CONFIG_EL3=m
-CONFIG_3C515=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_LANCE=m
-CONFIG_NET_VENDOR_SMC=y
-CONFIG_WD80x3=m
-CONFIG_ULTRA=m
-CONFIG_SMC9194=m
-CONFIG_NET_VENDOR_RACAL=y
-# CONFIG_NI5010 is not set
-CONFIG_NI52=m
-CONFIG_NI65=m
-
-#
-# Tulip family network device support
-#
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-CONFIG_TULIP_MMIO=y
-# CONFIG_TULIP_NAPI is not set
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-# CONFIG_AT1700 is not set
-CONFIG_DEPCA=m
-CONFIG_HP100=m
-# CONFIG_NET_ISA is not set
-CONFIG_NE2000=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-CONFIG_AMD8111_ETH=m
-CONFIG_AMD8111E_NAPI=y
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_ADAPTEC_STARFIRE_NAPI=y
-CONFIG_AC3200=m
-CONFIG_APRICOT=m
-CONFIG_B44=m
-CONFIG_FORCEDETH=m
-CONFIG_CS89x0=m
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
-# CONFIG_EEPRO100_PIO is not set
-CONFIG_E100=m
-CONFIG_E100_NAPI=y
-CONFIG_FEALNX=m
-CONFIG_NATSEMI=m
-CONFIG_NE2K_PCI=m
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-CONFIG_8139TOO_PIO=y
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_SIS900=m
-CONFIG_EPIC100=m
-CONFIG_SUNDANCE=m
-# CONFIG_SUNDANCE_MMIO is not set
-CONFIG_TLAN=m
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=m
-CONFIG_NET_POCKET=y
-CONFIG_ATP=m
-CONFIG_DE600=m
-CONFIG_DE620=m
-
-#
-# Ethernet (1000 Mbit)
-#
-CONFIG_ACENIC=m
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_E1000_NAPI=y
-CONFIG_NS83820=m
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_R8169=m
-CONFIG_SK98LIN=m
-CONFIG_TIGON3=m
-
-#
-# Ethernet (10000 Mbit)
-#
-CONFIG_IXGB=m
-CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PCIPS2 is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_ACPI is not set
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_MULTIPORT=y
-CONFIG_SERIAL_8250_RSA=y
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-# CONFIG_CRASH is not set
-CONFIG_LEGACY_PTY_COUNT=256
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-CONFIG_IPMI_HANDLER=m
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-CONFIG_HW_RANDOM=m
-CONFIG_NVRAM=m
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-CONFIG_AGP=m
-CONFIG_AGP_ALI=m
-CONFIG_AGP_ATI=m
-CONFIG_AGP_AMD=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_INTEL_MCH=m
-CONFIG_AGP_NVIDIA=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_SWORKS=m
-CONFIG_AGP_VIA=m
-CONFIG_AGP_EFFICEON=m
-CONFIG_DRM=y
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_GAMMA=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_I810=m
-CONFIG_DRM_I830=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_SIS=m
-CONFIG_MWAVE=m
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_HPET is not set
-CONFIG_HANGCHECK_TIMER=m
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCF=m
-
-#
-# I2C Hardware Bus support
-#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD8111=m
-# CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_I801 is not set
-CONFIG_I2C_I810=m
-CONFIG_I2C_ISA=m
-CONFIG_I2C_NFORCE2=m
-# CONFIG_I2C_PARPORT_LIGHT is not set
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
-# CONFIG_SCx200_ACB is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
-
-#
-# Hardware Sensors Chip support
-#
-CONFIG_I2C_SENSOR=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83627HF=m
-
-#
-# Other I2C Chip support
-#
-CONFIG_SENSORS_EEPROM=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_RTC8564=m
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Misc devices
-#
-CONFIG_IBM_ASM=m
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-
-#
-# Video For Linux
-#
-
-#
-# Video Adapters
-#
-# CONFIG_VIDEO_BT848 is not set
-CONFIG_VIDEO_PMS=m
-CONFIG_VIDEO_CPIA=m
-# CONFIG_VIDEO_CPIA_USB is not set
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-CONFIG_VIDEO_STRADIS=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_OVCAMCHIP=m
-
-#
-# Radio Adapters
-#
-CONFIG_RADIO_CADET=m
-CONFIG_RADIO_RTRACK=m
-CONFIG_RADIO_RTRACK2=m
-CONFIG_RADIO_AZTECH=m
-CONFIG_RADIO_GEMTEK=m
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
-CONFIG_RADIO_SF16FMI=m
-CONFIG_RADIO_SF16FMR2=m
-CONFIG_RADIO_TERRATEC=m
-CONFIG_RADIO_TRUST=m
-CONFIG_RADIO_TYPHOON=m
-CONFIG_RADIO_TYPHOON_PROC_FS=y
-CONFIG_RADIO_ZOLTRIX=m
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_VIDEOBUF=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR=m
-
-#
-# Graphics support
-#
-CONFIG_FB=y
-CONFIG_FB_CIRRUS=m
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_VGA16=m
-CONFIG_FB_VESA=y
-CONFIG_VIDEO_SELECT=y
-CONFIG_FB_HGA=m
-CONFIG_FB_HGA_ACCEL=y
-CONFIG_FB_RIVA=m
-# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_I810=m
-CONFIG_FB_I810_GTF=y
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G450=y
-CONFIG_FB_MATROX_G100=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_MATROX_MULTIHEAD=y
-# CONFIG_FB_RADEON_OLD is not set
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GX=y
-# CONFIG_FB_ATY_XL_INIT is not set
-# CONFIG_FB_SIS is not set
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-CONFIG_FB_3DFX_ACCEL=y
-CONFIG_FB_VOODOO1=m
-CONFIG_FB_TRIDENT=m
-CONFIG_FB_TRIDENT_ACCEL=y
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_MDA_CONSOLE=m
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-
-#
-# Logo configuration
-#
-# CONFIG_LOGO is not set
-
-#
-# Sound
-#
-CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_RTCTIMER=m
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL4_LIB=m
-CONFIG_SND_VX_LIB=m
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_MTPAV=m
-# CONFIG_SND_SERIAL_U16550 is not set
-CONFIG_SND_MPU401=m
-
-#
-# ISA devices
-#
-CONFIG_SND_AD1816A=m
-CONFIG_SND_AD1848=m
-CONFIG_SND_CS4231=m
-CONFIG_SND_CS4232=m
-CONFIG_SND_CS4236=m
-CONFIG_SND_ES968=m
-CONFIG_SND_ES1688=m
-CONFIG_SND_ES18XX=m
-CONFIG_SND_GUSCLASSIC=m
-CONFIG_SND_GUSEXTREME=m
-CONFIG_SND_GUSMAX=m
-CONFIG_SND_INTERWAVE=m
-CONFIG_SND_INTERWAVE_STB=m
-CONFIG_SND_OPTI92X_AD1848=m
-CONFIG_SND_OPTI92X_CS4231=m
-CONFIG_SND_OPTI93X=m
-CONFIG_SND_SB8=m
-CONFIG_SND_SB16=m
-CONFIG_SND_SBAWE=m
-CONFIG_SND_SB16_CSP=y
-# CONFIG_SND_WAVEFRONT is not set
-CONFIG_SND_ALS100=m
-CONFIG_SND_AZT2320=m
-CONFIG_SND_CMI8330=m
-CONFIG_SND_DT019X=m
-CONFIG_SND_OPL3SA2=m
-CONFIG_SND_SGALAXY=m
-CONFIG_SND_SSCAPE=m
-
-#
-# PCI devices
-#
-CONFIG_SND_AC97_CODEC=m
-CONFIG_SND_ALI5451=m
-CONFIG_SND_ATIIXP=m
-CONFIG_SND_AU8810=m
-CONFIG_SND_AU8820=m
-CONFIG_SND_AU8830=m
-CONFIG_SND_AZT3328=m
-CONFIG_SND_BT87X=m
-CONFIG_SND_CS46XX=m
-CONFIG_SND_CS46XX_NEW_DSP=y
-CONFIG_SND_CS4281=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_KORG1212=m
-CONFIG_SND_MIXART=m
-CONFIG_SND_NM256=m
-CONFIG_SND_RME32=m
-CONFIG_SND_RME96=m
-CONFIG_SND_RME9652=m
-CONFIG_SND_HDSP=m
-CONFIG_SND_TRIDENT=m
-CONFIG_SND_YMFPCI=m
-CONFIG_SND_ALS4000=m
-CONFIG_SND_CMIPCI=m
-CONFIG_SND_ENS1370=m
-CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
-CONFIG_SND_MAESTRO3=m
-CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X=m
-CONFIG_SND_ICE1712=m
-CONFIG_SND_ICE1724=m
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_VIA82XX=m
-CONFIG_SND_VX222=m
-
-#
-# ALSA USB devices
-#
-# CONFIG_SND_USB_AUDIO is not set
-
-#
-# Open Sound System
-#
-# CONFIG_SOUND_PRIME is not set
-
-#
-# USB support
-#
-CONFIG_USB=m
-# CONFIG_USB_DEBUG is not set
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_BANDWIDTH is not set
-# CONFIG_USB_DYNAMIC_MINORS is not set
-
-#
-# USB Host Controller Drivers
-#
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_SPLIT_ISO=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_UHCI_HCD=m
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
-# CONFIG_USB_MIDI is not set
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_RW_DETECT=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_HP8200e=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-
-#
-# USB Human Interface Devices (HID)
-#
-# CONFIG_USB_HID is not set
-
-#
-# USB HID Boot Protocol drivers
-#
-# CONFIG_USB_KBD is not set
-# CONFIG_USB_MOUSE is not set
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_EGALAX is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-# CONFIG_USB_HPUSBSCSI is not set
-
-#
-# USB Multimedia devices
-#
-# CONFIG_USB_DABUSB is not set
-# CONFIG_USB_VICAM is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_IBMCAM is not set
-# CONFIG_USB_KONICAWC is not set
-# CONFIG_USB_OV511 is not set
-# CONFIG_USB_PWC is not set
-# CONFIG_USB_SE401 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_STV680 is not set
-# CONFIG_USB_W9968CF is not set
-
-#
-# USB Network adaptors
-#
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-
-#
-# USB Host-to-Host Cables
-#
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_GENESYS=y
-CONFIG_USB_NET1080=y
-CONFIG_USB_PL2301=y
-
-#
-# Intelligent USB Devices/Gadgets
-#
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_ZAURUS=y
-CONFIG_USB_CDCETHER=y
-
-#
-# USB Network Adapters
-#
-CONFIG_USB_AX8817X=y
-
-#
-# USB port drivers
-#
-
-#
-# USB Serial Converter support
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_TIGL is not set
-# CONFIG_USB_AUERSWALD is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETSERVO is not set
-# CONFIG_USB_TEST is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_QUOTA=y
-# CONFIG_QFMT_V1 is not set
-CONFIG_QFMT_V2=y
-CONFIG_QUOTACTL=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_ZISOFS_FS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS_XATTR=y
-CONFIG_DEVPTS_FS_SECURITY=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_EXPORTFS is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-CONFIG_NLS_UTF8=m
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-CONFIG_CRASH_DUMP=y
-CONFIG_CRASH_DUMP_BLOCKDEV=y
-# CONFIG_CRASH_DUMP_NETDEV is not set
-# CONFIG_CRASH_DUMP_MEMDEV is not set
-# CONFIG_CRASH_DUMP_COMPRESS_RLE is not set
-# CONFIG_CRASH_DUMP_COMPRESS_GZIP is not set
-CONFIG_DEBUG_KERNEL=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
-# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_FRAME_POINTER is not set
-
-#
-# Linux VServer
-#
-CONFIG_VSERVER_LEGACY=y
-# CONFIG_VSERVER_PROC_SECURE is not set
-# CONFIG_VSERVER_HARDCPU is not set
-# CONFIG_INOXID_NONE is not set
-# CONFIG_INOXID_UID16 is not set
-# CONFIG_INOXID_GID16 is not set
-CONFIG_INOXID_UGID24=y
-# CONFIG_INOXID_INTERN is not set
-# CONFIG_INOXID_RUNTIME is not set
-CONFIG_VSERVER_DEBUG=y
-
-#
-# Security options
-#
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Library routines
-#
-CONFIG_CRC_CCITT=m
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_PC=y
config CRYPTO_AES
tristate "AES cipher algorithms"
- depends on CRYPTO && !(X86 && !X86_64)
+ depends on CRYPTO
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
See http://csrc.nist.gov/CryptoToolkit/aes/ for more information.
-config CRYPTO_AES_586
- tristate "AES cipher algorithms (i586)"
- depends on CRYPTO && (X86 && !X86_64)
- help
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See http://csrc.nist.gov/encryption/aes/ for more information.
-
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
depends on CRYPTO
WEP, but it should not be for other purposes because of the
weakness of the algorithm.
-config CRYPTO_KHAZAD
- tristate "Khazad cipher algorithm"
- depends on CRYPTO
- help
- Khazad cipher algorithm.
-
- Khazad was a finalist in the initial NESSIE competition. It is
- an algorithm optimized for 64-bit processors with good performance
- on 32-bit processors. Khazad uses an 128 bit key size.
-
- See also:
- http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html
-
config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
depends on CRYPTO
obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
obj-$(CONFIG_CRYPTO_TEA) += tea.o
-obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
{
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp_src[bsize];
- u8 tmp_dst[bsize];
+ u8 tmp_src[nbytes > src->length ? bsize : 0];
+ u8 tmp_dst[nbytes > dst->length ? bsize : 0];
if (!nbytes)
return 0;
+++ /dev/null
-/*
- * Cryptographic API.
- *
- * Khazad Algorithm
- *
- * The Khazad algorithm was developed by Paulo S. L. M. Barreto and
- * Vincent Rijmen. It was a finalist in the NESSIE encryption contest.
- *
- * The original authors have disclaimed all copyright interest in this
- * code and thus put it in the public domain. The subsequent authors
- * have put this under the GNU General Public License.
- *
- * By Aaron Grothe ajgrothe@yahoo.com, August 1, 2004
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <linux/crypto.h>
-
-#define KHAZAD_KEY_SIZE 16
-#define KHAZAD_BLOCK_SIZE 8
-#define KHAZAD_ROUNDS 8
-
-struct khazad_ctx {
- u64 E[KHAZAD_ROUNDS + 1];
- u64 D[KHAZAD_ROUNDS + 1];
-};
-
-static const u64 T0[256] = {
- 0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL,
- 0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL,
- 0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL,
- 0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL,
- 0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL,
- 0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL,
- 0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL,
- 0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL,
- 0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL,
- 0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL,
- 0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL,
- 0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL,
- 0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL,
- 0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL,
- 0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL,
- 0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL,
- 0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL,
- 0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL,
- 0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL,
- 0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL,
- 0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL,
- 0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL,
- 0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL,
- 0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL,
- 0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL,
- 0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL,
- 0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL,
- 0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL,
- 0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL,
- 0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL,
- 0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL,
- 0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL,
- 0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL,
- 0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL,
- 0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL,
- 0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL,
- 0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL,
- 0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL,
- 0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL,
- 0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL,
- 0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL,
- 0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL,
- 0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL,
- 0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL,
- 0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL,
- 0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL,
- 0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL,
- 0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL,
- 0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL,
- 0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL,
- 0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL,
- 0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL,
- 0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL,
- 0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL,
- 0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL,
- 0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL,
- 0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL,
- 0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL,
- 0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL,
- 0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL,
- 0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL,
- 0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL,
- 0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL,
- 0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL,
- 0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL,
- 0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL,
- 0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL,
- 0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL,
- 0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL,
- 0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL,
- 0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL,
- 0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL,
- 0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL,
- 0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL,
- 0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL,
- 0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL,
- 0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL,
- 0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL,
- 0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL,
- 0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL,
- 0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL,
- 0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL,
- 0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL,
- 0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL,
- 0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL,
- 0x42c61557912aecd3ULL
-};
-
-static const u64 T1[256] = {
- 0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL,
- 0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL,
- 0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL,
- 0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL,
- 0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL,
- 0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL,
- 0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL,
- 0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL,
- 0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL,
- 0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL,
- 0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL,
- 0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL,
- 0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL,
- 0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL,
- 0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL,
- 0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL,
- 0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL,
- 0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL,
- 0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL,
- 0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL,
- 0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL,
- 0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL,
- 0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL,
- 0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL,
- 0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL,
- 0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL,
- 0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL,
- 0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL,
- 0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL,
- 0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL,
- 0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL,
- 0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL,
- 0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL,
- 0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL,
- 0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL,
- 0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL,
- 0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL,
- 0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL,
- 0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL,
- 0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL,
- 0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL,
- 0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL,
- 0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL,
- 0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL,
- 0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL,
- 0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL,
- 0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL,
- 0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL,
- 0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL,
- 0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL,
- 0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL,
- 0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL,
- 0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL,
- 0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL,
- 0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL,
- 0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL,
- 0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL,
- 0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL,
- 0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL,
- 0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL,
- 0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL,
- 0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL,
- 0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL,
- 0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL,
- 0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL,
- 0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL,
- 0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL,
- 0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL,
- 0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL,
- 0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL,
- 0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL,
- 0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL,
- 0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL,
- 0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL,
- 0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL,
- 0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL,
- 0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL,
- 0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL,
- 0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL,
- 0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL,
- 0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL,
- 0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL,
- 0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL,
- 0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL,
- 0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL,
- 0xc64257152a91d3ecULL
-};
-
-static const u64 T2[256] = {
- 0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL,
- 0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL,
- 0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL,
- 0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL,
- 0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL,
- 0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL,
- 0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL,
- 0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL,
- 0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL,
- 0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL,
- 0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL,
- 0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL,
- 0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL,
- 0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL,
- 0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL,
- 0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL,
- 0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL,
- 0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL,
- 0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL,
- 0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL,
- 0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL,
- 0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL,
- 0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL,
- 0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL,
- 0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL,
- 0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL,
- 0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL,
- 0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL,
- 0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL,
- 0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL,
- 0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL,
- 0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL,
- 0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL,
- 0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL,
- 0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL,
- 0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL,
- 0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL,
- 0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL,
- 0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL,
- 0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL,
- 0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL,
- 0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL,
- 0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL,
- 0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL,
- 0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL,
- 0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL,
- 0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL,
- 0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL,
- 0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL,
- 0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL,
- 0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL,
- 0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL,
- 0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL,
- 0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL,
- 0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL,
- 0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL,
- 0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL,
- 0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL,
- 0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL,
- 0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL,
- 0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL,
- 0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL,
- 0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL,
- 0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL,
- 0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL,
- 0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL,
- 0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL,
- 0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL,
- 0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL,
- 0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL,
- 0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL,
- 0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL,
- 0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL,
- 0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL,
- 0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL,
- 0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL,
- 0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL,
- 0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL,
- 0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL,
- 0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL,
- 0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL,
- 0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL,
- 0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL,
- 0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL,
- 0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL,
- 0x155742c6ecd3912aULL
-};
-
-static const u64 T3[256] = {
- 0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL,
- 0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL,
- 0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL,
- 0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL,
- 0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL,
- 0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL,
- 0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL,
- 0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL,
- 0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL,
- 0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL,
- 0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL,
- 0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL,
- 0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL,
- 0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL,
- 0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL,
- 0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL,
- 0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL,
- 0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL,
- 0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL,
- 0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL,
- 0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL,
- 0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL,
- 0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL,
- 0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL,
- 0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL,
- 0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL,
- 0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL,
- 0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL,
- 0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL,
- 0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL,
- 0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL,
- 0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL,
- 0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL,
- 0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL,
- 0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL,
- 0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL,
- 0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL,
- 0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL,
- 0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL,
- 0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL,
- 0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL,
- 0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL,
- 0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL,
- 0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL,
- 0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL,
- 0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL,
- 0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL,
- 0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL,
- 0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL,
- 0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL,
- 0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL,
- 0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL,
- 0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL,
- 0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL,
- 0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL,
- 0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL,
- 0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL,
- 0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL,
- 0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL,
- 0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL,
- 0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL,
- 0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL,
- 0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL,
- 0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL,
- 0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL,
- 0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL,
- 0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL,
- 0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL,
- 0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL,
- 0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL,
- 0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL,
- 0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL,
- 0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL,
- 0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL,
- 0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL,
- 0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL,
- 0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL,
- 0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL,
- 0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL,
- 0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL,
- 0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL,
- 0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL,
- 0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL,
- 0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL,
- 0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL,
- 0x5715c642d3ec2a91ULL
-};
-
-static const u64 T4[256] = {
- 0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL,
- 0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL,
- 0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL,
- 0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL,
- 0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL,
- 0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL,
- 0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL,
- 0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL,
- 0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL,
- 0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL,
- 0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL,
- 0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL,
- 0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL,
- 0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL,
- 0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL,
- 0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL,
- 0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL,
- 0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL,
- 0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL,
- 0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL,
- 0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL,
- 0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL,
- 0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL,
- 0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL,
- 0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL,
- 0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL,
- 0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL,
- 0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL,
- 0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL,
- 0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL,
- 0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL,
- 0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL,
- 0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL,
- 0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL,
- 0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL,
- 0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL,
- 0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL,
- 0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL,
- 0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL,
- 0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL,
- 0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL,
- 0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL,
- 0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL,
- 0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL,
- 0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL,
- 0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL,
- 0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL,
- 0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL,
- 0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL,
- 0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL,
- 0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL,
- 0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL,
- 0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL,
- 0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL,
- 0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL,
- 0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL,
- 0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL,
- 0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL,
- 0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL,
- 0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL,
- 0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL,
- 0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL,
- 0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL,
- 0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL,
- 0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL,
- 0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL,
- 0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL,
- 0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL,
- 0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL,
- 0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL,
- 0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL,
- 0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL,
- 0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL,
- 0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL,
- 0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL,
- 0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL,
- 0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL,
- 0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL,
- 0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL,
- 0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL,
- 0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL,
- 0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL,
- 0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL,
- 0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL,
- 0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL,
- 0x912aecd342c61557ULL
-};
-
-static const u64 T5[256] = {
- 0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL,
- 0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL,
- 0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL,
- 0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL,
- 0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL,
- 0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL,
- 0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL,
- 0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL,
- 0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL,
- 0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL,
- 0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL,
- 0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL,
- 0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL,
- 0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL,
- 0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL,
- 0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL,
- 0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL,
- 0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL,
- 0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL,
- 0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL,
- 0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL,
- 0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL,
- 0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL,
- 0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL,
- 0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL,
- 0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL,
- 0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL,
- 0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL,
- 0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL,
- 0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL,
- 0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL,
- 0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL,
- 0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL,
- 0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL,
- 0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL,
- 0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL,
- 0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL,
- 0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL,
- 0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL,
- 0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL,
- 0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL,
- 0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL,
- 0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL,
- 0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL,
- 0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL,
- 0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL,
- 0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL,
- 0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL,
- 0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL,
- 0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL,
- 0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL,
- 0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL,
- 0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL,
- 0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL,
- 0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL,
- 0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL,
- 0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL,
- 0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL,
- 0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL,
- 0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL,
- 0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL,
- 0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL,
- 0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL,
- 0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL,
- 0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL,
- 0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL,
- 0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL,
- 0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL,
- 0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL,
- 0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL,
- 0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL,
- 0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL,
- 0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL,
- 0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL,
- 0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL,
- 0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL,
- 0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL,
- 0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL,
- 0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL,
- 0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL,
- 0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL,
- 0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL,
- 0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL,
- 0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL,
- 0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL,
- 0x2a91d3ecc6425715ULL
-};
-
-static const u64 T6[256] = {
- 0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL,
- 0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL,
- 0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL,
- 0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL,
- 0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL,
- 0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL,
- 0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL,
- 0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL,
- 0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL,
- 0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL,
- 0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL,
- 0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL,
- 0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL,
- 0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL,
- 0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL,
- 0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL,
- 0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL,
- 0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL,
- 0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL,
- 0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL,
- 0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL,
- 0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL,
- 0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL,
- 0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL,
- 0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL,
- 0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL,
- 0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL,
- 0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL,
- 0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL,
- 0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL,
- 0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL,
- 0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL,
- 0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL,
- 0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL,
- 0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL,
- 0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL,
- 0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL,
- 0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL,
- 0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL,
- 0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL,
- 0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL,
- 0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL,
- 0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL,
- 0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL,
- 0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL,
- 0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL,
- 0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL,
- 0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL,
- 0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL,
- 0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL,
- 0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL,
- 0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL,
- 0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL,
- 0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL,
- 0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL,
- 0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL,
- 0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL,
- 0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL,
- 0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL,
- 0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL,
- 0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL,
- 0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL,
- 0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL,
- 0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL,
- 0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL,
- 0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL,
- 0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL,
- 0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL,
- 0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL,
- 0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL,
- 0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL,
- 0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL,
- 0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL,
- 0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL,
- 0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL,
- 0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL,
- 0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL,
- 0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL,
- 0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL,
- 0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL,
- 0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL,
- 0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL,
- 0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL,
- 0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL,
- 0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL,
- 0xecd3912a155742c6ULL
-};
-
-static const u64 T7[256] = {
- 0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL,
- 0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL,
- 0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL,
- 0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL,
- 0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL,
- 0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL,
- 0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL,
- 0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL,
- 0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL,
- 0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL,
- 0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL,
- 0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL,
- 0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL,
- 0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL,
- 0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL,
- 0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL,
- 0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL,
- 0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL,
- 0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL,
- 0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL,
- 0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL,
- 0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL,
- 0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL,
- 0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL,
- 0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL,
- 0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL,
- 0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL,
- 0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL,
- 0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL,
- 0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL,
- 0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL,
- 0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL,
- 0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL,
- 0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL,
- 0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL,
- 0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL,
- 0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL,
- 0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL,
- 0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL,
- 0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL,
- 0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL,
- 0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL,
- 0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL,
- 0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL,
- 0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL,
- 0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL,
- 0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL,
- 0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL,
- 0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL,
- 0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL,
- 0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL,
- 0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL,
- 0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL,
- 0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL,
- 0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL,
- 0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL,
- 0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL,
- 0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL,
- 0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL,
- 0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL,
- 0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL,
- 0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL,
- 0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL,
- 0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL,
- 0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL,
- 0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL,
- 0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL,
- 0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL,
- 0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL,
- 0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL,
- 0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL,
- 0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL,
- 0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL,
- 0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL,
- 0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL,
- 0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL,
- 0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL,
- 0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL,
- 0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL,
- 0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL,
- 0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL,
- 0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL,
- 0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL,
- 0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL,
- 0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL,
- 0xd3ec2a915715c642ULL
-};
-
-static const u64 c[KHAZAD_ROUNDS + 1] = {
- 0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL,
- 0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL,
- 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
-};
-
-static int khazad_setkey(void *ctx_arg, const u8 *in_key,
- unsigned int key_len, u32 *flags)
-{
-
- struct khazad_ctx *ctx = ctx_arg;
- int r;
- const u64 *S = T7;
- u64 K2, K1;
-
- if (key_len != 16)
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
- K2 = ((u64)in_key[ 0] << 56) ^
- ((u64)in_key[ 1] << 48) ^
- ((u64)in_key[ 2] << 40) ^
- ((u64)in_key[ 3] << 32) ^
- ((u64)in_key[ 4] << 24) ^
- ((u64)in_key[ 5] << 16) ^
- ((u64)in_key[ 6] << 8) ^
- ((u64)in_key[ 7] );
- K1 = ((u64)in_key[ 8] << 56) ^
- ((u64)in_key[ 9] << 48) ^
- ((u64)in_key[10] << 40) ^
- ((u64)in_key[11] << 32) ^
- ((u64)in_key[12] << 24) ^
- ((u64)in_key[13] << 16) ^
- ((u64)in_key[14] << 8) ^
- ((u64)in_key[15] );
-
- /* setup the encrypt key */
- for (r = 0; r <= KHAZAD_ROUNDS; r++) {
- ctx->E[r] = T0[(int)(K1 >> 56) ] ^
- T1[(int)(K1 >> 48) & 0xff] ^
- T2[(int)(K1 >> 40) & 0xff] ^
- T3[(int)(K1 >> 32) & 0xff] ^
- T4[(int)(K1 >> 24) & 0xff] ^
- T5[(int)(K1 >> 16) & 0xff] ^
- T6[(int)(K1 >> 8) & 0xff] ^
- T7[(int)(K1 ) & 0xff] ^
- c[r] ^ K2;
- K2 = K1;
- K1 = ctx->E[r];
- }
- /* Setup the decrypt key */
- ctx->D[0] = ctx->E[KHAZAD_ROUNDS];
- for (r = 1; r < KHAZAD_ROUNDS; r++) {
- K1 = ctx->E[KHAZAD_ROUNDS - r];
- ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^
- T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^
- T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^
- T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^
- T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^
- T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^
- T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^
- T7[(int)S[(int)(K1 ) & 0xff] & 0xff];
- }
- ctx->D[KHAZAD_ROUNDS] = ctx->E[0];
-
- return 0;
-
-}
-
-static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
-{
-
- int r;
- u64 state;
-
- state = ((u64)plaintext[0] << 56) ^
- ((u64)plaintext[1] << 48) ^
- ((u64)plaintext[2] << 40) ^
- ((u64)plaintext[3] << 32) ^
- ((u64)plaintext[4] << 24) ^
- ((u64)plaintext[5] << 16) ^
- ((u64)plaintext[6] << 8) ^
- ((u64)plaintext[7] ) ^
- roundKey[0];
-
- for (r = 1; r < KHAZAD_ROUNDS; r++) {
- state = T0[(int)(state >> 56) ] ^
- T1[(int)(state >> 48) & 0xff] ^
- T2[(int)(state >> 40) & 0xff] ^
- T3[(int)(state >> 32) & 0xff] ^
- T4[(int)(state >> 24) & 0xff] ^
- T5[(int)(state >> 16) & 0xff] ^
- T6[(int)(state >> 8) & 0xff] ^
- T7[(int)(state ) & 0xff] ^
- roundKey[r];
- }
-
- state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
- (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
- (T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^
- (T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^
- (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^
- (T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
- (T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
- (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
- roundKey[KHAZAD_ROUNDS];
-
- ciphertext[0] = (u8)(state >> 56);
- ciphertext[1] = (u8)(state >> 48);
- ciphertext[2] = (u8)(state >> 40);
- ciphertext[3] = (u8)(state >> 32);
- ciphertext[4] = (u8)(state >> 24);
- ciphertext[5] = (u8)(state >> 16);
- ciphertext[6] = (u8)(state >> 8);
- ciphertext[7] = (u8)(state );
-
-}
-
-static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
- struct khazad_ctx *ctx = ctx_arg;
- khazad_crypt(ctx->E, dst, src);
-}
-
-static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
-{
- struct khazad_ctx *ctx = ctx_arg;
- khazad_crypt(ctx->D, dst, src);
-}
-
-static struct crypto_alg khazad_alg = {
- .cra_name = "khazad",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = KHAZAD_BLOCK_SIZE,
- .cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
- .cra_u = { .cipher = {
- .cia_min_keysize = KHAZAD_KEY_SIZE,
- .cia_max_keysize = KHAZAD_KEY_SIZE,
- .cia_setkey = khazad_setkey,
- .cia_encrypt = khazad_encrypt,
- .cia_decrypt = khazad_decrypt } }
-};
-
-static int __init init(void)
-{
- int ret = 0;
-
- ret = crypto_register_alg(&khazad_alg);
- return ret;
-}
-
-static void __exit fini(void)
-{
- crypto_unregister_alg(&khazad_alg);
-}
-
-
-module_init(init);
-module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
{
printk("- Added public key %X%X\n", pk->keyid[0], pk->keyid[1]);
-// if (pk->expiredate && pk->expiredate < xtime.tv_sec)
-// printk(" - public key has expired\n");
+ if (pk->expiredate && pk->expiredate < xtime.tv_sec)
+ printk(" - public key has expired\n");
if (pk->timestamp > xtime.tv_sec )
printk(" - key was been created %lu seconds in future\n",
struct ksign_public_key *pk;
uint8_t sha1[SHA1_DIGEST_SIZE];
MPI result = NULL;
- int rc = 0;
+ int rc = 0, i;
pk = ksign_get_public_key(sig->keyid);
if (!pk) {
test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS);
test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS);
- //KHAZAD
- test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS);
- test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS);
-
test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
test_deflate();
test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS);
break;
- case 21:
- test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS);
- test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS);
- break;
-
#ifdef CONFIG_CRYPTO_HMAC
case 100:
test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS);
}
};
-/*
- * KHAZAD test vectors.
- */
-#define KHAZAD_ENC_TEST_VECTORS 5
-#define KHAZAD_DEC_TEST_VECTORS 5
-struct cipher_testvec khazad_enc_tv_template[] = {
- {
- .key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .klen = 16,
- .input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .ilen = 8,
- .result = { 0x49, 0xa4, 0xce, 0x32, 0xac, 0x19, 0x0e, 0x3f },
- .rlen = 8,
- }, {
- .key = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
- 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .klen = 16,
- .input = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .ilen = 8,
- .result = { 0x7e, 0x82, 0x12, 0xa1, 0Xd9, 0X5b, 0Xe4, 0Xf9 },
- .rlen = 8,
- }, {
- .key = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2,
- 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .klen = 16,
- .input = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .ilen = 8,
- .result = { 0Xaa, 0Xbe, 0Xc1, 0X95, 0Xc5, 0X94, 0X1a, 0X9c },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .ilen = 8,
- .result = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f ,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .ilen = 16,
- .result = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 ,
- 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .rlen = 16,
- },
-};
-
-struct cipher_testvec khazad_dec_tv_template[] = {
- {
- .key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .klen = 16,
- .input = { 0X49, 0Xa4, 0Xce, 0X32, 0Xac, 0X19, 0X0e, 0X3f },
- .ilen = 8,
- .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .rlen = 8,
- }, {
- .key = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
- 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .klen = 16,
- .input = { 0X7e, 0X82, 0X12, 0Xa1, 0Xd9, 0X5b, 0Xe4, 0Xf9 },
- .ilen = 8,
- .result = { 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38 },
- .rlen = 8,
- }, {
- .key = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2,
- 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .klen = 16,
- .input = { 0Xaa, 0Xbe, 0Xc1, 0X95, 0Xc5, 0X94, 0X1a, 0X9c },
- .ilen = 8,
- .result = { 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2, 0Xa2 },
- .rlen = 8,
- }, {
- .key = { 0x2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .ilen = 8,
- .result = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .rlen = 8,
- }, {
- .key = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .klen = 16,
- .input = { 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 ,
- 0X04, 0X74, 0Xf5, 0X70, 0X50, 0X16, 0Xd3, 0Xb8 },
- .ilen = 16,
- .result = { 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f ,
- 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f, 0X2f },
- .rlen = 16,
- },
-};
/*
* Compression stuff.
acpi_set_register(ACPI_BITREG_RT_CLOCK_ENABLE, 1, ACPI_MTX_LOCK);
- *ppos += count;
+ file->f_pos += count;
result = 0;
end:
{
acpi_status status = AE_OK;
struct acpi_buffer dsdt = {ACPI_ALLOCATE_BUFFER, NULL};
- ssize_t res;
+ void *data = NULL;
+ size_t size = 0;
ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- res = simple_read_from_buffer(buffer, count, ppos,
- dsdt.pointer, dsdt.length);
+ if (*ppos < dsdt.length) {
+ data = dsdt.pointer + file->f_pos;
+ size = dsdt.length - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, data, size)) {
+ acpi_os_free(dsdt.pointer);
+ return_VALUE(-EFAULT);
+ }
+ }
+
acpi_os_free(dsdt.pointer);
- return_VALUE(res);
+ *ppos += size;
+
+ return_VALUE(size);
}
{
acpi_status status = AE_OK;
struct acpi_buffer fadt = {ACPI_ALLOCATE_BUFFER, NULL};
- ssize_t res;
+ void *data = NULL;
+ size_t size = 0;
ACPI_FUNCTION_TRACE("acpi_system_read_fadt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- res = simple_read_from_buffer(buffer, count, ppos,
- fadt.pointer, fadt.length);
+ if (*ppos < fadt.length) {
+ data = fadt.pointer + file->f_pos;
+ size = fadt.length - file->f_pos;
+ if (size > count)
+ size = count;
+ if (copy_to_user(buffer, data, size)) {
+ acpi_os_free(fadt.pointer);
+ return_VALUE(-EFAULT);
+ }
+ }
+
acpi_os_free(fadt.pointer);
- return_VALUE(res);
+ *ppos += size;
+
+ return_VALUE(size);
}
if (alignment <= 0x10) {
t = kmalloc (size, flags);
- if ((unsigned long)t & (alignment-1)) {
+ if ((unsigned int)t & (alignment-1)) {
printk ("Kmalloc doesn't align things correctly! %p\n", t);
kfree (t);
return aligned_kmalloc (size, flags, alignment * 4);
#endif
-static const struct atmdev_ops fore200e_ops;
-static const struct fore200e_bus fore200e_bus[];
+extern const struct atmdev_ops fore200e_ops;
+extern const struct fore200e_bus fore200e_bus[];
static struct fore200e* fore200e_boards = NULL;
#define IF_IADBG_SUNI_STAT 0x02000000 // suni statistics
#define IF_IADBG_RESET 0x04000000
+extern unsigned int IADebugFlag;
+
#define IF_IADBG(f) if (IADebugFlag & (f))
#ifdef CONFIG_ATM_IA_DEBUG /* Debug build */
menu "Generic Driver Options"
-config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
- default y
- help
- Select this option if you don't have magic firmware for drivers that
- need it.
-
- If unsure, say Y.
-
config PREVENT_FIRMWARE_BUILD
bool "Prevent firmware from being built"
default y
Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
floppy controller on the Macintosh IIfx and Quadra 900/950.
-config MAC_FLOPPY
- tristate "Support for PowerMac floppy"
- depends on PPC_PMAC && !PPC_PMAC64
- help
- If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
- floppy controller, say Y here. Most commonly found in PowerMacs.
-
config BLK_DEV_PS2
tristate "PS/2 ESDI hard disk support"
depends on MCA && MCA_LEGACY
# kblockd threads
#
-obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o ckrm-iostub.o
+obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
-obj-$(CONFIG_CKRM_RES_BLKIO) += ckrm-io.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_BLK_DEV_FD98) += floppy98.o
static void fd_deselect( void );
static void fd_motor_off_timer( unsigned long dummy );
static void check_change( unsigned long dummy );
+static __inline__ void set_head_settle_flag( void );
+static __inline__ int get_head_settle_flag( void );
static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
static void fd_times_out( unsigned long dummy );
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
+static __inline__ void copy_buffer( void *from, void *to);
static void setup_req_params( int drive );
static void redo_fd_request( void);
static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
static struct timer_list fd_timer =
TIMER_INITIALIZER(check_change, 0, 0);
-static inline void start_motor_off_timer(void)
+static inline void
+start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
MotorOffTrys = 0;
}
-static inline void start_check_change_timer( void )
+static inline void
+start_check_change_timer( void )
{
mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
}
-static inline void start_timeout(void)
+static inline void
+start_timeout(void)
{
mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
}
-static inline void stop_timeout(void)
+static inline void
+stop_timeout(void)
{
del_timer(&timeout_timer);
}
* seek operation, because we don't use seeks with verify.
*/
-static inline void set_head_settle_flag(void)
+static __inline__ void set_head_settle_flag( void )
{
HeadSettleFlag = FDCCMDADD_E;
}
-static inline int get_head_settle_flag(void)
+static __inline__ int get_head_settle_flag( void )
{
int tmp = HeadSettleFlag;
HeadSettleFlag = 0;
return( tmp );
}
-static inline void copy_buffer(void *from, void *to)
-{
- ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
- int cnt;
-
- for (cnt = 512/4; cnt; cnt--)
- *p2++ = *p1++;
-}
-
return 0;
}
+static __inline__ void copy_buffer(void *from, void *to)
+{
+ ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
+ int cnt;
+
+ for( cnt = 512/4; cnt; cnt-- )
+ *p2++ = *p1++;
+}
+
/* This sets up the global variables describing the current request. */
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
-#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/io.h>
int cciss_ioctl32_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
	struct file *file)
{
-	IOCTL32_Command_struct __user *arg32 =
-	(IOCTL32_Command_struct __user *) arg;
+	IOCTL32_Command_struct *arg32 =
+	(IOCTL32_Command_struct *) arg;
	IOCTL_Command_struct arg64;
-	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+	mm_segment_t old_fs;
	int err;
-	u32 cp;
+	unsigned long cp;
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
-	arg64.buf = compat_ptr(cp);
-	err |= copy_to_user(p, &arg64, sizeof(arg64));
+	arg64.buf = (BYTE *)cp;
	if (err)
	return -EFAULT;
-	err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) p);
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) &arg64);
+	set_fs(old_fs);
	if (err)
	return err;
-	err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
+	err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(arg32->error_info));
	if (err)
	return -EFAULT;
	return err;
}
-
int cciss_ioctl32_big_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
	struct file *file)
{
-	BIG_IOCTL32_Command_struct __user *arg32 =
-	(BIG_IOCTL32_Command_struct __user *) arg;
+	BIG_IOCTL32_Command_struct *arg32 =
+	(BIG_IOCTL32_Command_struct *) arg;
	BIG_IOCTL_Command_struct arg64;
-	BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+	mm_segment_t old_fs;
	int err;
-	u32 cp;
+	unsigned long cp;
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
-	arg64.buf = compat_ptr(cp);
-	err |= copy_to_user(p, &arg64, sizeof(arg64));
+	arg64.buf = (BYTE *)cp;
	if (err)
	return -EFAULT;
-	err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) p);
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) &arg64);
+	set_fs(old_fs);
	if (err)
	return err;
-	err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
+	err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(arg32->error_info));
	if (err)
	return -EFAULT;
	return err;
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * IO priorities are supported, from 0% to 100% in 5% increments. Both of
- * those values have special meaning - 0% class is allowed to do io if
- * noone else wants to use the disk. 100% is considered real-time io, and
- * always get priority. Default process io rate is 95%. In absence of other
- * io, a class may consume 100% disk bandwidth regardless. Withing a class,
- * bandwidth is distributed equally among the citizens.
- *
- * TODO:
- * - cfq_select_requests() needs some work for 5-95% io
- * - barriers not supported
- * - export grace periods in ms, not jiffies
- *
* Copyright (C) 2003 Jens Axboe <axboe@suse.de>
*/
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
-#include <asm/div64.h>
-
-#if IOPRIO_NR > BITS_PER_LONG
-#error Cannot support this many io priority levels
-#endif
-
-#define LIMIT_DEBUG 1
/*
* tunables
*/
-static int cfq_quantum = 6;
-static int cfq_quantum_io = 256;
-static int cfq_idle_quantum = 1;
-static int cfq_idle_quantum_io = 64;
-static int cfq_queued = 4;
-static int cfq_grace_rt = HZ / 100 ?: 1;
-static int cfq_grace_idle = HZ / 10;
+static int cfq_quantum = 4;
+static int cfq_queued = 8;
#define CFQ_QHASH_SHIFT 6
#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
+#define list_entry_qhash(entry) list_entry((entry), struct cfq_queue, cfq_hash)
#define CFQ_MHASH_SHIFT 8
#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec) (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
+#define ON_MHASH(crq) !list_empty(&(crq)->hash)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
+#define list_entry_hash(ptr) list_entry((ptr), struct cfq_rq, hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
-#define list_entry_prio(ptr) list_entry((ptr), struct cfq_rq, prio_list)
-
-#define cfq_account_io(crq) \
- ((crq)->ioprio != IOPRIO_IDLE && (crq)->ioprio != IOPRIO_RT)
-
-/* define to be 50 ms for now; make tunable later */
-#define CFQ_EPOCH 50000
-/* Needs to be made tunable right away, in MiB/s */
-#define CFQ_DISKBW 10
-/* Temporary global limit, as percent of available b/w, for each "class" */
-#define CFQ_TEMPLIM 10
-
-/*
- * defines how we distribute bandwidth (can be tgid, uid, etc)
- */
-
-/* FIXME: change hash_key to be sizeof(void *) rather than sizeof(int)
- * otherwise the cast of cki_tsk_icls will not work reliably on 64-bit arches.
- * OR, change cki_tsk_icls to return ints (will need another id space to be
- * managed)
- */
-
-#if defined(CONFIG_CKRM_RES_BLKIO) || defined(CONFIG_CKRM_RES_BLKIO_MODULE)
-extern inline void *cki_hash_key(struct task_struct *tsk);
-extern inline int cki_ioprio(struct task_struct *tsk);
-#define cfq_hash_key(current) ((int)cki_hash_key((current)))
-#define cfq_ioprio(current) (cki_ioprio((current)))
-
-#else
-#define cfq_hash_key(current) ((current)->tgid)
-
-/*
- * move to io_context
- */
-#define cfq_ioprio(current) ((current)->ioprio)
-#endif
-#define CFQ_WAIT_RT 0
-#define CFQ_WAIT_NORM 1
+#define RQ_DATA(rq) ((struct cfq_rq *) (rq)->elevator_private)
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static mempool_t *cfq_mpool;
-/*
- * defines an io priority level
- */
-struct io_prio_data {
- struct list_head rr_list;
- int busy_queues;
- int busy_rq;
- unsigned long busy_sectors;
-
- /* requests, sectors and queues
- * added(in),dispatched/deleted(out)
- * at this priority level.
- */
- atomic_t cum_rq_in,cum_rq_out;
- atomic_t cum_sectors_in,cum_sectors_out;
- atomic_t cum_queues_in,cum_queues_out;
-
-#ifdef LIMIT_DEBUG
- int nskip;
- unsigned long navsec;
- unsigned long csectorate;
- unsigned long lsectorate;
-#endif
-
- struct list_head prio_list;
- int last_rq;
- int last_sectors;
-};
-
-/*
- * per-request queue structure
- */
struct cfq_data {
struct list_head rr_list;
struct list_head *dispatch;
- struct hlist_head *cfq_hash;
- struct hlist_head *crq_hash;
- mempool_t *crq_pool;
+ struct list_head *cfq_hash;
- struct io_prio_data cid[IOPRIO_NR];
+ struct list_head *crq_hash;
- /*
- * total number of busy queues and requests
- */
- int busy_rq;
- int busy_queues;
- unsigned long busy_sectors;
+ unsigned int busy_queues;
+ unsigned int max_queued;
+ mempool_t *crq_pool;
request_queue_t *queue;
- unsigned long rq_starved_mask;
-
- /*
- * grace period handling
- */
- struct timer_list timer;
- unsigned long wait_end;
- unsigned long flags;
- struct work_struct work;
/*
* tunables
*/
unsigned int cfq_quantum;
- unsigned int cfq_quantum_io;
- unsigned int cfq_idle_quantum;
- unsigned int cfq_idle_quantum_io;
unsigned int cfq_queued;
- unsigned int cfq_grace_rt;
- unsigned int cfq_grace_idle;
-
- unsigned long cfq_epoch; /* duration for limit enforcement */
- unsigned long cfq_epochsectors; /* max sectors dispatchable/epoch */
};
-/*
- * per-class structure
- */
struct cfq_queue {
+ struct list_head cfq_hash;
struct list_head cfq_list;
- struct hlist_node cfq_hash;
- int hash_key;
struct rb_root sort_list;
+ int pid;
int queued[2];
- int ioprio;
-
- unsigned long avsec; /* avg sectors dispatched/epoch */
- unsigned long long lastime; /* timestamp of last request served */
- unsigned long sectorate; /* limit for sectors served/epoch */
- int skipped; /* queue skipped at last dispatch ? */
+#if 0
+ /*
+ * with a simple addition like this, we can do io priorities. almost.
+ * does need a split request free list, too.
+ */
+ int io_prio
+#endif
};
-/*
- * per-request structure
- */
struct cfq_rq {
- struct cfq_queue *cfq_queue;
struct rb_node rb_node;
- struct hlist_node hash;
sector_t rb_key;
struct request *request;
- struct list_head prio_list;
- unsigned long nr_sectors;
- int ioprio;
+
+ struct cfq_queue *cfq_queue;
+
+ struct list_head hash;
};
static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
+static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
+{
+ list_del_init(&crq->hash);
+}
+
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
- hlist_del_init(&crq->hash);
+ if (ON_MHASH(crq))
+ __cfq_del_crq_hash(crq);
}
-static inline void
-cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
cfq_del_crq_hash(crq);
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
struct request *rq = crq->request;
- const int hash_idx = CFQ_MHASH_FN(rq_hash_key(rq));
- BUG_ON(!hlist_unhashed(&crq->hash));
-
- hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
+ BUG_ON(ON_MHASH(crq));
+
+ list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
}
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
- struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
- struct hlist_node *entry, *next;
+ struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+ struct list_head *entry, *next = hash_list->next;
- hlist_for_each_safe(entry, next, hash_list) {
+ while ((entry = next) != hash_list) {
struct cfq_rq *crq = list_entry_hash(entry);
struct request *__rq = crq->request;
- BUG_ON(hlist_unhashed(&crq->hash));
+ next = entry->next;
+
+ BUG_ON(!ON_MHASH(crq));
if (!rq_mergeable(__rq)) {
- cfq_del_crq_hash(crq);
+ __cfq_del_crq_hash(crq);
continue;
}
/*
* rb tree support functions
*/
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
+#define RB_NONE (2)
+#define RB_EMPTY(node) ((node)->rb_node == NULL)
+#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
+#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
+#define ON_RB(node) ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
-static void
-cfq_del_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
+static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
- if (crq->cfq_queue) {
- crq->cfq_queue = NULL;
-
- if (cfq_account_io(crq)) {
- cfqd->busy_rq--;
- cfqd->busy_sectors -= crq->nr_sectors;
- cfqd->cid[crq->ioprio].busy_rq--;
- cfqd->cid[crq->ioprio].busy_sectors -= crq->nr_sectors;
- }
- atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_out));
- atomic_add(crq->nr_sectors,
- &(cfqd->cid[crq->ioprio].cum_sectors_out));
+ if (ON_RB(&crq->rb_node)) {
cfqq->queued[rq_data_dir(crq->request)]--;
rb_erase(&crq->rb_node, &cfqq->sort_list);
+ crq->cfq_queue = NULL;
}
}
struct request *rq = crq->request;
struct cfq_rq *__alias;
-
+ crq->rb_key = rq_rb_key(rq);
cfqq->queued[rq_data_dir(rq)]++;
- if (cfq_account_io(crq)) {
- cfqd->busy_rq++;
- cfqd->busy_sectors += crq->nr_sectors;
- cfqd->cid[crq->ioprio].busy_rq++;
- cfqd->cid[crq->ioprio].busy_sectors += crq->nr_sectors;
- }
- atomic_inc(&(cfqd->cid[crq->ioprio].cum_rq_in));
- atomic_add(crq->nr_sectors,
- &(cfqd->cid[crq->ioprio].cum_sectors_in));
retry:
__alias = __cfq_add_crq_rb(cfqq, crq);
if (!__alias) {
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
- crq->rb_key = rq_rb_key(rq);
crq->cfq_queue = cfqq;
return;
}
static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
+ struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
struct rb_node *n;
if (!cfqq)
static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
if (crq) {
+ struct cfq_queue *cfqq = crq->cfq_queue;
cfq_remove_merge_hints(q, crq);
- list_del_init(&crq->prio_list);
list_del_init(&rq->queuelist);
- /*
- * set a grace period timer to allow realtime io to make real
- * progress, if we release an rt request. for normal request,
- * set timer so idle io doesn't interfere with other io
- */
- if (crq->ioprio == IOPRIO_RT) {
- set_bit(CFQ_WAIT_RT, &cfqd->flags);
- cfqd->wait_end = jiffies + cfqd->cfq_grace_rt;
- } else if (crq->ioprio != IOPRIO_IDLE) {
- set_bit(CFQ_WAIT_NORM, &cfqd->flags);
- cfqd->wait_end = jiffies + cfqd->cfq_grace_idle;
- }
-
- if (crq->cfq_queue) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ if (cfqq) {
+ cfq_del_crq_rb(cfqq, crq);
if (RB_EMPTY(&cfqq->sort_list))
cfq_put_queue(cfqd, cfqq);
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(req);
- int tmp;
+ struct cfq_rq *crq = RQ_DATA(req);
cfq_del_crq_hash(crq);
cfq_add_crq_hash(cfqd, crq);
- if (crq->cfq_queue && (rq_rb_key(req) != crq->rb_key)) {
+ if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
struct cfq_queue *cfqq = crq->cfq_queue;
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ cfq_del_crq_rb(cfqq, crq);
cfq_add_crq_rb(cfqd, cfqq, crq);
}
- tmp = req->hard_nr_sectors - crq->nr_sectors;
- cfqd->busy_sectors += tmp;
- cfqd->cid[crq->ioprio].busy_sectors += tmp;
- atomic_add(tmp,&(cfqd->cid[crq->ioprio].cum_sectors_in));
-
- crq->nr_sectors = req->hard_nr_sectors;
-
q->last_merge = req;
}
cfq_remove_request(q, next);
}
-/*
- * sort into dispatch list, in optimal ascending order
- */
static void
cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq)
struct list_head *head = cfqd->dispatch, *entry = head;
struct request *__rq;
- cfq_del_crq_rb(cfqd, cfqq, crq);
+ cfq_del_crq_rb(cfqq, crq);
cfq_remove_merge_hints(cfqd->queue, crq);
if (!list_empty(head)) {
list_add_tail(&crq->request->queuelist, entry);
}
-/*
- * remove from io scheduler core and put on dispatch list for service
- */
-static inline int
+static inline void
__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
- struct cfq_rq *crq;
- unsigned long long ts, gap;
- unsigned long newavsec;
-
- crq = rb_entry_crq(rb_first(&cfqq->sort_list));
-
-#if 1
- /* Determine if queue should be skipped for being overshare */
- ts = sched_clock();
- gap = ts - cfqq->lastime;
-#ifdef LIMIT_DEBUG
- cfqq->sectorate = (cfqd->cfq_epochsectors
- * CFQ_TEMPLIM)/100;
-
-#endif
- if ((gap >= cfqd->cfq_epoch) || (gap < 0)) {
- cfqq->avsec = crq->nr_sectors ;
- cfqq->lastime = ts;
- } else {
- u64 tmp;
- /* Age old average and accumalate request to be served */
-
-// tmp = (u64) (cfqq->avsec * gap) ;
-// do_div(tmp, cfqd->cfq_epoch);
- newavsec = (unsigned long)(cfqq->avsec >> 1) + crq->nr_sectors;
-// if (crq->ioprio >= 0 && crq->ioprio <= 20)
-// cfqd->cid[crq->ioprio].lsectorate = newavsec;
-// atomic_set(&(cfqd->cid[crq->ioprio].lsectorate),
-// newavsec);
-
- if ((newavsec < cfqq->sectorate) || cfqq->skipped) {
- cfqq->avsec = newavsec ;
- cfqq->lastime = ts;
- cfqq->skipped = 0;
- } else {
- /* queue over share ; skip once */
- cfqq->skipped = 1;
-#ifdef LIMIT_DEBUG
-// atomic_inc(&(cfqd->cid[crq->ioprio].nskip));
-// if (crq->ioprio >= 0 && crq->ioprio <= 20)
-// cfqd->cid[crq->ioprio].nskip++;
-#endif
- return 0;
- }
- }
-#endif
-
-#ifdef LIMIT_DEBUG
-// if (crq->ioprio >= 0 && crq->ioprio <= 20) {
-// cfqd->cid[crq->ioprio].navsec = cfqq->avsec;
-// cfqd->cid[crq->ioprio].csectorate = cfqq->sectorate;
-// }
+ struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
-// atomic_set(&(cfqd->cid[crq->ioprio].navsec),cfqq->avsec);
-// atomic_set(&(cfqd->cid[crq->ioprio].csectorate),cfqq->sectorate);
-#endif
cfq_dispatch_sort(cfqd, cfqq, crq);
-
- /*
- * technically, for IOPRIO_RT we don't need to add it to the list.
- */
- list_add_tail(&crq->prio_list, &cfqd->cid[cfqq->ioprio].prio_list);
- return crq->nr_sectors;
}
-static int
-cfq_dispatch_requests(request_queue_t *q, int prio, int max_rq, int max_sectors)
+static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
- struct list_head *plist = &cfqd->cid[prio].rr_list;
- struct list_head *entry, *nxt;
- int q_rq, q_io;
- int ret ;
+ struct cfq_queue *cfqq;
+ struct list_head *entry, *tmp;
+ int ret, queued, good_queues;
- /*
- * for each queue at this prio level, dispatch a request
- */
- q_rq = q_io = 0;
- list_for_each_safe(entry, nxt, plist) {
- struct cfq_queue *cfqq = list_entry_cfqq(entry);
+ if (list_empty(&cfqd->rr_list))
+ return 0;
+
+ queued = ret = 0;
+restart:
+ good_queues = 0;
+ list_for_each_safe(entry, tmp, &cfqd->rr_list) {
+ cfqq = list_entry_cfqq(cfqd->rr_list.next);
BUG_ON(RB_EMPTY(&cfqq->sort_list));
- ret = __cfq_dispatch_requests(q, cfqd, cfqq);
- if (ret <= 0) {
- continue; /* skip queue */
- /* can optimize more by moving q to end of plist ? */
- }
- q_io += ret ;
- q_rq++ ;
+ __cfq_dispatch_requests(q, cfqd, cfqq);
if (RB_EMPTY(&cfqq->sort_list))
cfq_put_queue(cfqd, cfqq);
- /*
- * if we hit the queue limit, put the string of serviced
- * queues at the back of the pending list
- */
- if (q_io >= max_sectors || q_rq >= max_rq) {
- struct list_head *prv = nxt->prev;
-
- if (prv != plist) {
- list_del(plist);
- list_add(plist, prv);
- }
- break;
- }
- }
-
- cfqd->cid[prio].last_rq = q_rq;
- cfqd->cid[prio].last_sectors = q_io;
- return q_rq;
-}
-
-/*
- * try to move some requests to the dispatch list. return 0 on success
- */
-static int cfq_select_requests(request_queue_t *q, struct cfq_data *cfqd)
-{
- int queued, busy_rq, busy_sectors, i;
-
- /*
- * if there's any realtime io, only schedule that
- */
- if (cfq_dispatch_requests(q, IOPRIO_RT, cfqd->cfq_quantum, cfqd->cfq_quantum_io))
- return 1;
-
- /*
- * if RT io was last serviced and grace time hasn't expired,
- * arm the timer to restart queueing if no other RT io has been
- * submitted in the mean time
- */
- if (test_bit(CFQ_WAIT_RT, &cfqd->flags)) {
- if (time_before(jiffies, cfqd->wait_end)) {
- mod_timer(&cfqd->timer, cfqd->wait_end);
- return 0;
- }
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- }
-
- /*
- * for each priority level, calculate number of requests we
- * are allowed to put into service.
- */
- queued = 0;
- busy_rq = cfqd->busy_rq;
- busy_sectors = cfqd->busy_sectors;
- for (i = IOPRIO_RT - 1; i > IOPRIO_IDLE; i--) {
- const int o_rq = busy_rq - cfqd->cid[i].busy_rq;
- const int o_sectors = busy_sectors - cfqd->cid[i].busy_sectors;
- int q_rq = cfqd->cfq_quantum * (i + 1) / IOPRIO_NR;
- int q_io = cfqd->cfq_quantum_io * (i + 1) / IOPRIO_NR;
-
- /*
- * no need to keep iterating the list, if there are no
- * requests pending anymore
- */
- if (!cfqd->busy_rq)
- break;
-
- /*
- * find out how many requests and sectors we are allowed to
- * service
- */
- if (o_rq)
- q_rq = o_sectors * (i + 1) / IOPRIO_NR;
- if (q_rq > cfqd->cfq_quantum)
- q_rq = cfqd->cfq_quantum;
-
- if (o_sectors)
- q_io = o_sectors * (i + 1) / IOPRIO_NR;
- if (q_io > cfqd->cfq_quantum_io)
- q_io = cfqd->cfq_quantum_io;
-
- /*
- * average with last dispatched for fairness
- */
- if (cfqd->cid[i].last_rq != -1)
- q_rq = (cfqd->cid[i].last_rq + q_rq) / 2;
- if (cfqd->cid[i].last_sectors != -1)
- q_io = (cfqd->cid[i].last_sectors + q_io) / 2;
-
- queued += cfq_dispatch_requests(q, i, q_rq, q_io);
- }
-
- if (queued)
- return 1;
+ else
+ good_queues++;
- /*
- * only allow dispatch of idle io, if the queue has been idle from
- * servicing RT or normal io for the grace period
- */
- if (test_bit(CFQ_WAIT_NORM, &cfqd->flags)) {
- if (time_before(jiffies, cfqd->wait_end)) {
- mod_timer(&cfqd->timer, cfqd->wait_end);
- return 0;
- }
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
+ queued++;
+ ret = 1;
}
- /*
- * if we found nothing to do, allow idle io to be serviced
- */
- if (cfq_dispatch_requests(q, IOPRIO_IDLE, cfqd->cfq_idle_quantum, cfqd->cfq_idle_quantum_io))
- return 1;
+ if ((queued < cfqd->cfq_quantum) && good_queues)
+ goto restart;
- return 0;
+ return ret;
}
static struct request *cfq_next_request(request_queue_t *q)
if (!list_empty(cfqd->dispatch)) {
struct cfq_rq *crq;
dispatch:
- /*
- * end grace period, we are servicing a request
- */
- del_timer(&cfqd->timer);
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
-
- BUG_ON(list_empty(cfqd->dispatch));
rq = list_entry_rq(cfqd->dispatch->next);
- BUG_ON(q->last_merge == rq);
- crq = RQ_ELV_DATA(rq);
- if (crq) {
- BUG_ON(!hlist_unhashed(&crq->hash));
- list_del_init(&crq->prio_list);
- }
+ crq = RQ_DATA(rq);
+ if (crq)
+ cfq_remove_merge_hints(q, crq);
return rq;
}
- /*
- * we moved requests to dispatch list, go back end serve one
- */
- if (cfq_select_requests(q, cfqd))
+ if (cfq_dispatch_requests(q, cfqd))
goto dispatch;
return NULL;
}
static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
{
- struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
- struct hlist_node *entry;
+ struct list_head *hash_list = &cfqd->cfq_hash[hashval];
+ struct list_head *entry;
- hlist_for_each(entry, hash_list) {
+ list_for_each(entry, hash_list) {
struct cfq_queue *__cfqq = list_entry_qhash(entry);
- if (__cfqq->hash_key == hashkey)
+ if (__cfqq->pid == pid)
return __cfqq;
}
return NULL;
}
-
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int hashkey)
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
{
- const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+ const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
- return __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+ return __cfq_find_cfq_hash(cfqd, pid, hashval);
}
static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfqd->busy_queues--;
- WARN_ON(cfqd->busy_queues < 0);
-
- cfqd->cid[cfqq->ioprio].busy_queues--;
- WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues < 0);
- atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
-
list_del(&cfqq->cfq_list);
- hlist_del(&cfqq->cfq_hash);
+ list_del(&cfqq->cfq_hash);
mempool_free(cfqq, cfq_mpool);
}
-static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int hashkey,
+static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
int gfp_mask)
{
- const int hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+ const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
struct cfq_queue *cfqq, *new_cfqq = NULL;
request_queue_t *q = cfqd->queue;
retry:
- cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+ cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
if (!cfqq) {
if (new_cfqq) {
} else
return NULL;
- memset(cfqq, 0, sizeof(*cfqq));
- INIT_HLIST_NODE(&cfqq->cfq_hash);
+ INIT_LIST_HEAD(&cfqq->cfq_hash);
INIT_LIST_HEAD(&cfqq->cfq_list);
- cfqq->hash_key = cfq_hash_key(current);
- cfqq->ioprio = cfq_ioprio(current);
- cfqq->avsec = 0 ;
- cfqq->lastime = sched_clock();
- cfqq->sectorate = (cfqd->cfq_epochsectors * CFQ_TEMPLIM)/100;
- hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ RB_CLEAR_ROOT(&cfqq->sort_list);
+
+ cfqq->pid = pid;
+ cfqq->queued[0] = cfqq->queued[1] = 0;
+ list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
}
if (new_cfqq)
return cfqq;
}
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int hashkey,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
int gfp_mask)
{
request_queue_t *q = cfqd->queue;
struct cfq_queue *cfqq;
spin_lock_irq(q->queue_lock);
- cfqq = __cfq_get_queue(cfqd, hashkey, gfp_mask);
+ cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
spin_unlock_irq(q->queue_lock);
return cfqq;
}
-static void
-__cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
+static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
- const int prio = crq->ioprio;
struct cfq_queue *cfqq;
- cfqq = __cfq_get_queue(cfqd, cfq_hash_key(current), GFP_ATOMIC);
+ cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
if (cfqq) {
-
- /*
- * not too good...
- */
- if (prio > cfqq->ioprio) {
- printk("prio hash collision %d %d\n",
- prio, cfqq->ioprio);
- if (!list_empty(&cfqq->cfq_list)) {
- cfqd->cid[cfqq->ioprio].busy_queues--;
- WARN_ON(cfqd->cid[cfqq->ioprio].busy_queues<0);
- atomic_inc(&(cfqd->cid[cfqq->ioprio].cum_queues_out));
- cfqd->cid[prio].busy_queues++;
- atomic_inc(&(cfqd->cid[prio].cum_queues_in));
- list_move_tail(&cfqq->cfq_list,
- &cfqd->cid[prio].rr_list);
- }
- cfqq->ioprio = prio;
- }
-
cfq_add_crq_rb(cfqd, cfqq, crq);
if (list_empty(&cfqq->cfq_list)) {
- list_add_tail(&cfqq->cfq_list,
- &cfqd->cid[prio].rr_list);
- cfqd->cid[prio].busy_queues++;
- atomic_inc(&(cfqd->cid[prio].cum_queues_in));
+ list_add(&cfqq->cfq_list, &cfqd->rr_list);
cfqd->busy_queues++;
}
-
- if (rq_mergeable(crq->request)) {
- cfq_add_crq_hash(cfqd, crq);
-
- if (!q->last_merge)
- q->last_merge = crq->request;
- }
-
} else {
/*
* should can only happen if the request wasn't allocated
}
}
-static void cfq_reenqueue(request_queue_t *q, struct cfq_data *cfqd, int prio)
-{
- struct list_head *prio_list = &cfqd->cid[prio].prio_list;
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, prio_list) {
- struct cfq_rq *crq = list_entry_prio(entry);
-
- list_del_init(entry);
- list_del_init(&crq->request->queuelist);
- __cfq_enqueue(q, cfqd, crq);
- }
-}
-
-static void
-cfq_enqueue(request_queue_t *q, struct cfq_data *cfqd, struct cfq_rq *crq)
-{
- const int prio = cfq_ioprio(current);
-
- crq->ioprio = prio;
- crq->nr_sectors = crq->request->hard_nr_sectors;
- __cfq_enqueue(q, cfqd, crq);
-
- if (prio == IOPRIO_RT) {
- int i;
-
- /*
- * realtime io gets priority, move all other io back
- */
- for (i = IOPRIO_IDLE; i < IOPRIO_RT; i++)
- cfq_reenqueue(q, cfqd, i);
- } else if (prio != IOPRIO_IDLE) {
- /*
- * check if we need to move idle io back into queue
- */
- cfq_reenqueue(q, cfqd, IOPRIO_IDLE);
- }
-}
-
static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
switch (where) {
case ELEVATOR_INSERT_BACK:
-#if 0
while (cfq_dispatch_requests(q, cfqd))
;
-#endif
list_add_tail(&rq->queuelist, cfqd->dispatch);
break;
case ELEVATOR_INSERT_FRONT:
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(q, cfqd, crq);
+ cfq_enqueue(cfqd, crq);
break;
default:
- printk("%s: bad insert point %d\n",
- __FUNCTION__,where);
+ printk("%s: bad insert point %d\n", __FUNCTION__,where);
return;
}
+
+ if (rq_mergeable(rq)) {
+ cfq_add_crq_hash(cfqd, crq);
+
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
}
static int cfq_queue_empty(request_queue_t *q)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- if (list_empty(cfqd->dispatch) && !cfqd->busy_queues)
+ if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
return 1;
return 0;
static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct rb_node *rbprev = rb_prev(&crq->rb_node);
if (rbprev)
static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct rb_node *rbnext = rb_next(&crq->rb_node);
if (rbnext)
return NULL;
}
-static void cfq_queue_congested(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator.elevator_data;
-
- set_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
-}
-
static int cfq_may_queue(request_queue_t *q, int rw)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
struct cfq_queue *cfqq;
- const int prio = cfq_ioprio(current);
- int limit, ret = 1;
+ int ret = 1;
if (!cfqd->busy_queues)
goto out;
- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
- if (!cfqq)
- goto out;
-
- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(current));
- if (!cfqq)
- goto out;
-
- /*
- * if higher or equal prio io is sleeping waiting for a request, don't
- * allow this one to allocate one. as long as ll_rw_blk does fifo
- * waitqueue wakeups this should work...
- */
- if (cfqd->rq_starved_mask & ~((1 << prio) - 1))
- goto out;
+ cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+ if (cfqq) {
+ int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
- if (cfqq->queued[rw] < cfqd->cfq_queued || !cfqd->cid[prio].busy_queues)
- goto out;
+ if (limit < 3)
+ limit = 3;
+ else if (limit > cfqd->max_queued)
+ limit = cfqd->max_queued;
- limit = q->nr_requests * (prio + 1) / IOPRIO_NR;
- limit /= cfqd->cid[prio].busy_queues;
- if (cfqq->queued[rw] > limit)
- ret = 0;
+ if (cfqq->queued[rw] > limit)
+ ret = 0;
+ }
out:
return ret;
}
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator.elevator_data;
- struct cfq_rq *crq = RQ_ELV_DATA(rq);
+ struct cfq_rq *crq = RQ_DATA(rq);
struct request_list *rl;
int other_rw;
if (crq) {
BUG_ON(q->last_merge == rq);
- BUG_ON(!hlist_unhashed(&crq->hash));
+ BUG_ON(ON_MHASH(crq));
mempool_free(crq, cfqd->crq_pool);
rq->elevator_private = NULL;
/*
* prepare a queue up front, so cfq_enqueue() doesn't have to
*/
- cfqq = cfq_get_queue(cfqd, cfq_hash_key(current), gfp_mask);
+ cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
if (!cfqq)
return 1;
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- /*
- * process now has one request
- */
- clear_bit(cfq_ioprio(current), &cfqd->rq_starved_mask);
-
memset(crq, 0, sizeof(*crq));
+ RB_CLEAR(&crq->rb_node);
crq->request = rq;
- INIT_HLIST_NODE(&crq->hash);
- INIT_LIST_HEAD(&crq->prio_list);
+ crq->cfq_queue = NULL;
+ INIT_LIST_HEAD(&crq->hash);
rq->elevator_private = crq;
return 0;
}
kfree(cfqd);
}
-static void cfq_timer(unsigned long data)
-{
- struct cfq_data *cfqd = (struct cfq_data *) data;
-
- clear_bit(CFQ_WAIT_RT, &cfqd->flags);
- clear_bit(CFQ_WAIT_NORM, &cfqd->flags);
- kblockd_schedule_work(&cfqd->work);
-}
-
-static void cfq_work(void *data)
-{
- request_queue_t *q = data;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (cfq_next_request(q))
- q->request_fn(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
static int cfq_init(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
return -ENOMEM;
memset(cfqd, 0, sizeof(*cfqd));
- init_timer(&cfqd->timer);
- cfqd->timer.function = cfq_timer;
- cfqd->timer.data = (unsigned long) cfqd;
-
- INIT_WORK(&cfqd->work, cfq_work, q);
-
- for (i = 0; i < IOPRIO_NR; i++) {
- struct io_prio_data *cid = &cfqd->cid[i];
-
- INIT_LIST_HEAD(&cid->rr_list);
- INIT_LIST_HEAD(&cid->prio_list);
- cid->last_rq = -1;
- cid->last_sectors = -1;
-
- atomic_set(&cid->cum_rq_in,0);
- atomic_set(&cid->cum_rq_out,0);
- atomic_set(&cid->cum_sectors_in,0);
- atomic_set(&cid->cum_sectors_out,0);
- atomic_set(&cid->cum_queues_in,0);
- atomic_set(&cid->cum_queues_out,0);
-#if 0
- atomic_set(&cid->nskip,0);
- atomic_set(&cid->navsec,0);
- atomic_set(&cid->csectorate,0);
- atomic_set(&cid->lsectorate,0);
-#endif
- }
+ INIT_LIST_HEAD(&cfqd->rr_list);
- cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES,
- GFP_KERNEL);
+ cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->crq_hash)
goto out_crqhash;
- cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES,
- GFP_KERNEL);
+ cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->cfq_hash)
goto out_cfqhash;
- cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, crq_pool);
+ cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
if (!cfqd->crq_pool)
goto out_crqpool;
for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
+ INIT_LIST_HEAD(&cfqd->crq_hash[i]);
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
- INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
- cfqd->cfq_queued = cfq_queued;
- cfqd->cfq_quantum = cfq_quantum;
- cfqd->cfq_quantum_io = cfq_quantum_io;
- cfqd->cfq_idle_quantum = cfq_idle_quantum;
- cfqd->cfq_idle_quantum_io = cfq_idle_quantum_io;
- cfqd->cfq_grace_rt = cfq_grace_rt;
- cfqd->cfq_grace_idle = cfq_grace_idle;
-
- q->nr_requests <<= 2;
+ INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
cfqd->dispatch = &q->queue_head;
e->elevator_data = cfqd;
cfqd->queue = q;
- cfqd->cfq_epoch = CFQ_EPOCH;
- if (q->hardsect_size)
- cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/
- q->hardsect_size)* (1000000 / CFQ_EPOCH);
- else
- cfqd->cfq_epochsectors = ((CFQ_DISKBW * 1000000)/512)
- * (1000000 / CFQ_EPOCH) ;
+ /*
+ * just set it to some high value, we want anyone to be able to queue
+ * some requests. fairness is handled differently
+ */
+ cfqd->max_queued = q->nr_requests;
+ q->nr_requests = 8192;
+
+ cfqd->cfq_queued = cfq_queued;
+ cfqd->cfq_quantum = cfq_quantum;
return 0;
out_crqpool:
return cfq_var_show(__VAR, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
-SHOW_FUNCTION(cfq_quantum_io_show, cfqd->cfq_quantum_io);
-SHOW_FUNCTION(cfq_idle_quantum_show, cfqd->cfq_idle_quantum);
-SHOW_FUNCTION(cfq_idle_quantum_io_show, cfqd->cfq_idle_quantum_io);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
-SHOW_FUNCTION(cfq_grace_rt_show, cfqd->cfq_grace_rt);
-SHOW_FUNCTION(cfq_grace_idle_show, cfqd->cfq_grace_idle);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
-STORE_FUNCTION(cfq_quantum_io_store, &cfqd->cfq_quantum_io, 4, INT_MAX);
-STORE_FUNCTION(cfq_idle_quantum_store, &cfqd->cfq_idle_quantum, 1, INT_MAX);
-STORE_FUNCTION(cfq_idle_quantum_io_store, &cfqd->cfq_idle_quantum_io, 4, INT_MAX);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
-STORE_FUNCTION(cfq_grace_rt_store, &cfqd->cfq_grace_rt, 0, INT_MAX);
-STORE_FUNCTION(cfq_grace_idle_store, &cfqd->cfq_grace_idle, 0, INT_MAX);
#undef STORE_FUNCTION
-
-static ssize_t cfq_epoch_show(struct cfq_data *cfqd, char *page)
-{
- return sprintf(page, "%lu\n", cfqd->cfq_epoch);
-}
-
-static ssize_t cfq_epoch_store(struct cfq_data *cfqd, const char *page, size_t count)
-{
- char *p = (char *) page;
- cfqd->cfq_epoch = simple_strtoul(p, &p, 10);
- return count;
-}
-
-static ssize_t cfq_epochsectors_show(struct cfq_data *cfqd, char *page)
-{
- return sprintf(page, "%lu\n", cfqd->cfq_epochsectors);
-}
-
-static ssize_t
-cfq_epochsectors_store(struct cfq_data *cfqd, const char *page, size_t count)
-{
- char *p = (char *) page;
- cfqd->cfq_epochsectors = simple_strtoul(p, &p, 10);
- return count;
-}
-
-/* Additional entries to get priority level data */
-static ssize_t
-cfq_prio_show(struct cfq_data *cfqd, char *page, unsigned int priolvl)
-{
- int r1,r2,s1,s2,q1,q2;
-
- if (!(priolvl >= IOPRIO_IDLE && priolvl <= IOPRIO_RT))
- return 0;
-
- r1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_in));
- r2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_rq_out));
- s1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_in));
- s2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_sectors_out));
- q1 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_in));
- q2 = (int)atomic_read(&(cfqd->cid[priolvl].cum_queues_out));
-
- return sprintf(page,"skip %d avsec %lu rate %lu new %lu"
- "rq (%d,%d) sec (%d,%d) q (%d,%d)\n",
- cfqd->cid[priolvl].nskip,
- cfqd->cid[priolvl].navsec,
- cfqd->cid[priolvl].csectorate,
- cfqd->cid[priolvl].lsectorate,
-// atomic_read(&cfqd->cid[priolvl].nskip),
-// atomic_read(&cfqd->cid[priolvl].navsec),
-// atomic_read(&cfqd->cid[priolvl].csectorate),
-// atomic_read(&cfqd->cid[priolvl].lsectorate),
- r1,r2,
- s1,s2,
- q1,q2);
-}
-
-#define SHOW_PRIO_DATA(__PRIOLVL) \
-static ssize_t cfq_prio_##__PRIOLVL##_show(struct cfq_data *cfqd, char *page) \
-{ \
- return cfq_prio_show(cfqd,page,__PRIOLVL); \
-}
-SHOW_PRIO_DATA(0);
-SHOW_PRIO_DATA(1);
-SHOW_PRIO_DATA(2);
-SHOW_PRIO_DATA(3);
-SHOW_PRIO_DATA(4);
-SHOW_PRIO_DATA(5);
-SHOW_PRIO_DATA(6);
-SHOW_PRIO_DATA(7);
-SHOW_PRIO_DATA(8);
-SHOW_PRIO_DATA(9);
-SHOW_PRIO_DATA(10);
-SHOW_PRIO_DATA(11);
-SHOW_PRIO_DATA(12);
-SHOW_PRIO_DATA(13);
-SHOW_PRIO_DATA(14);
-SHOW_PRIO_DATA(15);
-SHOW_PRIO_DATA(16);
-SHOW_PRIO_DATA(17);
-SHOW_PRIO_DATA(18);
-SHOW_PRIO_DATA(19);
-SHOW_PRIO_DATA(20);
-#undef SHOW_PRIO_DATA
-
-
-static ssize_t cfq_prio_store(struct cfq_data *cfqd, const char *page, size_t count, int priolvl)
-{
- atomic_set(&(cfqd->cid[priolvl].cum_rq_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_rq_out),0);
- atomic_set(&(cfqd->cid[priolvl].cum_sectors_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_sectors_out),0);
- atomic_set(&(cfqd->cid[priolvl].cum_queues_in),0);
- atomic_set(&(cfqd->cid[priolvl].cum_queues_out),0);
-
- return count;
-}
-
-
-#define STORE_PRIO_DATA(__PRIOLVL) \
-static ssize_t cfq_prio_##__PRIOLVL##_store(struct cfq_data *cfqd, const char *page, size_t count) \
-{ \
- return cfq_prio_store(cfqd,page,count,__PRIOLVL); \
-}
-STORE_PRIO_DATA(0);
-STORE_PRIO_DATA(1);
-STORE_PRIO_DATA(2);
-STORE_PRIO_DATA(3);
-STORE_PRIO_DATA(4);
-STORE_PRIO_DATA(5);
-STORE_PRIO_DATA(6);
-STORE_PRIO_DATA(7);
-STORE_PRIO_DATA(8);
-STORE_PRIO_DATA(9);
-STORE_PRIO_DATA(10);
-STORE_PRIO_DATA(11);
-STORE_PRIO_DATA(12);
-STORE_PRIO_DATA(13);
-STORE_PRIO_DATA(14);
-STORE_PRIO_DATA(15);
-STORE_PRIO_DATA(16);
-STORE_PRIO_DATA(17);
-STORE_PRIO_DATA(18);
-STORE_PRIO_DATA(19);
-STORE_PRIO_DATA(20);
-#undef STORE_PRIO_DATA
-
-
static struct cfq_fs_entry cfq_quantum_entry = {
.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
.show = cfq_quantum_show,
.store = cfq_quantum_store,
};
-static struct cfq_fs_entry cfq_quantum_io_entry = {
- .attr = {.name = "quantum_io", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_quantum_io_show,
- .store = cfq_quantum_io_store,
-};
-static struct cfq_fs_entry cfq_idle_quantum_entry = {
- .attr = {.name = "idle_quantum", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_idle_quantum_show,
- .store = cfq_idle_quantum_store,
-};
-static struct cfq_fs_entry cfq_idle_quantum_io_entry = {
- .attr = {.name = "idle_quantum_io", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_idle_quantum_io_show,
- .store = cfq_idle_quantum_io_store,
-};
static struct cfq_fs_entry cfq_queued_entry = {
.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
.show = cfq_queued_show,
.store = cfq_queued_store,
};
-static struct cfq_fs_entry cfq_grace_rt_entry = {
- .attr = {.name = "grace_rt", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_grace_rt_show,
- .store = cfq_grace_rt_store,
-};
-static struct cfq_fs_entry cfq_grace_idle_entry = {
- .attr = {.name = "grace_idle", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_grace_idle_show,
- .store = cfq_grace_idle_store,
-};
-static struct cfq_fs_entry cfq_epoch_entry = {
- .attr = {.name = "epoch", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_epoch_show,
- .store = cfq_epoch_store,
-};
-static struct cfq_fs_entry cfq_epochsectors_entry = {
- .attr = {.name = "epochsectors", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_epochsectors_show,
- .store = cfq_epochsectors_store,
-};
-
-#define P_0_STR "p0"
-#define P_1_STR "p1"
-#define P_2_STR "p2"
-#define P_3_STR "p3"
-#define P_4_STR "p4"
-#define P_5_STR "p5"
-#define P_6_STR "p6"
-#define P_7_STR "p7"
-#define P_8_STR "p8"
-#define P_9_STR "p9"
-#define P_10_STR "p10"
-#define P_11_STR "p11"
-#define P_12_STR "p12"
-#define P_13_STR "p13"
-#define P_14_STR "p14"
-#define P_15_STR "p15"
-#define P_16_STR "p16"
-#define P_17_STR "p17"
-#define P_18_STR "p18"
-#define P_19_STR "p19"
-#define P_20_STR "p20"
-
-
-#define CFQ_PRIO_SYSFS_ENTRY(__PRIOLVL) \
-static struct cfq_fs_entry cfq_prio_##__PRIOLVL##_entry = { \
- .attr = {.name = P_##__PRIOLVL##_STR, .mode = S_IRUGO | S_IWUSR }, \
- .show = cfq_prio_##__PRIOLVL##_show, \
- .store = cfq_prio_##__PRIOLVL##_store, \
-};
-CFQ_PRIO_SYSFS_ENTRY(0);
-CFQ_PRIO_SYSFS_ENTRY(1);
-CFQ_PRIO_SYSFS_ENTRY(2);
-CFQ_PRIO_SYSFS_ENTRY(3);
-CFQ_PRIO_SYSFS_ENTRY(4);
-CFQ_PRIO_SYSFS_ENTRY(5);
-CFQ_PRIO_SYSFS_ENTRY(6);
-CFQ_PRIO_SYSFS_ENTRY(7);
-CFQ_PRIO_SYSFS_ENTRY(8);
-CFQ_PRIO_SYSFS_ENTRY(9);
-CFQ_PRIO_SYSFS_ENTRY(10);
-CFQ_PRIO_SYSFS_ENTRY(11);
-CFQ_PRIO_SYSFS_ENTRY(12);
-CFQ_PRIO_SYSFS_ENTRY(13);
-CFQ_PRIO_SYSFS_ENTRY(14);
-CFQ_PRIO_SYSFS_ENTRY(15);
-CFQ_PRIO_SYSFS_ENTRY(16);
-CFQ_PRIO_SYSFS_ENTRY(17);
-CFQ_PRIO_SYSFS_ENTRY(18);
-CFQ_PRIO_SYSFS_ENTRY(19);
-CFQ_PRIO_SYSFS_ENTRY(20);
-#undef CFQ_PRIO_SYSFS_ENTRY
static struct attribute *default_attrs[] = {
&cfq_quantum_entry.attr,
- &cfq_quantum_io_entry.attr,
- &cfq_idle_quantum_entry.attr,
- &cfq_idle_quantum_io_entry.attr,
&cfq_queued_entry.attr,
- &cfq_grace_rt_entry.attr,
- &cfq_grace_idle_entry.attr,
- &cfq_epoch_entry.attr,
- &cfq_epochsectors_entry.attr,
- &cfq_prio_0_entry.attr,
- &cfq_prio_1_entry.attr,
- &cfq_prio_2_entry.attr,
- &cfq_prio_3_entry.attr,
- &cfq_prio_4_entry.attr,
- &cfq_prio_5_entry.attr,
- &cfq_prio_6_entry.attr,
- &cfq_prio_7_entry.attr,
- &cfq_prio_8_entry.attr,
- &cfq_prio_9_entry.attr,
- &cfq_prio_10_entry.attr,
- &cfq_prio_11_entry.attr,
- &cfq_prio_12_entry.attr,
- &cfq_prio_13_entry.attr,
- &cfq_prio_14_entry.attr,
- &cfq_prio_15_entry.attr,
- &cfq_prio_16_entry.attr,
- &cfq_prio_17_entry.attr,
- &cfq_prio_18_entry.attr,
- &cfq_prio_19_entry.attr,
- &cfq_prio_20_entry.attr,
NULL,
};
.elevator_set_req_fn = cfq_set_request,
.elevator_put_req_fn = cfq_put_request,
.elevator_may_queue_fn = cfq_may_queue,
- .elevator_set_congested_fn = cfq_queue_congested,
.elevator_init_fn = cfq_init,
.elevator_exit_fn = cfq_exit,
};
/* Absolute shares of this class
* in local units.
*/
-
- int cnt_guarantee; /* Allocation as parent */
- int cnt_unused; /* Allocation to default subclass */
-
+
+ int ioprio;
+ int unused;
+
/* Statistics, for class and default subclass */
cki_stats_t stats;
cki_stats_t mystats;
static inline void cki_reset_stats(cki_stats_t *usg);
static inline void init_icls_one(cki_icls_t *icls);
static inline int cki_div(int *a, int b, int c);
-//static inline int cki_recalc(cki_icls_t *icls, int rel2abs);
-static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres);
+static inline int cki_recalc(cki_icls_t *icls, int rel2abs);
+#ifdef DOES_NOT_WORK_AND_NOT_NEEDED
/* External functions e.g. interface to ioscheduler */
-void *cki_tsk_icls (struct task_struct *tsk);
-int cki_tsk_ioprio (struct task_struct *tsk);
+inline void *cki_tsk_icls(struct task_struct *tsk);
+inline int cki_tsk_ioprio(struct task_struct *tsk);
+#endif
extern void cki_cfq_set(icls_tsk_t tskicls, icls_ioprio_t tskioprio);
static inline void init_icls_one(cki_icls_t *icls)
{
- // Assign zero as initial guarantee otherwise creations
- // could fail due to inadequate share
-
- //icls->shares.my_guarantee =
- // (CKI_IOPRIO_MIN * CKRM_SHARE_DFLT_TOTAL_GUARANTEE) /
- // CKI_IOPRIO_DIV ;
- icls->shares.my_guarantee = 0;
+ icls->shares.my_guarantee =
+ (CKI_IOPRIO_MIN * CKRM_SHARE_DFLT_TOTAL_GUARANTEE) /
+ CKI_IOPRIO_DIV ;
icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- icls->cnt_guarantee = icls->cnt_unused = IOPRIO_IDLE;
-
- //Same rationale icls->ioprio = CKI_IOPRIO_MIN;
- //IOPRIO_IDLE equivalence to zero my_guarantee (set above) relies
- //on former being zero.
+ icls->ioprio = CKI_IOPRIO_MIN;
+ icls->unused = 0 ;
init_icls_stats(icls);
}
* Caller should have a lock on icls
*/
-static void cki_recalc_propagate(cki_icls_t *res, cki_icls_t *parres)
-{
-
- ckrm_core_class_t *child = NULL;
- cki_icls_t *childres;
- int resid = cki_rcbs.resid;
-
- if (parres) {
- struct ckrm_shares *par = &parres->shares;
- struct ckrm_shares *self = &res->shares;
-
-
-
- if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- res->cnt_guarantee = CKRM_SHARE_DONTCARE;
- } else if (par->total_guarantee) {
- u64 temp = (u64) self->my_guarantee *
- parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- res->cnt_guarantee = (int) temp;
- } else {
- res->cnt_guarantee = 0;
- }
-
- if (res->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- res->cnt_unused = CKRM_SHARE_DONTCARE;
- } else if (self->total_guarantee) {
- u64 temp = (u64) self->unused_guarantee *
- res->cnt_guarantee;
- do_div(temp, self->total_guarantee);
- res->cnt_unused = (int) temp;
- } else {
- res->cnt_unused = 0;
- }
- }
- // propagate to children
- ckrm_lock_hier(res->core);
- while ((child = ckrm_get_next_child(res->core,child)) != NULL){
- childres = ckrm_get_res_class(child, resid,
- cki_icls_t);
-
- spin_lock(&childres->shares_lock);
- cki_recalc_propagate(childres, res);
- spin_unlock(&childres->shares_lock);
- }
- ckrm_unlock_hier(res->core);
-}
-
-#if 0
static inline int cki_recalc(cki_icls_t *icls, int rel2abs)
{
u64 temp;
temp = icls->shares.my_guarantee * (IOPRIO_NR-1);
do_div(temp, icls->shares.total_guarantee);
- icls->total = IOPRIO_NR-1;
icls->ioprio = temp ;
- icls->unused = icls->total - icls->ioprio;
-// icls->unused = (IOPRIO_NR-1)-icls->ioprio;
+ icls->unused = (IOPRIO_NR-1)-icls->ioprio;
} else {
cki_icls_t *parres;
return -EINVAL;
}
+ partot = parres->ioprio + parres->unused;
- temp = (icls->shares.my_guarantee *
- parres->total);
+ temp = (icls->shares.my_guarantee * (parres->ioprio + parres->unused));
do_div(temp, parres->shares.total_guarantee);
icls->ioprio = temp;
return 0;
}
-#endif
-void *cki_tsk_icls(struct task_struct *tsk)
+
+inline void *cki_icls_tsk(struct task_struct *tsk)
{
return (void *) ckrm_get_res_class(class_core(tsk->taskclass),
cki_rcbs.resid, cki_icls_t);
}
-int cki_tsk_ioprio(struct task_struct *tsk)
+inline int cki_icls_ioprio(struct task_struct *tsk)
{
cki_icls_t *icls = ckrm_get_res_class(class_core(tsk->taskclass),
cki_rcbs.resid, cki_icls_t);
- return icls->cnt_unused;
+ return icls->ioprio;
}
static void *cki_alloc(struct ckrm_core_class *core,
icls->shares_lock = SPIN_LOCK_UNLOCKED;
if (parent == NULL) {
+ u64 temp;
/* Root class gets same as "normal" CFQ priorities to
* retain compatibility of behaviour in the absence of
* other classes
*/
- icls->cnt_guarantee = icls->cnt_unused = IOPRIO_NR-1;
+ icls->ioprio = IOPRIO_NORM;
+ icls->unused = (IOPRIO_NR-1)-IOPRIO_NORM;
/* Default gets normal, not minimum */
//icls->unused = IOPRIO_NORM;
/* Compute shares in abstract units */
icls->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+ temp = (u64) icls->ioprio * icls->shares.total_guarantee;
+ do_div(temp, CKI_IOPRIO_DIV);
+ icls->shares.my_guarantee = (int) temp;
- // my_guarantee for root is meaningless. Set to default
- icls->shares.my_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- icls->shares.unused_guarantee =
- CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
-
- //temp = (u64) icls->cnt_unused * icls->shares.total_guarantee;
- //do_div(temp, CKI_IOPRIO_DIV);
- // temp now has root's default's share
- //icls->shares.unused_guarantee =
- // icls->shares.total_guarantee - temp;
-
+ //icls->shares.my_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
+ //icls->shares.max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
icls->shares.my_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
icls->shares.max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+
+
+ icls->shares.unused_guarantee =
+ icls->shares.total_guarantee -
+ icls->shares.my_guarantee;
+ //icls->shares.cur_max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
icls->shares.cur_max_limit = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
} else {
init_icls_one(icls);
- /* No propagation to parent needed if icls'
- initial share is zero */
}
try_module_get(THIS_MODULE);
return icls;
/* Update parent's shares */
spin_lock(&parres->shares_lock);
child_guarantee_changed(&parres->shares, icls->shares.my_guarantee, 0);
- parres->cnt_unused += icls->cnt_guarantee;
+ parres->unused += icls->ioprio;
spin_unlock(&parres->shares_lock);
kfree(res);
/* limits not supported */
if ((new->max_limit != CKRM_SHARE_UNCHANGED)
|| (new->my_limit != CKRM_SHARE_UNCHANGED)) {
- printk(KERN_ERR "limits not supported\n");
+ printk(KERN_ERR "limits changed max_limit %d my_limit %d\n",
+ new->max_limit, new->my_limit);
+
return -EINVAL;
}
}
rc = set_shares(new, cur, par);
+
printk(KERN_ERR "rc from set_shares %d\n", rc);
- if ((!rc) && parres) {
-
- if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
- parres->cnt_unused = CKRM_SHARE_DONTCARE;
- } else if (par->total_guarantee) {
- u64 temp = (u64) par->unused_guarantee *
- parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- parres->cnt_unused = (int) temp;
- } else {
- parres->cnt_unused = 0;
- }
- cki_recalc_propagate(res, parres);
-
-#if 0
+ if (!rc) {
int old = icls->ioprio;
-
rc = cki_recalc(icls,0);
if (!rc && parres) {
int raise_tot = icls->ioprio - old ;
- parres->unused -= raise_tot ;
+ parres->unused += raise_tot ;
}
-#endif
}
spin_unlock(&icls->shares_lock);
if (icls->parent) {
seq_printf(sfile, "%d total_write\n",atomic_read(&icls->stats.blkwr));
*/
- seq_printf(sfile, "%d total ioprio\n",icls->cnt_guarantee);
- seq_printf(sfile, "%d unused/default ioprio\n",icls->cnt_unused);
+ seq_printf(sfile, "%d ioprio\n",icls->ioprio);
+ seq_printf(sfile, "%d unused\n",icls->unused);
return 0;
}
struct ckrm_res_ctlr cki_rcbs = {
- .res_name = "io",
+ .res_name = "cki",
.res_hdepth = 1,
.resid = -1,
.res_alloc = cki_alloc,
resid = ckrm_register_res_ctlr(clstype, &cki_rcbs);
if (resid != -1) {
cki_rcbs.classtype = clstype;
- cki_cfq_set(cki_tsk_icls,cki_tsk_ioprio);
+ cki_cfq_set(cki_icls_tsk,cki_icls_ioprio);
}
}
spin_unlock(&stub_lock);
}
-void *cki_hash_key(struct task_struct *tsk)
+inline void *cki_hash_key(struct task_struct *tsk)
{
void *ret;
spin_lock(&stub_lock);
return ret;
}
-int cki_ioprio(struct task_struct *tsk)
+inline int cki_ioprio(struct task_struct *tsk)
{
int ret;
spin_lock(&stub_lock);
e->elevator_put_req_fn(q, rq);
}
-void elv_set_congested(request_queue_t *q)
-{
- elevator_t *e = &q->elevator;
-
- if (e->elevator_set_congested_fn)
- e->elevator_set_congested_fn(q);
-}
-
int elv_may_queue(request_queue_t *q, int rw)
{
elevator_t *e = &q->elevator;
if (e->elevator_may_queue_fn)
return e->elevator_may_queue_fn(q, rw);
- return 1;
+ return 0;
}
void elv_completed_request(request_queue_t *q, struct request *rq)
printk("\n");
} else
DPRINT("botched floppy option\n");
- DPRINT("Read Documentation/floppy.txt\n");
+ DPRINT("Read linux/Documentation/floppy.txt\n");
return 0;
}
return 0;
}
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
/**
* blk_queue_end_tag - end tag operations for a request
* @q: the request queue for the device
printk("Using %s io scheduler\n", chosen_elevator->elevator_name);
}
+ if (elevator_init(q, chosen_elevator))
+ goto out_elv;
+
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- /*
- * all done
- */
- if (!elevator_init(q, chosen_elevator))
- return q;
-
+ return q;
+out_elv:
blk_cleanup_queue(q);
out_init:
kmem_cache_free(requestq_cachep, q);
struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock);
-
- if (!elv_may_queue(q, rw))
- goto out_lock;
-
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
}
}
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- if (blk_queue_full(q, rw) && !ioc_batching(ioc))
- goto out_lock;
+ if (blk_queue_full(q, rw)
+ && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
+ /*
+ * The queue is full and the allocating process is not a
+ * "batcher", and not exempted by the IO scheduler
+ */
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+ }
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw);
- goto out_lock;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
}
if (ioc_batching(ioc))
out:
put_io_context(ioc);
return rq;
-out_lock:
- if (!rq)
- elv_set_congested(q);
- spin_unlock_irq(q->queue_lock);
- goto out;
}
/*
*
* A matching blk_rq_unmap_user() must be issued at the end of io, while
* still in process context.
- *
- * Note: The mapped bio may need to be bounced through blk_queue_bounce()
- * before being submitted to the device, as pages mapped may be out of
- * reach. It's the callers responsibility to make sure this happens. The
- * original bio must be passed back in to blk_rq_unmap_user() for proper
- * unmapping.
*/
struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
unsigned int len)
{
- unsigned long uaddr;
- struct request *rq;
+ struct request *rq = NULL;
+ char *buf = NULL;
struct bio *bio;
-
- if (len > (q->max_sectors << 9))
- return ERR_PTR(-EINVAL);
- if ((!len && ubuf) || (len && !ubuf))
- return ERR_PTR(-EINVAL);
+ int ret;
rq = blk_get_request(q, rw, __GFP_WAIT);
if (!rq)
return ERR_PTR(-ENOMEM);
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
- else
- bio = bio_copy_user(q, uaddr, len, rw == READ);
+ bio = bio_map_user(q, NULL, (unsigned long) ubuf, len, rw == READ);
+ if (!bio) {
+ int bytes = (len + 511) & ~511;
- if (!IS_ERR(bio)) {
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
+ buf = kmalloc(bytes, q->bounce_gfp | GFP_USER);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fault;
+ }
- rq->buffer = rq->data = NULL;
- rq->data_len = len;
- return rq;
+ if (rw == WRITE) {
+ if (copy_from_user(buf, ubuf, len)) {
+ ret = -EFAULT;
+ goto fault;
+ }
+ } else
+ memset(buf, 0, len);
}
- /*
- * bio is the err-ptr
- */
- blk_put_request(rq);
- return (struct request *) bio;
+ rq->bio = rq->biotail = bio;
+ if (rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+
+ rq->buffer = rq->data = buf;
+ rq->data_len = len;
+ return rq;
+fault:
+ if (buf)
+ kfree(buf);
+ if (bio)
+ bio_unmap_user(bio, 1);
+ if (rq)
+ blk_put_request(rq);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(blk_rq_map_user);
* Description:
* Unmap a request previously mapped by blk_rq_map_user().
*/
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq, void __user *ubuf, struct bio *bio,
+ unsigned int ulen)
{
+ const int read = rq_data_dir(rq) == READ;
int ret = 0;
- if (bio) {
- if (bio_flagged(bio, BIO_USER_MAPPED))
- bio_unmap_user(bio);
- else
- ret = bio_uncopy_user(bio);
+ if (bio)
+ bio_unmap_user(bio, read);
+ if (rq->buffer) {
+ if (read && copy_to_user(ubuf, rq->buffer, ulen))
+ ret = -EFAULT;
+ kfree(rq->buffer);
}
blk_put_request(rq);
return queue_work(kblockd_workqueue, work);
}
-EXPORT_SYMBOL(kblockd_schedule_work);
-
void kblockd_flush(void)
{
flush_workqueue(kblockd_workqueue);
kobject_put(&disk->kobj);
}
}
-
-asmlinkage int sys_ioprio_set(int ioprio)
-{
- if (ioprio < IOPRIO_IDLE || ioprio > IOPRIO_RT)
- return -EINVAL;
- if (ioprio == IOPRIO_RT && !capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- printk("%s: set ioprio %d\n", current->comm, ioprio);
- current->ioprio = ioprio;
- return 0;
-}
-
-asmlinkage int sys_ioprio_get(void)
-{
- return current->ioprio;
-}
-
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
int msg_flags)
{
+ mm_segment_t oldfs;
int result;
struct msghdr msg;
- struct kvec iov;
+ struct iovec iov;
unsigned long flags;
sigset_t oldset;
+ oldfs = get_fs();
+ set_fs(get_ds());
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
	spin_lock_irqsave(&current->sighand->siglock, flags);
iov.iov_len = size;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
- result = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ result = sock_sendmsg(sock, &msg, size);
else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+ result = sock_recvmsg(sock, &msg, size, 0);
if (signal_pending(current)) {
siginfo_t info;
recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ set_fs(oldfs);
return result;
}
-#define PPCSTRUCT(pi) ((Interface *)(pi->private))
+#define PPCSTRUCT(pi) ((PPC *)(pi->private))
/****************************************************************/
/*
static int bpck6_init_proto(PIA *pi)
{
- Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL);
+ PPC *p = kmalloc(sizeof(PPC), GFP_KERNEL);
if (p) {
- memset(p, 0, sizeof(Interface));
+ memset(p, 0, sizeof(PPC));
pi->private = (unsigned long)p;
return 0;
}
unsigned cmd, unsigned long arg)
{
struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &cd->info, inode, cmd, arg);
+ return cdrom_ioctl(&cd->info, inode, cmd, arg);
}
static int pcd_block_media_changed(struct gendisk *disk)
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
-} Interface;
+} PPC;
//***************************************************************************
//***************************************************************************
-static int ppc6_select(Interface *ppc);
-static void ppc6_deselect(Interface *ppc);
-static void ppc6_send_cmd(Interface *ppc, u8 cmd);
-static void ppc6_wr_data_byte(Interface *ppc, u8 data);
-static u8 ppc6_rd_data_byte(Interface *ppc);
-static u8 ppc6_rd_port(Interface *ppc, u8 port);
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_wait_for_fifo(Interface *ppc);
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_extout(Interface *ppc, u8 regdata);
-static int ppc6_open(Interface *ppc);
-static void ppc6_close(Interface *ppc);
+static int ppc6_select(PPC *ppc);
+static void ppc6_deselect(PPC *ppc);
+static void ppc6_send_cmd(PPC *ppc, u8 cmd);
+static void ppc6_wr_data_byte(PPC *ppc, u8 data);
+static u8 ppc6_rd_data_byte(PPC *ppc);
+static u8 ppc6_rd_port(PPC *ppc, u8 port);
+static void ppc6_wr_port(PPC *ppc, u8 port, u8 data);
+static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count);
+static void ppc6_wait_for_fifo(PPC *ppc);
+static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count);
+static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_extout(PPC *ppc, u8 regdata);
+static int ppc6_open(PPC *ppc);
+static void ppc6_close(PPC *ppc);
//***************************************************************************
-static int ppc6_select(Interface *ppc)
+static int ppc6_select(PPC *ppc)
{
u8 i, j, k;
//***************************************************************************
-static void ppc6_deselect(Interface *ppc)
+static void ppc6_deselect(PPC *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
//***************************************************************************
-static void ppc6_send_cmd(Interface *ppc, u8 cmd)
+static void ppc6_send_cmd(PPC *ppc, u8 cmd)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wr_data_byte(Interface *ppc, u8 data)
+static void ppc6_wr_data_byte(PPC *ppc, u8 data)
{
switch(ppc->mode)
{
//***************************************************************************
-static u8 ppc6_rd_data_byte(Interface *ppc)
+static u8 ppc6_rd_data_byte(PPC *ppc)
{
u8 data = 0;
//***************************************************************************
-static u8 ppc6_rd_port(Interface *ppc, u8 port)
+static u8 ppc6_rd_port(PPC *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
//***************************************************************************
-static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
+static void ppc6_wr_port(PPC *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
//***************************************************************************
-static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
+static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wait_for_fifo(Interface *ppc)
+static void ppc6_wait_for_fifo(PPC *ppc)
{
int i;
//***************************************************************************
-static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
+static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_extout(Interface *ppc, u8 regdata)
+static void ppc6_wr_extout(PPC *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
//***************************************************************************
-static int ppc6_open(Interface *ppc)
+static int ppc6_open(PPC *ppc)
{
int ret;
//***************************************************************************
-static void ppc6_close(Interface *ppc)
+static void ppc6_close(PPC *ppc)
{
ppc6_deselect(ppc);
}
if (size < 0)
return -EINVAL;
if (size > (q->max_sectors << 9))
- size = q->max_sectors << 9;
+ return -EINVAL;
q->sg_reserved_size = size;
return 0;
return put_user(1, p);
}
-#define CMD_READ_SAFE 0x01
-#define CMD_WRITE_SAFE 0x02
-#define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
-#define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
-
-static int verify_command(struct file *file, unsigned char *cmd)
-{
- static const unsigned char cmd_type[256] = {
-
- /* Basic read-only commands */
- safe_for_read(TEST_UNIT_READY),
- safe_for_read(REQUEST_SENSE),
- safe_for_read(READ_6),
- safe_for_read(READ_10),
- safe_for_read(READ_12),
- safe_for_read(READ_16),
- safe_for_read(READ_BUFFER),
- safe_for_read(READ_LONG),
- safe_for_read(INQUIRY),
- safe_for_read(MODE_SENSE),
- safe_for_read(MODE_SENSE_10),
- safe_for_read(START_STOP),
-
- /* Audio CD commands */
- safe_for_read(GPCMD_PLAY_CD),
- safe_for_read(GPCMD_PLAY_AUDIO_10),
- safe_for_read(GPCMD_PLAY_AUDIO_MSF),
- safe_for_read(GPCMD_PLAY_AUDIO_TI),
-
- /* CD/DVD data reading */
- safe_for_read(GPCMD_READ_CD),
- safe_for_read(GPCMD_READ_CD_MSF),
- safe_for_read(GPCMD_READ_DISC_INFO),
- safe_for_read(GPCMD_READ_CDVD_CAPACITY),
- safe_for_read(GPCMD_READ_DVD_STRUCTURE),
- safe_for_read(GPCMD_READ_HEADER),
- safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
- safe_for_read(GPCMD_READ_SUBCHANNEL),
- safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
- safe_for_read(GPCMD_REPORT_KEY),
- safe_for_read(GPCMD_SCAN),
-
- /* Basic writing commands */
- safe_for_write(WRITE_6),
- safe_for_write(WRITE_10),
- safe_for_write(WRITE_VERIFY),
- safe_for_write(WRITE_12),
- safe_for_write(WRITE_VERIFY_12),
- safe_for_write(WRITE_16),
- safe_for_write(WRITE_LONG),
- };
- unsigned char type = cmd_type[cmd[0]];
-
- /* Anybody who can open the device can do a read-safe command */
- if (type & CMD_READ_SAFE)
- return 0;
-
- /* Write-safe commands just require a writable open.. */
- if (type & CMD_WRITE_SAFE) {
- if (file->f_mode & FMODE_WRITE)
- return 0;
- }
-
- /* And root can do any command.. */
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- /* Otherwise fail it with an "Operation not permitted" */
- return -EPERM;
-}
-
-static int sg_io(struct file *file, request_queue_t *q,
- struct gendisk *bd_disk, struct sg_io_hdr *hdr)
+static int sg_io(request_queue_t *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr)
{
unsigned long start_time;
int reading, writing;
return -EINVAL;
if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (verify_command(file, cmd))
- return -EPERM;
/*
* we'll do that later
rq->flags |= REQ_BLOCK_PC;
bio = rq->bio;
- /*
- * bounce this after holding a reference to the original bio, it's
- * needed for proper unmapping
- */
- if (rq->bio)
- blk_queue_bounce(q, &rq->bio);
-
rq->timeout = (hdr->timeout * HZ) / 1000;
if (!rq->timeout)
rq->timeout = q->sg_timeout;
hdr->sb_len_wr = len;
}
- if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
+ if (blk_rq_unmap_user(rq, hdr->dxferp, bio, hdr->dxfer_len))
return -EFAULT;
/* may not have succeeded, but output values written to control
#define READ_DEFECT_DATA_TIMEOUT (60 * HZ )
#define OMAX_SB_LEN 16 /* For backward compatibility */
-static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
- struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+static int sg_scsi_ioctl(request_queue_t *q, struct gendisk *bd_disk,
+ Scsi_Ioctl_Command __user *sic)
{
struct request *rq;
int err, in_len, out_len, bytes, opcode, cmdlen;
if (copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = verify_command(file, rq->cmd);
- if (err)
- goto error;
-
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
return err;
}
-int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+int scsi_cmd_ioctl(struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
{
request_queue_t *q;
struct request *rq;
err = -EFAULT;
if (copy_from_user(&hdr, arg, sizeof(hdr)))
break;
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr);
if (err == -EFAULT)
break;
hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(file, q, bd_disk, &hdr);
+ err = sg_io(q, bd_disk, &hdr);
if (err == -EFAULT)
break;
if (!arg)
break;
- err = sg_scsi_ioctl(file, q, bd_disk, arg);
+ err = sg_scsi_ioctl(q, bd_disk, arg);
break;
case CDROMCLOSETRAY:
close = 1;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
- disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", i);
sprintf(disk->devfs_name, "floppy/%d", i);
set_capacity(disk, 2880);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
outb(0x80, iobase + 0x30);
/* Wait some time */
- msleep(10);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 100);
/* Turn FPGA on */
outb(0x00, iobase + 0x30);
outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL);
/* Timeout before it is safe to send the first HCI packet */
- msleep(1250);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout((HZ * 5) / 4); // or set it to 3/2
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
}
/* Timeout before it is safe to send the first HCI packet */
- msleep(1000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
/* Register HCI device */
err = hci_register_dev(hdev);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
btuart_change_speed(info, DEFAULT_BAUD_RATE);
/* Timeout before it is safe to send the first HCI packet */
- msleep(1000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
spin_unlock_irqrestore(&(info->lock), flags);
/* Timeout before it is safe to send the first HCI packet */
- msleep(2000);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ * 2);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
struct sk_buff *skb;
unsigned long flags;
- BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
-
+ BT_ERR("Timeout, retransmitting %u pkts", bcsp->unack.qlen);
spin_lock_irqsave(&bcsp->unack.lock, flags);
while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
#define URB_ZERO_PACKET 0
#endif
-#define VERSION "2.7"
+#define VERSION "2.6"
static struct usb_driver hci_usb_driver;
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
- /* Bluetooth Ultraport Module from IBM */
- { USB_DEVICE(0x04bf, 0x030a) },
+ /* Ericsson with non-standard id */
+ { USB_DEVICE(0x0bdb, 0x1002) },
- /* ALPS Modules with non-standard id */
- { USB_DEVICE(0x044e, 0x3001) },
+ /* ALPS Module with non-standard id */
{ USB_DEVICE(0x044e, 0x3002) },
- /* Ericsson with non-standard id */
- { USB_DEVICE(0x0bdb, 0x1002) },
+ /* Bluetooth Ultraport Module from IBM */
+ { USB_DEVICE(0x04bf, 0x030a) },
{ } /* Terminating entry */
};
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET },
- /* ISSC Bluetooth Adapter v3.1 */
- { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
-
/* Digianswer device */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
BT_DBG("%s", hdev->name);
- for (i = 0; i < 4; i++)
+ for (i=0; i < 4; i++)
skb_queue_purge(&husb->transmit_q[i]);
return 0;
}
+static inline void hci_usb_wait_for_urb(struct urb *urb)
+{
+ while (atomic_read(&urb->kref.refcount) > 1) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout((5 * HZ + 999) / 1000);
+ }
+}
+
static void hci_usb_unlink_urbs(struct hci_usb *husb)
{
int i;
BT_DBG("%s", husb->hdev->name);
- for (i = 0; i < 4; i++) {
+ for (i=0; i < 4; i++) {
struct _urb *_urb;
struct urb *urb;
urb = &_urb->urb;
BT_DBG("%s unlinking _urb %p type %d urb %p",
husb->hdev->name, _urb, _urb->type, urb);
- usb_kill_urb(urb);
+ usb_unlink_urb(urb);
+ hci_usb_wait_for_urb(urb);
_urb_queue_tail(__completed_q(husb, _urb->type), _urb);
}
}
file->private_data = hci_vhci;
- return nonseekable_open(inode, file);
+ return 0;
}
static int hci_vhci_chr_close(struct inode *inode, struct file *file)
Werner Zimmermann, August 8, 1995
V1.70 Multisession support now is completed, but there is still not
enough testing done. If you can test it, please contact me. For
- details please read Documentation/cdrom/aztcd
+ details please read /usr/src/linux/Documentation/cdrom/aztcd
Werner Zimmermann, August 19, 1995
V1.80 Modification to suit the new kernel boot procedure introduced
with kernel 1.3.33. Will definitely not work with older kernels.
if (!cdrom_is_mrw(cdi, &mrw_write))
mrw = 1;
- if (CDROM_CAN(CDC_MO_DRIVE))
- ram_write = 1;
- else
- (void) cdrom_is_random_writable(cdi, &ram_write);
-
+ (void) cdrom_is_random_writable(cdi, &ram_write);
+
if (mrw)
cdi->mask &= ~CDC_MRW;
else
else if (CDROM_CAN(CDC_DVD_RAM))
ret = cdrom_dvdram_open_write(cdi);
else if (CDROM_CAN(CDC_RAM) &&
- !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE))
+ !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW))
ret = cdrom_ram_open_write(cdi);
else if (CDROM_CAN(CDC_MO_DRIVE))
ret = mo_open_write(cdi);
goto err;
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
- if (cdrom_open_write(cdi))
- goto err;
if (!CDROM_CAN(CDC_RAM))
goto err;
+ if (cdrom_open_write(cdi))
+ goto err;
ret = 0;
}
}
struct packet_command cgc;
int nr, ret;
- cdi->last_sense = 0;
-
memset(&cgc, 0, sizeof(cgc));
/*
if (!q)
return -ENXIO;
- cdi->last_sense = 0;
-
while (nframes) {
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
rq->timeout = 60 * HZ;
bio = rq->bio;
- if (rq->bio)
- blk_queue_bounce(q, &rq->bio);
-
if (blk_execute_rq(q, cdi->disk, rq)) {
struct request_sense *s = rq->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
- if (blk_rq_unmap_user(rq, bio, len))
+ if (blk_rq_unmap_user(rq, ubuf, bio, len))
ret = -EFAULT;
if (ret)
nframes -= nr;
lba += nr;
- ubuf += len;
}
return ret;
* these days. ATAPI / SCSI specific code now mainly resides in
* mmc_ioct().
*/
-int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg)
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct inode *ip,
+ unsigned int cmd, unsigned long arg)
{
struct cdrom_device_ops *cdo = cdi->ops;
int ret;
/* Try the generic SCSI command ioctl's first.. */
- ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg);
+ ret = scsi_cmd_ioctl(ip->i_bdev->bd_disk, cmd, (void __user *)arg);
if (ret != -ENOTTY)
return ret;
} cdrom_sysctl_settings;
int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int pos;
struct cdrom_device_info *cdi;
char *info = cdrom_sysctl_settings.info;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
strcpy(info+pos,"\n\n");
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
/* Unfortunately, per device settings are not implemented through
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val) {
static int scd_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &scd_info, inode, cmd, arg);
+ return cdrom_ioctl(&scd_info, inode, cmd, arg);
}
static int scd_block_media_changed(struct gendisk *disk)
static int cm206_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &cm206_info, inode, cmd, arg);
+ return cdrom_ioctl(&cm206_info, inode, cmd, arg);
}
static int cm206_block_media_changed(struct gendisk *disk)
static int mcd_block_ioctl(struct inode *inode, struct file *file,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, &mcd_info, inode, cmd, arg);
+ return cdrom_ioctl(&mcd_info, inode, cmd, arg);
}
static int mcd_block_media_changed(struct gendisk *disk)
unsigned cmd, unsigned long arg)
{
struct s_drive_stuff *p = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &p->info, inode, cmd, arg);
+ return cdrom_ioctl(&p->info, inode, cmd, arg);
}
static int mcdx_block_media_changed(struct gendisk *disk)
#endif /* MULTISESSION */
if (disk_info.multi)
printk(KERN_WARNING "optcd: Multisession support experimental, "
- "see Documentation/cdrom/optcd\n");
+ "see linux/Documentation/cdrom/optcd\n");
DEBUG((DEBUG_TOC, "exiting update_toc"));
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
static DECLARE_WAIT_QUEUE_HEAD(sbp_waitq);
#endif /* FUTURE */
u_char TocEnt_number;
u_char TocEnt_format; /* em */
u_int TocEnt_address;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
char has_data;
#endif /* SAFE_MIXED */
u_char ored_ctl_adr; /* to detect if CDROM contains data tracks */
return (0);
}
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
static int cc_SubChanInfo(int frame, int count, u_char *buffer)
/* "frame" is a RED BOOK (msf-bin) address */
{
return (0);
}
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
/*
* obtain if requested service disturbs current audio state
*/
/*==========================================================================*/
-#ifdef FUTURE
+#if FUTURE
/*
* called always if driver gets entered
* returns 0 or ERROR2 or ERROR15
case CDROMREADMODE1:
msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE);
case CDROMREADMODE2: /* not usable at the moment */
msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE_RAW1);
if (famL_drive) RETURN_UP(-EINVAL);
if (famV_drive) RETURN_UP(-EINVAL);
if (famT_drive) RETURN_UP(-EINVAL);
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
- if (copy_from_user(&read_audio, (void __user *)arg,
+ if (copy_from_user(&read_audio, (void *)arg,
sizeof(struct cdrom_read_audio)))
RETURN_UP(-EFAULT);
if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
continue;
}
- if (copy_to_user(read_audio.buf,
- current_drive->aud_buf,
+ if (copy_to_user((u_char *)read_audio.buf,
+ (u_char *) current_drive->aud_buf,
read_audio.nframes * CD_FRAMESIZE_RAW))
RETURN_UP(-EFAULT);
msg(DBG_AUD,"read_audio: copy_to_user done.\n");
case CDROMPLAYMSF:
msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMPLAYTRKIND: /* Play a track. This currently ignores index. */
msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMSTOP: /* Spin down the drive */
msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
i=cc_Pause_Resume(1);
goto request_loop;
}
-#ifdef FUTURE
+#if FUTURE
i=prepare(0,0); /* at moment not really a hassle check, but ... */
if (i!=0)
msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
sbp_sleep(0);
if (sbp_data(req) != 0)
{
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=2; /* is really a data disk */
#endif /* SAFE_MIXED */
#ifdef DEBUG_GTL
unsigned cmd, unsigned long arg)
{
struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg);
+ return cdrom_ioctl(p->sbpcd_infop, inode, cmd, arg);
}
static int sbpcd_block_media_changed(struct gendisk *disk)
if ((current_drive->ored_ctl_adr&0x40)==0)
{
msg(DBG_INF,"CD contains no data tracks.\n");
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
}
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
else if (current_drive->has_data<1) current_drive->has_data=1;
#endif /* SAFE_MIXED */
}
if (p->f_eject) cc_SpinDown();
p->diskstate_flags &= ~cd_size_bit;
p->open_count=0;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
}
if (port_index>0)
{
- msg(DBG_INF, "You should read Documentation/cdrom/sbpcd\n");
+ msg(DBG_INF, "You should read linux/Documentation/cdrom/sbpcd\n");
msg(DBG_INF, "and then configure sbpcd.h for your hardware.\n");
}
check_datarate();
if (p->drv_id==-1) continue;
switch_drive(p);
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
/*
current_drive->diskstate_flags &= ~toc_bit;
/* we *don't* need invalidate here, it's done by caller */
current_drive->diskstate_flags &= ~cd_size_bit;
-#ifdef SAFE_MIXED
+#if SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
/*
* Attention! This file contains user-serviceable parts!
* I recommend to make use of it...
- * If you feel helpless, look into Documentation/cdrom/sbpcd
+ * If you feel helpless, look into linux/Documentation/cdrom/sbpcd
* (good idea anyway, at least before mailing me).
*
* The definitions for the first controller can get overridden by
unsigned cmd, unsigned long arg)
{
struct disk_info *di = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &di->viocd_info, inode, cmd, arg);
+ return cdrom_ioctl(&di->viocd_info, inode, cmd, arg);
}
static int viocd_blk_media_changed(struct gendisk *disk)
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
- depends on SERIAL_NONSTANDARD && PCI && EXPERIMENTAL && BROKEN_ON_SMP && m
+ depends on SERIAL_NONSTANDARD && EXPERIMENTAL && BROKEN_ON_SMP && m
help
This is a driver for the Multi-Tech cards which provide several
serial ports. The driver is experimental and can currently only be
config SYNCLINK
tristate "Microgate SyncLink card support"
- depends on SERIAL_NONSTANDARD && PCI
+ depends on SERIAL_NONSTANDARD
help
Provides support for the SyncLink ISA and PCI multiprotocol serial
adapters. These adapters support asynchronous and HDLC bit
If you have an Alchemy AU1000 processor (MIPS based) and you want
to use serial ports, say Y. Otherwise, say N.
+config SGI_L1_SERIAL
+ bool "SGI Altix L1 serial support"
+ depends on SERIAL_NONSTANDARD && IA64 && DISCONTIGMEM
+ help
+ If you have an SGI Altix and you want to use the serial port
+ connected to the system controller (you want this!), say Y.
+ Otherwise, say N.
+
+config SGI_L1_SERIAL_CONSOLE
+ bool "SGI Altix L1 serial console support"
+ depends on SGI_L1_SERIAL
+ help
+ If you have an SGI Altix and you would like to use the system
+ controller serial port as your console (you want this!),
+ say Y. Otherwise, say N.
+
config AU1000_SERIAL_CONSOLE
bool "Enable Au1000 serial console"
depends on AU1000_UART
console. This driver allows each pSeries partition to have a console
which is accessed via the HMC.
-config HVCS
- tristate "IBM Hypervisor Virtual Console Server support"
- depends on PPC_PSERIES
- help
- Partitionable IBM Power5 ppc64 machines allow hosting of
- firmware virtual consoles from one Linux partition by
- another Linux partition. This driver allows console data
- from Linux partitions to be accessed through TTY device
- interfaces in the device tree of a Linux partition running
- this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called hvcs.ko. Additionally, this module
- will depend on arch specific APIs exported from hvcserver.ko
- which will also be compiled when this driver is built as a
- module.
-
config QIC02_TAPE
tristate "QIC-02 tape support"
help
config APPLICOM
tristate "Applicom intelligent fieldbus card support"
- depends on PCI
---help---
This driver provides the kernel-side support for the intelligent
fieldbus cards made by Applicom International. More information
config FTAPE
tristate "Ftape (QIC-80/Travan) support"
- depends on BROKEN_ON_SMP && (ALPHA || X86)
+ depends on BROKEN_ON_SMP
---help---
If you have a tape drive that is connected to your floppy
controller, say Y here.
is assumed the platform called hpet_alloc with the RTC IRQ values for
the HPET timers.
-config HPET_MMAP
- bool "Allow mmap of HPET"
- default y
+config HPET_NOMMAP
+ bool "HPET - Control mmap capability."
+ default n
depends on HPET
help
- If you say Y here, user applications will be able to mmap
- the HPET registers.
-
- In some hardware implementations, the page containing HPET
- registers may also contain other things that shouldn't be
- exposed to the user. If this applies to your hardware,
- say N here.
+ If you say Y here, then the mmap interface for the HPET driver returns ENOSYS.
+ Some hardware implementations might not want all the memory in the page the
+ HPET control registers reside to be exposed.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-8192)"
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
+obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
-obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
../net/scc.c
A subset of the documentation is in
- Documentation/networking/z8530drv.txt
+ ../../Documentation/networking/z8530drv.txt
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
- 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+ 852GM, 855GM and 865G integrated graphics chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI, or if you have any Intel integrated graphics
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
- /* VIA K8T890 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
- .device = PCI_DEVICE_ID_VIA_3238_0,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- /* VIA K8T800/K8M800/K8N800 */
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_VIA,
- .device = PCI_DEVICE_ID_VIA_838X_1,
+ .device = PCI_DEVICE_ID_VIA_8380_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
-
/* NForce3 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
/*
- * HP zx1 AGPGART routines.
- *
- * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * HP AGPGART routines.
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Bjorn Helgaas <bjorn_helgaas@hp.com>
*/
#include <linux/acpi.h>
/*
* Intel(R) 855GM/852GM and 865G support added by David Dawes
* <dawes@tungstengraphics.com>.
- *
- * Intel(R) 915G support added by Alan Hourihane
- * <alanh@tungstengraphics.com>.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "agp.h"
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
-/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-
-
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
return;
}
-/* Exists to support ARGB cursors */
-static void *i8xx_alloc_pages(void)
-{
- struct page * page;
-
- page = alloc_pages(GFP_KERNEL, 2);
- if (page == NULL) {
- return 0;
- }
- if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
- __free_page(page);
- return 0;
- }
- get_page(page);
- SetPageLocked(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page_address(page);
-}
-
-static void i8xx_destroy_pages(void *addr)
-{
- struct page *page;
-
- if (addr == NULL)
- return;
-
- page = virt_to_page(addr);
- change_page_attr(page, 4, PAGE_KERNEL);
- put_page(page);
- unlock_page(page);
- free_pages((unsigned long)addr, 2);
- atomic_dec(&agp_bridge->current_memory_agp);
-}
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
struct agp_memory *new;
void *addr;
- if (pg_count != 1 && pg_count != 4)
+ if (pg_count != 1)
return NULL;
- switch (pg_count) {
- case 1: addr = agp_bridge->driver->agp_alloc_page();
- break;
- case 4:
- /* kludge to get 4 physical pages for ARGB cursor */
- addr = i8xx_alloc_pages();
- break;
- default:
- return NULL;
- }
-
+ addr = agp_bridge->driver->agp_alloc_page();
if (addr == NULL)
return NULL;
- new = agp_create_memory(pg_count);
+ new = agp_create_memory(1);
if (new == NULL)
return NULL;
- new->memory[0] = virt_to_phys(addr);
- if (pg_count == 4) {
- /* kludge to get 4 physical pages for ARGB cursor */
- new->memory[1] = new->memory[0] + PAGE_SIZE;
- new->memory[2] = new->memory[1] + PAGE_SIZE;
- new->memory[3] = new->memory[2] + PAGE_SIZE;
- }
- new->page_count = pg_count;
- new->num_scratch_pages = pg_count;
+ new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
+ new->page_count = 1;
+ new->num_scratch_pages = 1;
new->type = AGP_PHYS_MEMORY;
new->physical = new->memory[0];
return new;
{
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
- if (curr->page_count == 4)
- i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
- else
- agp_bridge->driver->agp_destroy_page(
- phys_to_virt(curr->memory[0]));
+ agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
{
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
- {256, 65536, 6},
+ {64, 16384, 5}
};
static struct _intel_i830_private {
struct pci_dev *i830_dev; /* device one */
volatile u8 *registers;
- volatile u32 *gtt; /* I915G */
int gtt_entries;
} intel_i830_private;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- int size;
pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
- /* We obtain the size of the GTT, which is also stored (for some
- * reason) at the top of stolen memory. Then we add 4KB to that
- * for the video BIOS popup, which is also stored in there. */
- size = agp_bridge->driver->fetch_size() + 4;
-
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
+ gtt_entries = KB(512) - KB(132);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
+ gtt_entries = MB(1) - KB(132);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
+ gtt_entries = MB(8) - KB(132);
break;
case I830_GMCH_GMS_LOCAL:
rdct = INREG8(intel_i830_private.registers,
} else {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
+ gtt_entries = MB(1) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
+ gtt_entries = MB(4) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
+ gtt_entries = MB(8) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
+ gtt_entries = MB(16) - KB(132);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
+ gtt_entries = MB(32) - KB(132);
break;
- case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
default:
gtt_entries = 0;
break;
agp_bridge->aperture_size_idx = 0;
return(values[0].size);
} else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
agp_bridge->aperture_size_idx = 1;
return(values[1].size);
}
return(NULL);
}
-static int intel_i915_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
-
- OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
- global_cache_flush();
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
- OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
- }
-
- return (0);
-}
-
-static void intel_i915_cleanup(void)
-{
- iounmap((void *) intel_i830_private.gtt);
- iounmap((void *) intel_i830_private.registers);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
- int type)
-{
- int i,j,num_entries;
- void *temp;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_i830_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_i830_private.gtt_entries);
-
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
- return (-EINVAL);
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- return (-EINVAL);
-
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
- if ((type != 0 && type != AGP_PHYS_MEMORY) ||
- (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
- return (-EINVAL);
-
- global_cache_flush();
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
- OUTREG32(intel_i830_private.gtt, j, agp_bridge->driver->mask_memory(mem->memory[i], mem->type));
-
- global_cache_flush();
-
- agp_bridge->driver->tlb_flush(mem);
-
- return(0);
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
-{
- int i;
-
- global_cache_flush();
-
- if (pg_start < intel_i830_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
- return (-EINVAL);
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
-
- global_cache_flush();
-
- agp_bridge->driver->tlb_flush(mem);
-
- return (0);
-}
-
-static int intel_i915_fetch_size(void)
-{
- struct aper_size_info_fixed *values;
- u32 temp, offset = 0;
-
-#define I915_256MB_ADDRESS_MASK (1<<27)
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
- if (temp & I915_256MB_ADDRESS_MASK)
- offset = 0; /* 128MB aperture */
- else
- offset = 2; /* 256MB aperture */
- agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
- return(values[offset].size);
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(void)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = 0;
-
- pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
-
- intel_i830_private.gtt = (volatile u32 *) ioremap(temp2, 256 * 1024);
- if (!intel_i830_private.gtt)
- return (-ENOMEM);
-
- temp &= 0xfff80000;
-
- intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
- if (!intel_i830_private.registers)
- return (-ENOMEM);
-
- temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush();
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return(0);
-}
-
static int intel_fetch_size(void)
{
int i;
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
+ .num_aperture_sizes = 2,
.needs_scratch_page = TRUE,
.configure = intel_i830_configure,
.fetch_size = intel_i830_fetch_size,
.agp_destroy_page = agp_generic_destroy_page,
};
-static struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
- .needs_scratch_page = TRUE,
- .configure = intel_i915_configure,
- .fetch_size = intel_i915_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_destroy_page = agp_generic_destroy_page,
-};
-
-
static struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
bridge->driver = &intel_845_driver;
name = "i875";
break;
- case PCI_DEVICE_ID_INTEL_82915G_HB:
- if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
- bridge->driver = &intel_915_driver;
- } else {
- bridge->driver = &intel_845_driver;
- }
- name = "915G";
- break;
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
- name = "E7505";
+ name = "E7505";
break;
case PCI_DEVICE_ID_INTEL_7205_0:
bridge->driver = &intel_7505_driver;
intel_845_configure();
else if (bridge->driver == &intel_830mp_driver)
intel_830mp_configure();
- else if (bridge->driver == &intel_915_driver)
- intel_i915_configure();
return 0;
}
static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
{
- {
- .device_id = PCI_DEVICE_ID_SI_5591_AGP,
- .chipset_name = "5591",
- },
{
.device_id = PCI_DEVICE_ID_SI_530,
.chipset_name = "530",
.device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
.chipset_name = "PM800/PN800/PM880/PN880",
},
- /* KT880 */
- {
- .device_id = PCI_DEVICE_ID_VIA_3269_0,
- .chipset_name = "KT880",
- },
- /* KTxxx/Px8xx */
- {
- .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
- .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
- },
- /* P4M800 */
- {
- .device_id = PCI_DEVICE_ID_VIA_3296_0,
- .chipset_name = "P4M800",
- },
{ }, /* dummy final entry, always present */
};
ID(PCI_DEVICE_ID_VIA_8378_0),
ID(PCI_DEVICE_ID_VIA_PT880),
ID(PCI_DEVICE_ID_VIA_8783_0),
- ID(PCI_DEVICE_ID_VIA_PX8X0_0),
- ID(PCI_DEVICE_ID_VIA_3269_0),
- ID(PCI_DEVICE_ID_VIA_83_87XX_1),
- ID(PCI_DEVICE_ID_VIA_3296_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
{ }
};
int version_minor; /**< Minor version */
int version_patchlevel;/**< Patch level */
size_t name_len; /**< Length of name buffer */
- char __user *name; /**< Name of driver */
+ char *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
- char __user *date; /**< User-space buffer to hold date */
+ char *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
- char __user *desc; /**< User-space buffer to hold desc */
+ char *desc; /**< User-space buffer to hold desc */
} drm_version_t;
*/
typedef struct drm_unique {
size_t unique_len; /**< Length of unique */
- char __user *unique; /**< Unique name for driver instantiation */
+ char *unique; /**< Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /**< Length of user-space structures */
- drm_version_t __user *version;
+ drm_version_t *version;
} drm_list_t;
*/
typedef struct drm_buf_info {
int count; /**< Entries in list */
- drm_buf_desc_t __user *list;
+ drm_buf_desc_t *list;
} drm_buf_info_t;
*/
typedef struct drm_buf_free {
int count;
- int __user *list;
+ int *list;
} drm_buf_free_t;
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
- void __user *address; /**< Address of buffer */
+ void *address; /**< Address of buffer */
} drm_buf_pub_t;
*/
typedef struct drm_buf_map {
int count; /**< Length of the buffer list */
- void __user *virtual; /**< Mmap'd area in user-virtual */
- drm_buf_pub_t __user *list; /**< Buffer information */
+ void *virtual; /**< Mmap'd area in user-virtual */
+ drm_buf_pub_t *list; /**< Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
- int __user *send_indices; /**< List of handles to buffers */
- int __user *send_sizes; /**< Lengths of data to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
drm_dma_flags_t flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
- int __user *request_indices; /**< Buffer information */
- int __user *request_sizes;
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma_t;
*/
typedef struct drm_ctx_res {
int count;
- drm_ctx_t __user *contexts;
+ drm_ctx_t *contexts;
} drm_ctx_res_t;
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
struct list_head head;
struct work_struct work;
/** \name VBLANK IRQ support */
/*@{*/
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
atomic_t vbl_received;
spinlock_t vbl_lock;
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait);
-extern ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off);
+extern ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off);
/* Memory management support (drm_memory.h) */
extern void DRM(mem_init)(void);
extern void DRM(driver_irq_preinstall)( drm_device_t *dev );
extern void DRM(driver_irq_postinstall)( drm_device_t *dev );
extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
-#ifdef __HAVE_IRQ_BH
+#if __HAVE_IRQ_BH
extern void DRM(irq_immediate_bh)( void *dev );
#endif
#endif
struct proc_dir_entry *root,
struct proc_dir_entry *dev_root);
-#ifdef __HAVE_SG
+#if __HAVE_SG
/* Scatter Gather Support (drm_scatter.h) */
extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
extern int DRM(sg_alloc)(struct inode *inode, struct file *filp,
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
- if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((drm_agp_info_t *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
if (!dev->agp || !dev->agp->acquired || !drm_agp->enable)
return -EINVAL;
- if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
+ if (copy_from_user(&mode, (drm_agp_mode_t *)arg, sizeof(mode)))
return -EFAULT;
dev->agp->mode = mode.mode;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
- drm_agp_buffer_t __user *argp = (void __user *)arg;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(alloc)(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
request.handle = entry->handle;
request.physical = memory->physical;
- if (copy_to_user(argp, &request, sizeof(request))) {
+ if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) {
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
DRM(free_agp)(memory, pages);
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired || !drm_agp->bind_memory)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
}
DRM_DEBUG("%u\n", auth.magic);
- if (copy_to_user((drm_auth_t __user *)arg, &auth, sizeof(auth)))
+ if (copy_to_user((drm_auth_t *)arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
drm_auth_t auth;
drm_file_t *file;
- if (copy_from_user(&auth, (drm_auth_t __user *)arg, sizeof(auth)))
+ if (copy_from_user(&auth, (drm_auth_t *)arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = DRM(find_file)(dev, auth.magic))) {
int order;
unsigned long tmp;
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
- ;
+ for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
- if (size & (size - 1))
+ if ( size & ~(1 << order) )
++order;
return order;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
- drm_map_t __user *argp = (void __user *)arg;
drm_map_list_t *list;
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
if ( !map )
return -ENOMEM;
- if ( copy_from_user( map, argp, sizeof(*map) ) ) {
+ if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EFAULT;
}
list_add(&list->head, &dev->maplist->head);
up(&dev->struct_sem);
- if ( copy_to_user( argp, map, sizeof(*map) ) )
+ if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
return -EFAULT;
if ( map->type != _DRM_SHM ) {
- if ( copy_to_user( &argp->handle,
+ if ( copy_to_user( &((drm_map_t *)arg)->handle,
&map->offset,
sizeof(map->offset) ) )
return -EFAULT;
drm_map_t request;
int found_maps = 0;
- if (copy_from_user(&request, (drm_map_t __user *)arg,
+ if (copy_from_user(&request, (drm_map_t *)arg,
sizeof(request))) {
return -EFAULT;
}
int byte_count;
int i;
drm_buf_t **temp_buflist;
- drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp,
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
int page_count;
unsigned long *temp_pagelist;
drm_buf_t **temp_buflist;
- drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
count = request.count;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
atomic_dec( &dev->buf_alloc );
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
- drm_buf_desc_t __user *argp = (void __user *)arg;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
count = request.count;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
- buf->filp = NULL;
+ buf->filp = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_SG;
{
drm_buf_desc_t request;
- if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
+ if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
- drm_buf_info_t __user *argp = (void __user *)arg;
int i;
int count;
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request,
+ (drm_buf_info_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( request.count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
- drm_buf_desc_t __user *to = &request.list[count];
+ drm_buf_desc_t *to = &request.list[count];
drm_buf_entry_t *from = &dma->bufs[i];
drm_freelist_t *list = &dma->bufs[i].freelist;
if ( copy_to_user( &to->count,
}
request.count = count;
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_info_t *)arg,
+ &request,
+ sizeof(request) ) )
return -EFAULT;
return 0;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_desc_t __user *)arg,
+ (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_free_t __user *)arg,
+ (drm_buf_free_t *)arg,
sizeof(request) ) )
return -EFAULT;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
- drm_buf_map_t __user *argp = (void __user *)arg;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request, (drm_buf_map_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
if ( request.count >= dma->buf_count ) {
retcode = (signed long)virtual;
goto done;
}
- request.virtual = (void __user *)virtual;
+ request.virtual = (void *)virtual;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
if ( copy_to_user( &request.list[i].idx,
request.count = dma->buf_count;
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
- if ( copy_to_user( argp, &request, sizeof(request) ) )
+ if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
return retcode;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request,
+ (drm_ctx_priv_map_t *)arg,
+ sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user(argp, &request, sizeof(request)))
+ if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *)arg,
+ (drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
unsigned int cmd, unsigned long arg )
{
drm_ctx_res_t res;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
int i;
- if ( copy_from_user( &res, argp, sizeof(res) ) )
+ if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
return -EFAULT;
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
}
res.count = DRM_RESERVED_CONTEXTS;
- if ( copy_to_user( argp, &res, sizeof(res) ) )
+ if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_list_t * ctx_entry;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
ctx.handle = DRM(ctxbitmap_next)( dev );
++dev->ctx_count;
up( &dev->ctxlist_sem );
- if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
int DRM(getctx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
- if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
{
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
- if (copy_to_user((drm_draw_t __user *)arg, &draw, sizeof(draw)))
+ if (copy_to_user((drm_draw_t *)arg, &draw, sizeof(draw)))
return -EFAULT;
return 0;
}
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
#endif
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
#endif
int DRM(version)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
- drm_version_t __user *argp = (void __user *)arg;
drm_version_t version;
int len;
- if ( copy_from_user( &version, argp, sizeof(version) ) )
+ if ( copy_from_user( &version,
+ (drm_version_t *)arg,
+ sizeof(version) ) )
return -EFAULT;
#define DRM_COPY( name, value ) \
DRM_COPY( version.date, DRIVER_DATE );
DRM_COPY( version.desc, DRIVER_DESC );
- if ( copy_to_user( argp, &version, sizeof(version) ) )
+ if ( copy_to_user( (drm_version_t *)arg,
+ &version,
+ sizeof(version) ) )
return -EFAULT;
return 0;
}
++priv->lock_count;
- if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
drm_device_t *dev = priv->dev;
drm_lock_t lock;
- if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
* agent to request it then we should just be able to
* take it immediately and not eat the ioctl.
*/
- dev->lock.filp = NULL;
+ dev->lock.filp = 0;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;
#if !__HAVE_DRIVER_FOPS_READ
/** No-op. */
-ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_unique_t __user *argp = (void __user *)arg;
drm_unique_t u;
- if (copy_from_user(&u, argp, sizeof(u)))
+ if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (u.unique_len >= dev->unique_len) {
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
return -EFAULT;
}
u.unique_len = dev->unique_len;
- if (copy_to_user(argp, &u, sizeof(u)))
+ if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
return -EFAULT;
return 0;
}
if (dev->unique_len || dev->unique) return -EBUSY;
- if (copy_from_user(&u, (drm_unique_t __user *)arg, sizeof(u)))
- return -EFAULT;
+ if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) return -EFAULT;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_map_t __user *argp = (void __user *)arg;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
- if (copy_from_user(&map, argp, sizeof(map)))
+ if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
return -EFAULT;
idx = map.offset;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
- if (copy_to_user(argp, &map, sizeof(map))) return -EFAULT;
+ if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_client_t __user *argp = (void __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
int i;
- if (copy_from_user(&client, argp, sizeof(client)))
+ if (copy_from_user(&client, (drm_client_t *)arg, sizeof(client)))
return -EFAULT;
idx = client.idx;
down(&dev->struct_sem);
client.iocs = pt->ioctl_count;
up(&dev->struct_sem);
- if (copy_to_user((drm_client_t __user *)arg, &client, sizeof(client)))
+ if (copy_to_user((drm_client_t *)arg, &client, sizeof(client)))
return -EFAULT;
return 0;
}
up(&dev->struct_sem);
- if (copy_to_user((drm_stats_t __user *)arg, &stats, sizeof(stats)))
+ if (copy_to_user((drm_stats_t *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
drm_set_version_t sv;
drm_set_version_t retv;
int if_version;
- drm_set_version_t __user *argp = (void __user *)data;
- DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
+ DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = DRIVER_MAJOR;
retv.drm_dd_minor = DRIVER_MINOR;
- DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
+ DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_irq_busid_t __user *argp = (void __user *)arg;
drm_irq_busid_t p;
- if (copy_from_user(&p, argp, sizeof(p)))
+ if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
return -EFAULT;
if ((p.busnum >> 8) != dev->pci_domain ||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
- if (copy_to_user(argp, &p, sizeof(p)))
+ if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
return -EFAULT;
return 0;
}
dev->dma->this_buffer = NULL;
#endif
-#ifdef __HAVE_IRQ_BH
+#if __HAVE_IRQ_BH
INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev);
#endif
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
drm_device_t *dev = priv->dev;
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
}
}
-#ifdef __HAVE_VBL_IRQ
+#if __HAVE_VBL_IRQ
/**
* Wait for VBLANK.
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_wait_vblank_t __user *argp = (void __user *)data;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
if (!dev->irq)
return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL( vblwait, argp, sizeof(vblwait) );
+ DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
+ sizeof(vblwait) );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
}
done:
- DRM_COPY_TO_USER_IOCTL( argp, vblwait, sizeof(vblwait) );
+ DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
+ sizeof(vblwait) );
return ret;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_scatter_gather_t __user *argp = (void __user *)arg;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages, i, j;
if ( dev->sg )
return -EINVAL;
- if ( copy_from_user( &request, argp, sizeof(request) ) )
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
return -EFAULT;
entry = DRM(alloc)( sizeof(*entry), DRM_MEM_SGLISTS );
request.handle = entry->handle;
- if ( copy_to_user( argp, &request, sizeof(request) ) ) {
+ if ( copy_to_user( (drm_scatter_gather_t *)arg,
+ &request,
+ sizeof(request) ) ) {
DRM(sg_cleanup)( entry );
return -EFAULT;
}
drm_sg_mem_t *entry;
if ( copy_from_user( &request,
- (drm_scatter_gather_t __user *)arg,
+ (drm_scatter_gather_t *)arg,
sizeof(request) ) )
return -EFAULT;
struct drm_agp_mem *agpmem;
struct page *page;
-#ifdef __alpha__
+#if __alpha__
/*
* Adjust to a bus-relative address
*/
{
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
-#ifdef DRM_DMA_HISTOGRAM
+#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, (drm_ctx_res_t __user *)arg, sizeof(res)))
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user((drm_ctx_res_t __user *)arg, &res, sizeof(res)))
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
idx = DRM(alloc_queue)(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
if (idx < 0)
DRM_DEBUG("%d\n", ctx.handle);
ctx.handle = idx;
- if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
else
ctx.flags = 0;
- if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
{
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
unsigned long addr = -ENOMEM;
if (!map)
- return get_unmapped_area(NULL, hint, len, pgoff, flags);
+ return get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
if (map->type == _DRM_FRAME_BUFFER ||
map->type == _DRM_REGISTERS) {
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
#else
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
#endif
} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
unsigned long slack = SHMLBA - PAGE_SIZE;
- addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags, 0);
if (!(addr & ~PAGE_MASK)) {
unsigned long kvirt = (unsigned long) map->handle;
}
}
} else {
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
}
return addr;
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
-ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_ctx_res_t __user *argp = (void __user *)arg;
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, argp, sizeof(res)))
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user(argp, &res, sizeof(res)))
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- drm_ctx_t __user *argp = (void __user *)arg;
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ctx.flags = q->flags;
atomic_dec(&q->use_count);
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
drm_queue_t *q;
drm_buf_t *buf;
- if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
- drm_dma_t __user *argp = (void __user *)arg;
drm_dma_t d;
- if (copy_from_user(&d, argp, sizeof(d)))
+ if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
return -EFAULT;
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
- if (copy_to_user(argp, &d, sizeof(d)))
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
return retcode;
LOCK_TEST_WITH_RETURN( dev, filp );
- if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
+ if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
return -EFAULT;
switch ( init.func ) {
drm_device_t *dev = priv->dev;
drm_gamma_copy_t copy;
- if ( copy_from_user( ©, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
+ if ( copy_from_user( ©, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
return -EFAULT;
return gamma_do_copy_dma( dev, © );
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request, argp, sizeof(request)))
+ if (copy_from_user(&request,
+ (drm_ctx_priv_map_t *)arg,
+ sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user(argp, &request, sizeof(request)))
+ if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *)arg,
+ (drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
DRM_DEBUG("\n");
- if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
- int *ind;
- int err;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
remove_wait_queue(&q->write_queue, &entry);
}
- ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
- if (!ind)
- return -ENOMEM;
-
- if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
- err = -EFAULT;
- goto out;
- }
-
- err = -EINVAL;
for (i = 0; i < d->send_count; i++) {
- idx = ind[i];
+ idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
- ind[i], dma->buf_count - 1);
- goto out;
+ d->send_indices[i], dma->buf_count - 1);
+ return -EINVAL;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
- goto out;
+ return -EINVAL;
}
if (buf->list != DRM_LIST_NONE) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
- goto out;
}
- buf->used = ind[i];
+ buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
- ind[i], i);
- goto out;
+ d->send_indices[i], i);
+ return -EINVAL;
}
if (buf->waiting) {
+ atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
- ind[i], i);
- goto out;
+ d->send_indices[i], i);
+ return -EINVAL;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
atomic_dec(&q->use_count);
return 0;
-
-out:
- DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
- atomic_dec(&q->use_count);
- return err;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
/* Real error */
DRM_ERROR("mmap error\n");
retcode = (signed int)buf_priv->virtual;
- buf_priv->virtual = NULL;
+ buf_priv->virtual = 0;
}
up_write( ¤t->mm->mmap_sem );
up_write(¤t->mm->mmap_sem);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
+ buf_priv->virtual = 0;
return retcode;
}
{
/* Get v1.1 init data */
- if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
+ if (copy_from_user(init, (drm_i810_pre12_init_t *)arg,
sizeof(drm_i810_pre12_init_t))) {
return -EFAULT;
}
/* This is a v1.2 client, just get the v1.2 init data */
DRM_INFO("Using POST v1.2 init.\n");
- if (copy_from_user(init, (drm_i810_init_t __user *)arg,
+ if (copy_from_user(init, (drm_i810_init_t *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
int retcode = 0;
/* Get only the init func */
- if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
+ if (copy_from_user(&init, (void *)arg, sizeof(drm_i810_init_func_t)))
return -EFAULT;
switch(init.func) {
default:
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
- if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
+ if (copy_from_user(&init, (drm_i810_init_t *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
dev_priv->sarea_priv;
drm_i810_vertex_t vertex;
- if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
+ if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_device_t *dev = priv->dev;
drm_i810_clear_t clear;
- if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
+ if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d)))
+ if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
- if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];
dev_priv->sarea_priv;
drm_i810_mc_t mc;
- if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc)))
+ if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
return -EFAULT;
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
- if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
+ if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
return -EFAULT;
return 0;
}
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
struct file_operations *old_fops;
- unsigned long virtual;
int retcode = 0;
if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
old_fops = filp->f_op;
filp->f_op = &i830_buffer_fops;
dev_priv->mmap_buffer = buf;
- virtual = do_mmap(filp, 0, buf->total, PROT_READ|PROT_WRITE,
- MAP_SHARED, buf->bus_address);
+ buf_priv->virtual = (void __user *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
dev_priv->mmap_buffer = NULL;
filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
+ if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
- retcode = virtual;
+ retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
}
up_write( ¤t->mm->mmap_sem );
}
int i830_dma_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flush_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_clear_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_swap_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flip_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_copybuf(struct inode *inode,
struct file *filp,
unsigned int cmd,
- unsigned long arg)
+ unsigned long __user arg)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg )
+ unsigned long __user arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
return -EINVAL;
}
- if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) ))
+ if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) ))
return -EFAULT;
result = i830_emit_irq( dev );
return -EINVAL;
}
- if (copy_from_user( &irqwait, (drm_i830_irq_wait_t __user *)arg,
+ if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg,
sizeof(irqwait) ))
return -EFAULT;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t *)data, sizeof(init) );
switch ( init.func ) {
case MGA_INIT_DMA:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
+ DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
DRM_DEBUG( "%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
int ret = 0;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = mga_dma_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
return ret;
}
typedef struct drm_mga_getparam {
int param;
- void __user *value;
+ void *value;
} drm_mga_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t __user *)data, sizeof(clear) );
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( vertex,
- (drm_mga_vertex_t __user *)data,
+ (drm_mga_vertex_t *)data,
sizeof(vertex) );
if(vertex.idx < 0 || vertex.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( indices,
- (drm_mga_indices_t __user *)data,
+ (drm_mga_indices_t *)data,
sizeof(indices) );
if(indices.idx < 0 || indices.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t __user *)data, sizeof(iload) );
+ DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) );
#if 0
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t __user *)data, sizeof(blit) );
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t *)data, sizeof(init) );
switch ( init.func ) {
case R128_INIT_CCE:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) );
/* Flush any pending CCE commands. This ensures any outstanding
* commands are exectuted by the engine before we turn it off.
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = r128_cce_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) );
return ret;
}
R128_READ_PIXELS = 0x04
} func;
int n;
- int __user *x;
- int __user *y;
- unsigned int __user *buffer;
- unsigned char __user *mask;
+ int *x;
+ int *y;
+ unsigned int *buffer;
+ unsigned char *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
- unsigned int __user *mask;
+ unsigned int *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
typedef struct drm_r128_getparam {
int param;
- void __user *value;
+ void *value;
} drm_r128_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t *) data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t *) data,
sizeof(elts) );
DRM_DEBUG( "pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data,
sizeof(blit) );
DRM_DEBUG( "pid=%d index=%d\n", DRM_CURRENTPID, blit.idx );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data,
sizeof(depth) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t *) data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t *)data, sizeof(init) );
switch ( init.func ) {
case RADEON_INIT_CP:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t __user *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
/* Please don't send us buffers.
*/
ret = radeon_cp_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
return ret;
}
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
- drm_radeon_clear_rect_t __user *depth_boxes;
+ drm_radeon_clear_rect_t *depth_boxes;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
- drm_radeon_state_t __user *state;
+ drm_radeon_state_t *state;
int nr_prims;
- drm_radeon_prim_t __user *prim;
+ drm_radeon_prim_t *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
*/
typedef struct drm_radeon_cmd_buffer {
int bufsz;
- char __user *buf;
+ char *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ drm_clip_rect_t *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
- const void __user *data;
+ const void *data;
} drm_radeon_tex_image_t;
typedef struct drm_radeon_texture {
int format;
int width; /* Texture image coordinates */
int height;
- drm_radeon_tex_image_t __user *image;
+ drm_radeon_tex_image_t *image;
} drm_radeon_texture_t;
typedef struct drm_radeon_stipple {
- unsigned int __user *mask;
+ unsigned int *mask;
} drm_radeon_stipple_t;
typedef struct drm_radeon_indirect {
typedef struct drm_radeon_getparam {
int param;
- void __user *value;
+ void *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
int region;
int alignment;
int size;
- int __user *region_offset; /* offset from start of fb or GART */
+ int *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
typedef struct drm_radeon_mem_free {
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
- int __user *irq_seq;
+ int *irq_seq;
} drm_radeon_irq_emit_t;
typedef struct drm_radeon_irq_wait {
#define OUT_RING_USER_TABLE( tab, sz ) do { \
int _size = (sz); \
- int __user *_tab = (tab); \
+ int *_tab = (tab); \
\
if (write + _size > mask) { \
int i = (mask+1) - write; \
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data,
sizeof(emit) );
result = radeon_emit_irq( dev );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t __user*)data,
+ DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data,
sizeof(irqwait) );
return radeon_wait_irq( dev, irqwait.irq_seq );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
sizeof(alloc) );
heap = get_heap( dev_priv, alloc.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
sizeof(memfree) );
heap = get_heap( dev_priv, memfree.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
sizeof(initheap) );
heap = get_heap( dev_priv, initheap.region );
static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
- u32 __user *offset ) {
+ u32 *offset ) {
u32 off;
DRM_GET_USER_UNCHECKED( off, offset );
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
int id,
- u32 __user *data ) {
+ u32 *data ) {
switch ( id ) {
case RADEON_EMIT_PP_MISC:
drm_file_t *filp_priv,
drm_radeon_cmd_buffer_t *cmdbuf,
unsigned int *cmdsz ) {
- u32 tmp[4];
- u32 __user *cmd = (u32 __user *)cmdbuf->buf;
+ u32 tmp[4], *cmd = ( u32* )cmdbuf->buf;
if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
DRM_ERROR( "Failed to copy data from user space\n" );
drm_buf_t *buf;
u32 format;
u32 *buffer;
- const u8 __user *data;
+ const u8 *data;
int size, dwords, tex_width, blit_width;
u32 height;
int i;
* update them for a multi-pass texture blit.
*/
height = image->height;
- data = (const u8 __user *)image->data;
+ data = (const u8 *)image->data;
size = height * blit_width;
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
- image->data = (const u8 __user *)image->data + size;
+ image->data = (const u8 *)image->data + size;
} while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
sizeof(elts) );
DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
+ DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
if ( tex.image == NULL ) {
DRM_ERROR( "null texture image!\n" );
}
if ( DRM_COPY_FROM_USER( &image,
- (drm_radeon_tex_image_t __user *)tex.image,
+ (drm_radeon_tex_image_t *)tex.image,
sizeof(image) ) )
return DRM_ERR(EFAULT);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d discard=%d\n",
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = header.scalars.offset;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = ((unsigned int)header.scalars.offset) + 0x100;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.vectors.count;
- int __user *data = (int __user *)cmdbuf->buf;
+ int *data = (int *)cmdbuf->buf;
int start = header.vectors.offset;
int stride = header.vectors.stride;
RING_LOCALS;
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
- int ret;
+ int *cmd = (int *)cmdbuf->buf, ret;
RING_LOCALS;
DRM_DEBUG("\n");
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_clip_rect_t box;
unsigned int cmdsz;
- int __user *cmd = (int __user *)cmdbuf->buf;
- int ret;
- drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+ int *cmd = (int *)cmdbuf->buf, ret;
+ drm_clip_rect_t *boxes = cmdbuf->boxes;
int i = 0;
RING_LOCALS;
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
sizeof(cmdbuf) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
while ( cmdbuf.bufsz >= sizeof(header) ) {
- if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
+ if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
DRM_ERROR("__get_user %p\n", cmdbuf.buf);
return DRM_ERR(EFAULT);
}
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
+ DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t* )data,
sizeof( sp ) );
switch( sp.param ) {
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t fb;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t *)data, sizeof(fb));
if (dev_priv == NULL) {
dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t),
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t fb;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
if (block) {
fb.free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
+ DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb));
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock)fb.free))
return DRM_ERR(EINVAL);
if (dev_priv->AGPHeap != NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t *)data, sizeof(agp));
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t agp;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
if (block) {
agp.free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
+ DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, agp, sizeof(agp));
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock)agp.free))
return DRM_ERR(EINVAL);
}
static ssize_t
-ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+ds1620_read(struct file *file, char *buf, size_t count, loff_t *ptr)
{
signed int cur_temp;
signed char cur_temp_degF;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
/* convert to Fahrenheit, as per wdt.c */
ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
- union {
- struct therm __user *therm;
- int __user *i;
- } uarg;
int i;
- uarg.i = (int __user *)arg;
-
switch(cmd) {
case CMD_SET_THERMOSTATE:
case CMD_SET_THERMOSTATE2:
return -EPERM;
if (cmd == CMD_SET_THERMOSTATE) {
- if (get_user(therm.hi, uarg.i))
+ if (get_user(therm.hi, (int *)arg))
return -EFAULT;
therm.lo = therm.hi - 3;
} else {
- if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
+ if (copy_from_user(&therm, (void *)arg, sizeof(therm)))
return -EFAULT;
}
therm.hi >>= 1;
if (cmd == CMD_GET_THERMOSTATE) {
- if (put_user(therm.hi, uarg.i))
+ if (put_user(therm.hi, (int *)arg))
return -EFAULT;
} else {
- if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
+ if (copy_to_user((void *)arg, &therm, sizeof(therm)))
return -EFAULT;
}
break;
if (cmd == CMD_GET_TEMPERATURE)
i >>= 1;
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_GET_STATUS:
i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_GET_FAN:
i = netwinder_get_fan();
- return put_user(i, uarg.i) ? -EFAULT : 0;
+ return put_user(i, (int *)arg) ? -EFAULT : 0;
case CMD_SET_FAN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (get_user(i, uarg.i))
+ if (get_user(i, (int *)arg))
return -EFAULT;
netwinder_set_fan(i);
static struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
- .open = nonseekable_open,
.read = ds1620_read,
.ioctl = ds1620_ioctl,
};
return ret;
#ifdef THERM_USE_PROC
- proc_therm_ds1620 = create_proc_entry("therm", 0, NULL);
+ proc_therm_ds1620 = create_proc_entry("therm", 0, 0);
if (proc_therm_ds1620)
proc_therm_ds1620->read_proc = proc_therm_ds1620_read;
else
}
case 2: /* 16 bit */
{
- const short *data;
+ short *data;
count /= 2;
- data = (const short *)buf;
+ data = (short*) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.w[1], data+n++));
return 2*n;
}
case 4: /* 32 bit */
{
- const long *data;
+ long *data;
count /= 4;
- data = (const long *)buf;
+ data = (long*) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.l, data+n++));
return 4*n;
char ch;
int i = 0, retries;
+ /* Can't seek (pread) on the DoubleTalk. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
TRACE_TEXT("(dtlk_read");
/* printk("DoubleTalk PC - dtlk_read()\n"); */
}
#endif
+ /* Can't seek (pwrite) on the DoubleTalk. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (iminor(file->f_dentry->d_inode) != DTLK_MINOR)
return -EINVAL;
{
TRACE_TEXT("(dtlk_open");
- nonseekable_open(inode, file);
switch (iminor(inode)) {
case DTLK_MINOR:
if (dtlk_busy)
return -EBUSY;
- return nonseekable_open(inode, file);
+ return 0;
default:
return -ENXIO;
#define ENABLE_PCI
#endif /* CONFIG_PCI */
-#define putUser(arg1, arg2) put_user(arg1, (unsigned long __user *)arg2)
-#define getUser(arg1, arg2) get_user(arg1, (unsigned __user *)arg2)
+#define putUser(arg1, arg2) put_user(arg1, (unsigned long *)arg2)
+#define getUser(arg1, arg2) get_user(arg1, (unsigned int *)arg2)
#ifdef ENABLE_PCI
#include <linux/pci.h>
void epca_setup(char *, int *);
void console_print(const char *);
-static int get_termio(struct tty_struct *, struct termio __user *);
+static int get_termio(struct tty_struct *, struct termio *);
static int pc_write(struct tty_struct *, int, const unsigned char *, int);
int pc_init(void);
if (bytesAvailable)
{ /* Begin bytesAvailable */
- /* ---------------------------------------------------------------
- The below function reads data from user memory. This routine
- can not be used in an interrupt routine. (Because it may
- generate a page fault) It can only be called while we can the
- user context is accessible.
-
- The prototype is :
- inline void copy_from_user(void * to, const void * from,
- unsigned long count);
-
- I also think (Check hackers guide) that optimization must
- be turned ON. (Which sounds strange to me...)
-
- Remember copy_from_user WILL generate a page fault if the
- user memory being accessed has been swapped out. This can
- cause this routine to temporarily sleep while this page
- fault is occurring.
-
- ----------------------------------------------------------------- */
- if (copy_from_user(ch->tmp_buf, buf,
- bytesAvailable))
- return -EFAULT;
+ /* Can the user buffer be accessed at the moment ? */
+ if (verify_area(VERIFY_READ, (char*)buf, bytesAvailable))
+ bytesAvailable = 0; /* Can't do; try again later */
+ else /* Evidently it can, began transmission */
+ { /* Begin if area verified */
+ /* ---------------------------------------------------------------
+ The below function reads data from user memory. This routine
+ can not be used in an interrupt routine. (Because it may
+ generate a page fault) It can only be called while we can the
+ user context is accessible.
+
+ The prototype is :
+ inline void copy_from_user(void * to, const void * from,
+ unsigned long count);
+
+ I also think (Check hackers guide) that optimization must
+ be turned ON. (Which sounds strange to me...)
+
+ Remember copy_from_user WILL generate a page fault if the
+ user memory being accessed has been swapped out. This can
+ cause this routine to temporarily sleep while this page
+ fault is occurring.
+
+ ----------------------------------------------------------------- */
+
+ if (copy_from_user(ch->tmp_buf, buf,
+ bytesAvailable))
+ return -EFAULT;
+
+ } /* End if area verified */
+
} /* End bytesAvailable */
/* ------------------------------------------------------------------
ch->boardnum = crd;
ch->channelnum = i;
ch->magic = EPCA_MAGIC;
- ch->tty = NULL;
+ ch->tty = 0;
if (shrinkmem)
{
{ /* Begin receive_data */
unchar *rptr;
- struct termios *ts = NULL;
+ struct termios *ts = 0;
struct tty_struct *tty;
volatile struct board_chan *bc;
register int dataToRead, wrapgap, bytesAvailable;
static int info_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
+ int error;
+
switch (cmd)
{ /* Begin switch cmd */
struct digi_info di ;
int brd;
- getUser(brd, (unsigned int __user *)arg);
+ getUser(brd, (unsigned int *)arg);
+
+ if ((error = verify_area(VERIFY_WRITE, (char*)arg, sizeof(di))))
+ {
+ printk(KERN_ERR "DIGI_GETINFO : verify area size 0x%x failed\n",sizeof(di));
+ return(error);
+ }
if ((brd < 0) || (brd >= num_cards) || (num_cards == 0))
return (-ENODEV);
di.port = boards[brd].port ;
di.membase = boards[brd].membase ;
- if (copy_to_user((void __user *)arg, &di, sizeof (di)))
+ if (copy_to_user((char *)arg, &di, sizeof (di)))
return -EFAULT;
break;
epcaparam(tty,ch);
memoff(ch);
restore_flags(flags);
- return 0;
}
static int pc_ioctl(struct tty_struct *tty, struct file * file,
{ /* Begin pc_ioctl */
digiflow_t dflow;
- int retval;
+ int retval, error;
unsigned long flags;
unsigned int mflag, mstat;
unsigned char startc, stopc;
volatile struct board_chan *bc;
struct channel *ch = (struct channel *) tty->driver_data;
- void __user *argp = (void __user *)arg;
if (ch)
bc = ch->brdchan;
{ /* Begin switch cmd */
case TCGETS:
- if (copy_to_user(argp,
+ if (copy_to_user((struct termios *)arg,
tty->termios, sizeof(struct termios)))
return -EFAULT;
return(0);
case TCGETA:
- return get_termio(tty, argp);
+ return get_termio(tty, (struct termio *)arg);
case TCSBRK: /* SVID version: non-zero arg --> no break */
return 0;
case TIOCGSOFTCAR:
- if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
- return -EFAULT;
+
+ error = verify_area(VERIFY_WRITE, (void *) arg,sizeof(long));
+ if (error)
+ return error;
+
+ putUser(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long *) arg);
return 0;
case TIOCSSOFTCAR:
+ /*RONNIE PUT VERIFY_READ (See above) check here */
{
unsigned int value;
- if (get_user(value, (unsigned __user *)argp))
- return -EFAULT;
+ getUser(value, (unsigned int *)arg);
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(value ? CLOCAL : 0));
case TIOCMODG:
mflag = pc_tiocmget(tty, file);
- if (put_user(mflag, (unsigned long __user *)argp))
+ if (putUser(mflag, (unsigned int *) arg))
return -EFAULT;
break;
case TIOCMODS:
- if (get_user(mstat, (unsigned __user *)argp))
+ if (getUser(mstat, (unsigned int *)arg))
return -EFAULT;
return pc_tiocmset(tty, file, mstat, ~mstat);
break;
case DIGI_GETA:
- if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
+ if (copy_to_user((char*)arg, &ch->digiext,
+ sizeof(digi_t)))
return -EFAULT;
break;
/* Fall Thru */
case DIGI_SETA:
- if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
+ if (copy_from_user(&ch->digiext, (char*)arg,
+ sizeof(digi_t)))
return -EFAULT;
if (ch->digiext.digi_flags & DIGI_ALTPIN)
memoff(ch);
restore_flags(flags);
- if (copy_to_user(argp, &dflow, sizeof(dflow)))
+ if (copy_to_user((char*)arg, &dflow, sizeof(dflow)))
return -EFAULT;
break;
stopc = ch->stopca;
}
- if (copy_from_user(&dflow, argp, sizeof(dflow)))
+ if (copy_from_user(&dflow, (char*)arg, sizeof(dflow)))
return -EFAULT;
if (dflow.startc != startc || dflow.stopc != stopc)
/* --------------------- Begin get_termio ----------------------- */
-static int get_termio(struct tty_struct * tty, struct termio __user * termio)
+static int get_termio(struct tty_struct * tty, struct termio * termio)
{ /* Begin get_termio */
- return kernel_termios_to_user_termio(termio, tty->termios);
+ int error;
+
+ error = verify_area(VERIFY_WRITE, termio, sizeof (struct termio));
+ if (error)
+ return error;
+
+ kernel_termios_to_user_termio(termio, tty->termios);
+
+ return 0;
} /* End get_termio */
/* ---------------------- Begin epca_setup -------------------------- */
void epca_setup(char *str, int *ints)
else if (request_dma(dma, "esp serial")) {
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
+ dma_buffer = 0;
info->stat_flags |= ESP_STAT_USE_PIO;
}
free_dma(dma);
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
+ dma_buffer = 0;
}
}
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
info->IER = 0;
*/
static int get_serial_info(struct esp_struct * info,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
+ if (!retinfo)
+ return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
tmp.line = info->line;
}
static int get_esp_config(struct esp_struct * info,
- struct hayes_esp_config __user *retinfo)
+ struct hayes_esp_config * retinfo)
{
struct hayes_esp_config tmp;
}
static int set_serial_info(struct esp_struct * info,
- struct serial_struct __user *new_info)
+ struct serial_struct * new_info)
{
struct serial_struct new_serial;
struct esp_struct old_info;
}
static int set_esp_config(struct esp_struct * info,
- struct hayes_esp_config __user * new_info)
+ struct hayes_esp_config * new_info)
{
struct hayes_esp_config new_config;
unsigned int change_dma;
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
+static int get_lsr_info(struct esp_struct * info, unsigned int *value)
{
unsigned char status;
unsigned int result;
{
struct esp_struct * info = (struct esp_struct *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser; /* user space */
- void __user *argp = (void __user *)arg;
+ struct serial_icounter_struct *p_cuser; /* user space */
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
switch (cmd) {
case TIOCGSERIAL:
- return get_serial_info(info, argp);
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
- return set_serial_info(info, argp);
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSERCONFIG:
/* do not reconfigure after initial configuration */
return 0;
case TIOCSERGWILD:
- return put_user(0L, (unsigned long __user *)argp);
+ return put_user(0L, (unsigned long *) arg);
case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, argp);
+ return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERSWILD:
if (!capable(CAP_SYS_ADMIN))
cli();
cnow = info->icount;
sti();
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
if (put_user(cnow.cts, &p_cuser->cts) ||
put_user(cnow.dsr, &p_cuser->dsr) ||
put_user(cnow.rng, &p_cuser->rng) ||
return 0;
case TIOCGHAYESESP:
- return get_esp_config(info, argp);
+ return (get_esp_config(info, (struct hayes_esp_config *)arg));
case TIOCSHAYESESP:
- return set_esp_config(info, argp);
+ return (set_esp_config(info, (struct hayes_esp_config *)arg));
default:
return -ENOIOCTLCMD;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
int i, offset;
int region_start;
struct esp_struct * info;
- struct esp_struct *last_primary = NULL;
+ struct esp_struct *last_primary = 0;
int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380};
esp_driver = alloc_tty_driver(NR_PORTS);
/* compress a block of memory, decompress a block of memory, or to identify */
/* itself. For more information, see the specification file "compress.h". */
-EXPORT void lzrw3_compress(
- UWORD action, /* Action to be performed. */
- UBYTE *wrk_mem, /* Address of working memory we can use.*/
- UBYTE *src_adr, /* Address of input data. */
- LONG src_len, /* Length of input data. */
- UBYTE *dst_adr, /* Address to put output data. */
- void *p_dst_len /* Address of longword for length of output data.*/
-)
+EXPORT void lzrw3_compress(action,wrk_mem,src_adr,src_len,dst_adr,p_dst_len)
+UWORD action; /* Action to be performed. */
+UBYTE *wrk_mem; /* Address of working memory we can use. */
+UBYTE *src_adr; /* Address of input data. */
+LONG src_len; /* Length of input data. */
+UBYTE *dst_adr; /* Address to put output data. */
+void *p_dst_len; /* Address of longword for length of output data. */
{
switch (action)
{
(((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF)
/******************************************************************************/
-
+
+LOCAL void compress_compress
+ (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone (OZ). */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May */
/* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/
/* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES. */
-LOCAL void compress_compress(UBYTE *p_wrk_mem,
- UBYTE *p_src_first, ULONG src_len,
- UBYTE *p_dst_first, LONG *p_dst_len)
+UBYTE *p_wrk_mem;
+UBYTE *p_src_first;
+ULONG src_len;
+UBYTE *p_dst_first;
+LONG *p_dst_len;
{
/* p_src and p_dst step through the source and destination blocks. */
register UBYTE *p_src = p_src_first;
/* to the hash table entry corresponding to the second youngest literal. */
/* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending */
/* literal. The variables are initialized to zero meaning an empty "buffer". */
- UBYTE **p_h1=NULL;
- UBYTE **p_h2=NULL;
+ UBYTE **p_h1=0;
+ UBYTE **p_h2=0;
/* To start, we write the flag bytes. Being optimistic, we set the flag to */
/* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the */
/* upon the arrival of extra context bytes. */
if (p_h1!=0)
{
- if (p_h2)
- {*p_h2=p_ziv-2; p_h2=NULL;}
- *p_h1=p_ziv-1; p_h1=NULL;
+ if (p_h2!=0)
+ {*p_h2=p_ziv-2; p_h2=0;}
+ *p_h1=p_ziv-1; p_h1=0;
}
/* In any case, we can update the hash table based on the current */
/******************************************************************************/
+LOCAL void compress_decompress
+ (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone. */
/* Output : Length of output block written to *p_dst_len. */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
/* Output : Writes only in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
-LOCAL void compress_decompress( UBYTE *p_wrk_mem,
- UBYTE *p_src_first, LONG src_len,
- UBYTE *p_dst_first, ULONG *p_dst_len)
+UBYTE *p_wrk_mem;
+UBYTE *p_src_first;
+LONG src_len;
+UBYTE *p_dst_first;
+ULONG *p_dst_len;
{
/* Byte pointers p_src and p_dst scan through the input and output blocks. */
register UBYTE *p_src = p_src_first+FLAG_BYTES;
/* forward */
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_read(int *read_cnt,
- __u8 __user *dst_buf, const int to_do,
+ __u8 *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_seek(unsigned int new_block_pos,
*/
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume)
{
int req_len_left = req_len;
* be set to 0
*/
static int zftc_read (int *read_cnt,
- __u8 __user *dst_buf, const int to_do,
+ __u8 *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume)
{
TRACE(ft_t_info, "ftape_init @ 0x%p", ftape_init);
/* Allocate the DMA buffers. They are deallocated at cleanup() time.
*/
-#ifdef TESTING
+#if TESTING
#ifdef MODULE
while (ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS) < 0) {
ftape_sleep(FT_SECOND/20);
ptr += get_history_info(ptr);
len = strlen(page);
- *start = NULL;
+ *start = 0;
if (off+count >= len) {
*eof = 1;
} else {
/* IOCTL routine called by kernel-interface code
*/
-int _zft_ioctl(unsigned int command, void __user * arg)
+int _zft_ioctl(unsigned int command, void * arg)
{
int result;
union { struct mtop mtop;
*/
extern int _zft_open(unsigned int dev_minor, unsigned int access_mode);
extern int _zft_close(void);
-extern int _zft_ioctl(unsigned int command, void __user *arg);
+extern int _zft_ioctl(unsigned int command, void *arg);
#endif
static int zft_ioctl(struct inode *ino, struct file *filep,
unsigned int command, unsigned long arg);
static int zft_mmap(struct file *filep, struct vm_area_struct *vma);
-static ssize_t zft_read (struct file *fp, char __user *buff,
+static ssize_t zft_read (struct file *fp, char *buff,
size_t req_len, loff_t *ppos);
-static ssize_t zft_write(struct file *fp, const char __user *buff,
+static ssize_t zft_write(struct file *fp, const char *buff,
size_t req_len, loff_t *ppos);
static struct file_operations zft_cdev =
int result;
TRACE_FUN(ft_t_flow);
- nonseekable_open(ino, filep);
TRACE(ft_t_flow, "called for minor %d", iminor(ino));
if ( test_and_set_bit(0,&busy_flag) ) {
TRACE_ABORT(-EBUSY, ft_t_warn, "failed: already busy");
old_sigmask = current->blocked; /* save mask */
sigfillset(¤t->blocked);
/* This will work as long as sizeof(void *) == sizeof(long) */
- result = _zft_ioctl(command, (void __user *) arg);
+ result = _zft_ioctl(command, (void *) arg);
current->blocked = old_sigmask; /* restore mask */
TRACE_EXIT result;
}
/* Read from floppy tape device
*/
-static ssize_t zft_read(struct file *fp, char __user *buff,
+static ssize_t zft_read(struct file *fp, char *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
/* Write to tape device
*/
-static ssize_t zft_write(struct file *fp, const char __user *buff,
+static ssize_t zft_write(struct file *fp, const char *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
struct zft_cmpr_ops {
int (*write)(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
int (*read)(int *read_cnt,
- __u8 __user *dst_buf, const int req_len,
+ __u8 *dst_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
int (*seek)(unsigned int new_block_pos,
* amount of data actually * copied to the user-buffer
*/
static int zft_simple_read (int *read_cnt,
- __u8 __user *dst_buf,
+ __u8 *dst_buf,
const int to_do,
const __u8 *src_buf,
const int seg_sz,
* req_len: how much data should be read at most.
* volume: contains information on current volume (blk_sz etc.)
*/
-static int empty_deblock_buf(__u8 __user *usr_buf, const int req_len,
+static int empty_deblock_buf(__u8 *usr_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
zft_position *pos,
const zft_volinfo *volume)
* use small block-sizes. The block-size may be 1kb (SECTOR_SIZE). In
* this case a MTFSR 28 maybe still inside the same segment.
*/
-int _zft_read(char __user *buff, int req_len)
+int _zft_read(char* buff, int req_len)
{
int req_clipped;
int result = 0;
0, FT_SEGMENT_SIZE)
/* hook for the VFS interface
*/
-extern int _zft_read(char __user *buff, int req_len);
+extern int _zft_read(char* buff, int req_len);
#endif /* _ZFTAPE_READ_H */
*/
static int zft_simple_write(int *cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 __user *src_buf, const int req_len,
+ const __u8 *src_buf, const int req_len,
const zft_position *pos,const zft_volinfo *volume)
{
int space_left;
static int fill_deblock_buf(__u8 *dst_buf, const int seg_sz,
zft_position *pos, const zft_volinfo *volume,
- const char __user *usr_buf, const int req_len)
+ const char *usr_buf, const int req_len)
{
int cnt = 0;
int result = 0;
/* called by the kernel-interface routine "zft_write()"
*/
-int _zft_write(const char __user *buff, int req_len)
+int _zft_write(const char* buff, int req_len)
{
int result = 0;
int written = 0;
/* hook for the VFS interface
*/
-extern int _zft_write(const char __user *buff, int req_len);
+extern int _zft_write(const char *buff, int req_len);
#endif /* _ZFTAPE_WRITE_H */
#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__)
#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__)
-#ifdef NEW_WRITE_LOCKING
+#if NEW_WRITE_LOCKING
#define DECL /* Nothing */
#define LOCKIT down (& port->port_write_sem);
#define RELEASEIT up (&port->port_write_sem);
if (port->xmit_buf) {
free_page((unsigned long) port->xmit_buf);
- port->xmit_buf = NULL;
+ port->xmit_buf = 0;
}
if (port->tty)
port->event = 0;
port->rd->close (port);
port->rd->shutdown_port (port);
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
}
-int gs_setserial(struct gs_port *port, struct serial_struct __user *sp)
+int gs_setserial(struct gs_port *port, struct serial_struct *sp)
{
struct serial_struct sio;
* Generate the serial struct info.
*/
-int gs_getserial(struct gs_port *port, struct serial_struct __user *sp)
+int gs_getserial(struct gs_port *port, struct serial_struct *sp)
{
struct serial_struct sio;
{
struct proc_dir_entry *r;
- r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL);
+ r = create_proc_read_entry("driver/rtc", 0, 0, gen_rtc_read_proc, NULL);
if (!r)
return -ENOMEM;
return 0;
static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
-static int hangcheck_reboot = 1; /* Defaults to reboot */
+static int hangcheck_reboot; /* Defaults to not reboot */
/* Driver options */
module_param(hangcheck_tick, int, 0);
/*
* Intel & MS High Precision Event Timer Implementation.
- *
- * Copyright (C) 2003 Intel Corporation
+ * Contributors:
* Venki Pallipadi
- * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
- * Bob Picco <robert.picco@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Bob Picco
*/
#include <linux/config.h>
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
-#ifdef CONFIG_HPET_MMAP
+#ifdef CONFIG_HPET_NOMMAP
+ return -ENOSYS;
+#else
struct hpet_dev *devp;
unsigned long addr;
}
return 0;
-#else
- return -ENOSYS;
#endif
}
hdp->hd_nirqs = irqp->number_of_interrupts;
for (i = 0; i < hdp->hd_nirqs; i++)
+#ifdef CONFIG_IA64
hdp->hd_irq[i] =
acpi_register_gsi(irqp->interrupts[i],
irqp->edge_level,
irqp->active_high_low);
+#else
+ hdp->hd_irq[i] = irqp->interrupts[i];
+#endif
}
}
+++ /dev/null
-/*
- * IBM eServer Hypervisor Virtual Console Server Device Driver
- * Copyright (C) 2003, 2004 IBM Corp.
- * Ryan S. Arnold (rsa@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
- *
- * This is the device driver for the IBM Hypervisor Virtual Console Server,
- * "hvcs". The IBM hvcs provides a tty driver interface to allow Linux
- * user space applications access to the system consoles of logically
- * partitioned operating systems, e.g. Linux, running on the same partitioned
- * Power5 ppc64 system. Physical hardware consoles per partition are not
- * practical on this hardware so system consoles are accessed by this driver
- * using inter-partition firmware interfaces to virtual terminal devices.
- *
- * A vty is known to the HMC as a "virtual serial server adapter". It is a
- * virtual terminal device that is created by firmware upon partition creation
- * to act as a partitioned OS's console device.
- *
- * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64
- * Linux system upon their creation by the HMC or their exposure during boot.
- * The non-user interactive backend of this driver is implemented as a vio
- * device driver so that it can receive notification of vty-server lifetimes
- * after it registers with the vio bus to handle vty-server probe and remove
- * callbacks.
- *
- * Many vty-servers can be configured to connect to one vty, but a vty can
- * only be actively connected to by a single vty-server, in any manner, at one
- * time. If the HMC is currently hosting the console for a target Linux
- * partition; attempts to open the tty device to the partition's console using
- * the hvcs on any partition will return -EBUSY with every open attempt until
- * the HMC frees the connection between its vty-server and the desired
- * partition's vty device. Conversely, a vty-server may only be connected to
- * a single vty at one time even though it may have several configured vty
- * partner possibilities.
- *
- * Firmware does not provide notification of vty partner changes to this
- * driver. This means that an HMC Super Admin may add or remove partner vtys
- * from a vty-server's partner list but the changes will not be signaled to
- * the vty-server. Firmware only notifies the driver when a vty-server is
- * added or removed from the system. To compensate for this deficiency, this
- * driver implements a sysfs update attribute which provides a method for
- * rescanning partner information upon a user's request.
- *
- * Each vty-server, prior to being exposed to this driver is reference counted
- * using the 2.6 Linux kernel kobject construct. This kobject is also used by
- * the vio bus to provide a vio device sysfs entry that this driver attaches
- * device specific attributes to, including partner information. The vio bus
- * framework also provides a sysfs entry for each vio driver. The hvcs driver
- * provides driver attributes in this entry.
- *
- * For direction on installation and usage of this driver please reference
- * Documentation/powerpc/hvcs.txt.
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/major.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <asm/hvconsole.h>
-#include <asm/hvcserver.h>
-#include <asm/uaccess.h>
-#include <asm/vio.h>
-
-/*
- * 1.0.0 -> 1.1.0 Added kernel_thread scheduling methodology to driver to
- * replace wait_task constructs.
- *
- * 1.1.0 -> 1.2.0 Moved pi_buff initialization out of arch code into driver code
- * and added locking to share this buffer between hvcs_struct instances. This
- * is because the page_size kmalloc can't be done with a spin_lock held.
- *
- * Also added sysfs attribute to manually disconnect the vty-server from the vty
- * due to stupid firmware behavior when opening the connection then sending data
- * then then quickly closing the connection would cause data loss on the
- * receiving side. This required some reordering of the termination code.
- *
- * Fixed the hangup scenario and fixed memory leaks on module_exit.
- *
- * 1.2.0 -> 1.3.0 Moved from manual kernel thread creation & execution to
- * kthread construct which replaced in-kernel IPC for thread termination with
- * kthread_stop and kthread_should_stop. Explicit wait_queue handling was
- * removed because kthread handles this. Minor bug fix to postpone partner_info
- * clearing on hvcs_close until adapter removal to preserve context data for
- * printk on partner connection free. Added lock to protect hvcs_structs so
- * that hvcs_struct instances aren't added or removed during list traversal.
- * Cleaned up comment style, added spaces after commas, and broke function
- * declaration lines to be under 80 columns.
- */
-#define HVCS_DRIVER_VERSION "1.3.0"
-
-MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
-MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HVCS_DRIVER_VERSION);
-
-/*
- * Since the Linux TTY code does not currently (2-04-2004) support dynamic
- * addition of tty derived devices and we shouldn't allocate thousands of
- * tty_device pointers when the number of vty-server & vty partner connections
- * will most often be much lower than this, we'll arbitrarily allocate
- * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
- * register the tty_driver. This can be overridden using an insmod parameter.
- */
-#define HVCS_DEFAULT_SERVER_ADAPTERS 64
-
-/*
- * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
- * nodes as a sanity check. Theoretically there can be over 1 Billion
- * vty-server & vty partner connections.
- */
-#define HVCS_MAX_SERVER_ADAPTERS 1024
-
-/*
- * We let Linux assign us a major number and we start the minors at zero. There
- * is no intuitive mapping between minor number and the target partition. The
- * mapping of minor number is related to the order the vty-servers are exposed
- * to this driver via the hvcs_probe function.
- */
-#define HVCS_MINOR_START 0
-
-/*
- * The hcall interface involves putting 8 chars into each of two registers.
- * We load up those 2 registers (in arch/ppc64/hvconsole.c) by casting char[16]
- * to long[2]. It would work without __ALIGNED__, but a little (tiny) bit
- * slower because an unaligned load is slower than aligned load.
- */
-#define __ALIGNED__ __attribute__((__aligned__(8)))
-
-/* Converged location code string length + 1 null terminator */
-#define CLC_LENGTH 80
-
-/*
- * How much data can firmware send with each hvc_put_chars()? Maybe this
- * should be moved into an architecture specific area.
- */
-#define HVCS_BUFF_LEN 16
-
-/*
- * This is the maximum amount of data we'll let the user send us (hvcs_write) at
- * once in a chunk as a sanity check.
- */
-#define HVCS_MAX_FROM_USER 4096
-
-/*
- * Be careful when adding flags to this line discipline. Don't add anything
- * that will cause echoing or we'll go into recursive loop echoing chars back
- * and forth with the console drivers.
- */
-static struct termios hvcs_tty_termios = {
- .c_iflag = IGNBRK | IGNPAR,
- .c_oflag = OPOST,
- .c_cflag = B38400 | CS8 | CREAD | HUPCL,
- .c_cc = INIT_C_CC
-};
-
-/*
- * This value is used to take the place of a command line parameter when the
- * module is inserted. It starts as -1 and stays as such if the user doesn't
- * specify a module insmod parameter. If they DO specify one then it is set to
- * the value of the integer passed in.
- */
-static int hvcs_parm_num_devs = -1;
-module_param(hvcs_parm_num_devs, int, 0);
-
-char hvcs_driver_name[] = "hvcs";
-char hvcs_device_node[] = "hvcs";
-char hvcs_driver_string[]
- = "IBM hvcs (Hypervisor Virtual Console Server) Driver";
-
-/* Status of partner info rescan triggered via sysfs. */
-static int hvcs_rescan_status = 0;
-
-static struct tty_driver *hvcs_tty_driver;
-
-/*
- * This is used to associate a vty-server, as it is exposed to this driver, with
- * a preallocated tty_struct.index. The dev node and hvcs index numbers are not
- * re-used after device removal otherwise removing and adding a new one would
- * link a /dev/hvcs* entry to a different vty-server than it did before the
- * removal. Incidentally, a newly exposed vty-server will always map to an
- * incrementally higher /dev/hvcs* entry than the last exposed vty-server.
- */
-static int hvcs_struct_count = -1;
-
-/*
- * Used by the khvcsd to pick up I/O operations when the kernel_thread is
- * already awake but potentially shifted to TASK_INTERRUPTIBLE state.
- */
-static int hvcs_kicked = 0;
-
-/* Used the the kthread construct for task operations */
-static struct task_struct *hvcs_task;
-
-/*
- * We allocate this for the use of all of the hvcs_structs when they fetch
- * partner info.
- */
-static unsigned long *hvcs_pi_buff;
-
-static spinlock_t hvcs_pi_lock;
-
-/* One vty-server per hvcs_struct */
-struct hvcs_struct {
- spinlock_t lock;
-
- /*
- * This index identifies this hvcs device as the complement to a
- * specific tty index.
- */
- unsigned int index;
-
- struct tty_struct *tty;
- unsigned int open_count;
-
- /*
- * Used to tell the driver kernel_thread what operations need to take
- * place upon this hvcs_struct instance.
- */
- int todo_mask;
-
- /*
- * This buffer is required so that when hvcs_write_room() reports that
- * it can send HVCS_BUFF_LEN characters that it will buffer the full
- * HVCS_BUFF_LEN characters if need be. This is essential for opost
- * writes since they do not do high level buffering and expect to be
- * able to send what the driver commits to sending buffering
- * [e.g. tab to space conversions in n_tty.c opost()].
- */
- char buffer[HVCS_BUFF_LEN];
- int chars_in_buffer;
-
- /*
- * Any variable below the kobject is valid before a tty is connected and
- * stays valid after the tty is disconnected. These shouldn't be
- * whacked until the koject refcount reaches zero though some entries
- * may be changed via sysfs initiatives.
- */
- struct kobject kobj; /* ref count & hvcs_struct lifetime */
- int connected; /* is the vty-server currently connected to a vty? */
- unsigned int p_unit_address; /* partner unit address */
- unsigned int p_partition_ID; /* partner partition ID */
- char p_location_code[CLC_LENGTH];
- struct list_head next; /* list management */
- struct vio_dev *vdev;
-};
-
-/* Required to back map a kobject to its containing object */
-#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
-
-static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
-static spinlock_t hvcs_structs_lock;
-
-static void hvcs_unthrottle(struct tty_struct *tty);
-static void hvcs_throttle(struct tty_struct *tty);
-static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance,
- struct pt_regs *regs);
-
-static int hvcs_write(struct tty_struct *tty, int from_user,
- const unsigned char *buf, int count);
-static int hvcs_write_room(struct tty_struct *tty);
-static int hvcs_chars_in_buffer(struct tty_struct *tty);
-
-static int hvcs_has_pi(struct hvcs_struct *hvcsd);
-static void hvcs_set_pi(struct hvcs_partner_info *pi,
- struct hvcs_struct *hvcsd);
-static int hvcs_get_pi(struct hvcs_struct *hvcsd);
-static int hvcs_rescan_devices_list(void);
-
-static int hvcs_partner_connect(struct hvcs_struct *hvcsd);
-static void hvcs_partner_free(struct hvcs_struct *hvcsd);
-
-static int hvcs_enable_device(struct hvcs_struct *hvcsd,
- uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
-static void hvcs_final_close(struct hvcs_struct *hvcsd);
-
-static void destroy_hvcs_struct(struct kobject *kobj);
-static int hvcs_open(struct tty_struct *tty, struct file *filp);
-static void hvcs_close(struct tty_struct *tty, struct file *filp);
-static void hvcs_hangup(struct tty_struct * tty);
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
-static int __devinit hvcs_probe(struct vio_dev *dev,
- const struct vio_device_id *id);
-static int __devexit hvcs_remove(struct vio_dev *dev);
-static int __init hvcs_module_init(void);
-static void __exit hvcs_module_exit(void);
-
-#define HVCS_SCHED_READ 0x00000001
-#define HVCS_QUICK_READ 0x00000002
-#define HVCS_TRY_WRITE 0x00000004
-#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
-
-static void hvcs_kick(void)
-{
- hvcs_kicked = 1;
- wmb();
- wake_up_process(hvcs_task);
-}
-
-static void hvcs_unthrottle(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- hvcs_kick();
-}
-
-static void hvcs_throttle(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_disable_interrupts(hvcsd->vdev);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-}
-
-/*
- * If the device is being removed we don't have to worry about this interrupt
- * handler taking any further interrupts because they are disabled which means
- * the hvcs_struct will always be valid in this handler.
- */
-static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance,
- struct pt_regs *regs)
-{
- struct hvcs_struct *hvcsd = dev_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_disable_interrupts(hvcsd->vdev);
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- hvcs_kick();
-
- return IRQ_HANDLED;
-}
-
-/* This function must be called with the hvcsd->lock held */
-static void hvcs_try_write(struct hvcs_struct *hvcsd)
-{
- unsigned int unit_address = hvcsd->vdev->unit_address;
- struct tty_struct *tty = hvcsd->tty;
- int sent;
-
- if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
- /* won't send partial writes */
- sent = hvc_put_chars(unit_address,
- &hvcsd->buffer[0],
- hvcsd->chars_in_buffer );
- if (sent > 0) {
- hvcsd->chars_in_buffer = 0;
- wmb();
- hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
- wmb();
-
- /*
- * We are still obligated to deliver the data to the
- * hypervisor even if the tty has been closed because
- * we commited to delivering it. But don't try to wake
- * a non-existent tty.
- */
- if (tty) {
- if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP))
- && tty->ldisc.write_wakeup)
- (tty->ldisc.write_wakeup) (tty);
- wake_up_interruptible(&tty->write_wait);
- }
- }
- }
-}
-
-static int hvcs_io(struct hvcs_struct *hvcsd)
-{
- unsigned int unit_address;
- struct tty_struct *tty;
- char buf[HVCS_BUFF_LEN] __ALIGNED__;
- unsigned long flags;
- int got;
- int i;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- unit_address = hvcsd->vdev->unit_address;
- tty = hvcsd->tty;
-
- hvcs_try_write(hvcsd);
-
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
- hvcsd->todo_mask &= ~(HVCS_READ_MASK);
- goto bail;
- } else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
- goto bail;
-
- /* remove the read masks */
- hvcsd->todo_mask &= ~(HVCS_READ_MASK);
-
- if ((tty->flip.count + HVCS_BUFF_LEN) < TTY_FLIPBUF_SIZE) {
- got = hvc_get_chars(unit_address,
- &buf[0],
- HVCS_BUFF_LEN);
- for (i=0;got && i<got;i++)
- tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
- }
-
- /* Give the TTY time to process the data we just sent. */
- if (got)
- hvcsd->todo_mask |= HVCS_QUICK_READ;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (tty->flip.count) {
- /* This is synch because tty->low_latency == 1 */
- tty_flip_buffer_push(tty);
- }
-
- if (!got) {
- /* Do this _after_ the flip_buffer_push */
- spin_lock_irqsave(&hvcsd->lock, flags);
- vio_enable_interrupts(hvcsd->vdev);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
-
- return hvcsd->todo_mask;
-
- bail:
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return hvcsd->todo_mask;
-}
-
-static int khvcsd(void *unused)
-{
- struct hvcs_struct *hvcsd = NULL;
- struct list_head *element;
- struct list_head *safe_temp;
- int hvcs_todo_mask;
- unsigned long structs_flags;
-
- __set_current_state(TASK_RUNNING);
-
- do {
- hvcs_todo_mask = 0;
- hvcs_kicked = 0;
- wmb();
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
- list_for_each_safe(element, safe_temp, &hvcs_structs) {
- hvcsd = list_entry(element, struct hvcs_struct, next);
- hvcs_todo_mask |= hvcs_io(hvcsd);
- }
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- /*
- * If any of the hvcs adapters want to try a write or quick read
- * don't schedule(), yield a smidgen then execute the hvcs_io
- * thread again for those that want the write.
- */
- if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
- yield();
- continue;
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- if (!hvcs_kicked)
- schedule();
- __set_current_state(TASK_RUNNING);
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-static struct vio_device_id hvcs_driver_table[] __devinitdata= {
- {"serial-server", "hvterm2"},
- { 0, }
-};
-MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
-
-/* callback when the kboject ref count reaches zero */
-static void destroy_hvcs_struct(struct kobject *kobj)
-{
- struct hvcs_struct *hvcsd = from_kobj(kobj);
- struct vio_dev *vdev;
- unsigned long flags;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- /* the list_del poisons the pointers */
- list_del(&(hvcsd->next));
-
- if (hvcsd->connected == 1) {
- hvcs_partner_free(hvcsd);
- printk(KERN_INFO "HVCS: Closed vty-server@%X and"
- " partner vty@%X:%d connection.\n",
- hvcsd->vdev->unit_address,
- hvcsd->p_unit_address,
- (unsigned int)hvcsd->p_partition_ID);
- }
- printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
- hvcsd->vdev->unit_address);
-
- vdev = hvcsd->vdev;
- hvcsd->vdev = NULL;
-
- hvcsd->p_unit_address = 0;
- hvcsd->p_partition_ID = 0;
- memset(&hvcsd->p_location_code[0], 0x00, CLC_LENGTH);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- hvcs_remove_device_attrs(vdev);
-
- kfree(hvcsd);
-}
-
-/* This function must be called with hvcsd->lock held. */
-static void hvcs_final_close(struct hvcs_struct *hvcsd)
-{
- vio_disable_interrupts(hvcsd->vdev);
- free_irq(hvcsd->vdev->irq, hvcsd);
-
- hvcsd->todo_mask = 0;
-
- /* These two may be redundant if the operation was a close. */
- if (hvcsd->tty) {
- hvcsd->tty->driver_data = NULL;
- hvcsd->tty = NULL;
- }
-
- hvcsd->open_count = 0;
-
- memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
- hvcsd->chars_in_buffer = 0;
-}
-
-static struct kobj_type hvcs_kobj_type = {
- .release = destroy_hvcs_struct,
-};
-
-static int __devinit hvcs_probe(
- struct vio_dev *dev,
- const struct vio_device_id *id)
-{
- struct hvcs_struct *hvcsd;
- unsigned long structs_flags;
-
- if (!dev || !id) {
- printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
- return -EPERM;
- }
-
- hvcsd = kmalloc(sizeof(*hvcsd), GFP_KERNEL);
- if (!hvcsd) {
- return -ENODEV;
- }
-
- /* hvcsd->tty is zeroed out with the memset */
- memset(hvcsd, 0x00, sizeof(*hvcsd));
-
- hvcsd->lock = SPIN_LOCK_UNLOCKED;
- /* Automatically incs the refcount the first time */
- kobject_init(&hvcsd->kobj);
- /* Set up the callback for terminating the hvcs_struct's life */
- hvcsd->kobj.ktype = &hvcs_kobj_type;
-
- hvcsd->vdev = dev;
- dev->dev.driver_data = hvcsd;
-
- hvcsd->index = ++hvcs_struct_count;
- hvcsd->chars_in_buffer = 0;
- hvcsd->todo_mask = 0;
- hvcsd->connected = 0;
-
- /*
- * This will populate the hvcs_struct's partner info fields for the
- * first time.
- */
- if (hvcs_get_pi(hvcsd)) {
- printk(KERN_ERR "HVCS: Failed to fetch partner"
- " info for vty-server@%X on device probe.\n",
- hvcsd->vdev->unit_address);
- }
-
- /*
- * If a user app opens a tty that corresponds to this vty-server before
- * the hvcs_struct has been added to the devices list then the user app
- * will get -ENODEV.
- */
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
-
- list_add_tail(&(hvcsd->next), &hvcs_structs);
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- hvcs_create_device_attrs(hvcsd);
-
- printk(KERN_INFO "HVCS: Added vty-server@%X.\n", dev->unit_address);
-
- /*
- * DON'T enable interrupts here because there is no user to receive the
- * data.
- */
- return 0;
-}
-
-static int __devexit hvcs_remove(struct vio_dev *dev)
-{
- struct hvcs_struct *hvcsd = dev->dev.driver_data;
- unsigned long flags;
- struct kobject *kobjp;
- struct tty_struct *tty;
-
- if (!hvcsd)
- return -ENODEV;
-
- /* By this time the vty-server won't be getting any more interrups */
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- tty = hvcsd->tty;
-
- kobjp = &hvcsd->kobj;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * Let the last holder of this object cause it to be removed, which
- * would probably be tty_hangup below.
- */
- kobject_put (kobjp);
-
- /*
- * The hangup is a scheduled function which will auto chain call
- * hvcs_hangup. The tty should always be valid at this time unless a
- * simultaneous tty close already cleaned up the hvcs_struct.
- */
- if (tty)
- tty_hangup(tty);
-
- printk(KERN_INFO "HVCS: vty-server@%X removed from the"
- " vio bus.\n", dev->unit_address);
- return 0;
-};
-
-static struct vio_driver hvcs_vio_driver = {
- .name = hvcs_driver_name,
- .id_table = hvcs_driver_table,
- .probe = hvcs_probe,
- .remove = hvcs_remove,
-};
-
-/* Only called from hvcs_get_pi please */
-static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
-{
- int clclength;
-
- hvcsd->p_unit_address = pi->unit_address;
- hvcsd->p_partition_ID = pi->partition_ID;
- clclength = strlen(&pi->location_code[0]);
- if (clclength > CLC_LENGTH - 1)
- clclength = CLC_LENGTH - 1;
-
- /* copy the null-term char too */
- strncpy(&hvcsd->p_location_code[0],
- &pi->location_code[0], clclength + 1);
-}
-
-/*
- * Traverse the list and add the partner info that is found to the hvcs_struct
- * struct entry. NOTE: At this time I know that partner info will return a
- * single entry but in the future there may be multiple partner info entries per
- * vty-server and you'll want to zero out that list and reset it. If for some
- * reason you have an old version of this driver but there IS more than one
- * partner info then hvcsd->p_* will hold the last partner info data from the
- * firmware query. A good way to update this code would be to replace the three
- * partner info fields in hvcs_struct with a list of hvcs_partner_info
- * instances.
- *
- * This function must be called with the hvcsd->lock held.
- */
-static int hvcs_get_pi(struct hvcs_struct *hvcsd)
-{
- /* struct hvcs_partner_info *head_pi = NULL; */
- struct hvcs_partner_info *pi = NULL;
- unsigned int unit_address = hvcsd->vdev->unit_address;
- struct list_head head;
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcs_pi_lock, flags);
- if (!hvcs_pi_buff) {
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
- return -EFAULT;
- }
- retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
- if (retval) {
- printk(KERN_ERR "HVCS: Failed to fetch partner"
- " info for vty-server@%x.\n", unit_address);
- return retval;
- }
-
- /* nixes the values if the partner vty went away */
- hvcsd->p_unit_address = 0;
- hvcsd->p_partition_ID = 0;
-
- list_for_each_entry(pi, &head, node)
- hvcs_set_pi(pi, hvcsd);
-
- hvcs_free_partner_info(&head);
- return 0;
-}
-
-/*
- * This function is executed by the driver "rescan" sysfs entry. It shouldn't
- * be executed elsewhere, in order to prevent deadlock issues.
- */
-static int hvcs_rescan_devices_list(void)
-{
- struct hvcs_struct *hvcsd = NULL;
- unsigned long flags;
- unsigned long structs_flags;
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
-
- list_for_each_entry(hvcsd, &hvcs_structs, next) {
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcs_get_pi(hvcsd);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
-
- return 0;
-}
-
-/*
- * Farm this off into its own function because it could be more complex once
- * multiple partners support is added. This function should be called with
- * the hvcsd->lock held.
- */
-static int hvcs_has_pi(struct hvcs_struct *hvcsd)
-{
- if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
- return 0;
- return 1;
-}
-
-/*
- * NOTE: It is possible that the super admin removed a partner vty and then
- * added a different vty as the new partner.
- *
- * This function must be called with the hvcsd->lock held.
- */
-static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
-{
- int retval;
- unsigned int unit_address = hvcsd->vdev->unit_address;
-
- /*
- * If there wasn't any pi when the device was added it doesn't meant
- * there isn't any now. This driver isn't notified when a new partner
- * vty is added to a vty-server so we discover changes on our own.
- * Please see comments in hvcs_register_connection() for justification
- * of this bizarre code.
- */
- retval = hvcs_register_connection(unit_address,
- hvcsd->p_partition_ID,
- hvcsd->p_unit_address);
- if (!retval) {
- hvcsd->connected = 1;
- return 0;
- } else if (retval != -EINVAL)
- return retval;
-
- /*
- * As per the spec re-get the pi and try again if -EINVAL after the
- * first connection attempt.
- */
- if (hvcs_get_pi(hvcsd))
- return -ENOMEM;
-
- if (!hvcs_has_pi(hvcsd))
- return -ENODEV;
-
- retval = hvcs_register_connection(unit_address,
- hvcsd->p_partition_ID,
- hvcsd->p_unit_address);
- if (retval != -EINVAL) {
- hvcsd->connected = 1;
- return retval;
- }
-
- /*
- * EBUSY is the most likely scenario though the vty could have been
- * removed or there really could be an hcall error due to the parameter
- * data but thanks to ambiguous firmware return codes we can't really
- * tell.
- */
- printk(KERN_INFO "HVCS: vty-server or partner"
- " vty is busy. Try again later.\n");
- return -EBUSY;
-}
-
-/* This function must be called with the hvcsd->lock held */
-static void hvcs_partner_free(struct hvcs_struct *hvcsd)
-{
- int retval;
- do {
- retval = hvcs_free_connection(hvcsd->vdev->unit_address);
- } while (retval == -EBUSY);
- hvcsd->connected = 0;
-}
-
-/* This helper function must be called WITHOUT the hvcsd->lock held */
-static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
- unsigned int irq, struct vio_dev *vdev)
-{
- unsigned long flags;
-
- /*
- * It is possible that the vty-server was removed between the time that
- * the conn was registered and now.
- */
- if (!request_irq(irq, &hvcs_handle_interrupt,
- SA_INTERRUPT, "ibmhvcs", hvcsd)) {
- /*
- * It is possible the vty-server was removed after the irq was
- * requested but before we have time to enable interrupts.
- */
- if (vio_enable_interrupts(vdev) == H_Success)
- return 0;
- else {
- printk(KERN_ERR "HVCS: int enable failed for"
- " vty-server@%X.\n", unit_address);
- free_irq(irq, hvcsd);
- }
- } else
- printk(KERN_ERR "HVCS: irq req failed for"
- " vty-server@%X.\n", unit_address);
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- hvcs_partner_free(hvcsd);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- return -ENODEV;
-
-}
-
-/*
- * This always increments the kobject ref count if the call is successful.
- * Please remember to dec when you are done with the instance.
- *
- * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
- * calling this function or you will get deadlock.
- */
-struct hvcs_struct *hvcs_get_by_index(int index)
-{
- struct hvcs_struct *hvcsd = NULL;
- struct list_head *element;
- struct list_head *safe_temp;
- unsigned long flags;
- unsigned long structs_flags;
-
- spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
- /* We can immediately discard OOB requests */
- if (index >= 0 && index < HVCS_MAX_SERVER_ADAPTERS) {
- list_for_each_safe(element, safe_temp, &hvcs_structs) {
- hvcsd = list_entry(element, struct hvcs_struct, next);
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (hvcsd->index == index) {
- kobject_get(&hvcsd->kobj);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- spin_unlock_irqrestore(&hvcs_structs_lock,
- structs_flags);
- return hvcsd;
- }
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- }
- hvcsd = NULL;
- }
-
- spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
- return hvcsd;
-}
-
-/*
- * This is invoked via the tty_open interface when a user app connects to the
- * /dev node.
- */
-static int hvcs_open(struct tty_struct *tty, struct file *filp)
-{
- struct hvcs_struct *hvcsd = NULL;
- int retval = 0;
- unsigned long flags;
- unsigned int irq;
- struct vio_dev *vdev;
- unsigned long unit_address;
-
- if (tty->driver_data)
- goto fast_open;
-
- /*
- * Is there a vty-server that shares the same index?
- * This function increments the kobject index.
- */
- if (!(hvcsd = hvcs_get_by_index(tty->index))) {
- printk(KERN_WARNING "HVCS: open failed, no index.\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- if (hvcsd->connected == 0)
- if ((retval = hvcs_partner_connect(hvcsd)))
- goto error_release;
-
- hvcsd->open_count = 1;
- hvcsd->tty = tty;
- tty->driver_data = hvcsd;
-
- /*
- * Set this driver to low latency so that we actually have a chance at
- * catching a throttled TTY after we flip_buffer_push. Otherwise the
- * flush_to_async may not execute until after the kernel_thread has
- * yielded and resumed the next flip_buffer_push resulting in data
- * loss.
- */
- tty->low_latency = 1;
-
- memset(&hvcsd->buffer[0], 0x3F, HVCS_BUFF_LEN);
-
- /*
- * Save these in the spinlock for the enable operations that need them
- * outside of the spinlock.
- */
- irq = hvcsd->vdev->irq;
- vdev = hvcsd->vdev;
- unit_address = hvcsd->vdev->unit_address;
-
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * This must be done outside of the spinlock because it requests irqs
- * and will grab the spinlcok and free the connection if it fails.
- */
- if ((hvcs_enable_device(hvcsd, unit_address, irq, vdev))) {
- kobject_put(&hvcsd->kobj);
- printk(KERN_WARNING "HVCS: enable device failed.\n");
- return -ENODEV;
- }
-
- goto open_success;
-
-fast_open:
- hvcsd = tty->driver_data;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (!kobject_get(&hvcsd->kobj)) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_ERR "HVCS: Kobject of open"
- " hvcs doesn't exist.\n");
- return -EFAULT; /* Is this the right return value? */
- }
-
- hvcsd->open_count++;
-
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-open_success:
- hvcs_kick();
-
- printk(KERN_INFO "HVCS: vty-server@%X opened.\n",
- hvcsd->vdev->unit_address );
-
- return 0;
-
-error_release:
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- kobject_put(&hvcsd->kobj);
-
- printk(KERN_WARNING "HVCS: HVCS partner connect failed.\n");
- return retval;
-}
-
-static void hvcs_close(struct tty_struct *tty, struct file *filp)
-{
- struct hvcs_struct *hvcsd;
- unsigned long flags;
- struct kobject *kobjp;
-
- /*
- * Is someone trying to close the file associated with this device after
- * we have hung up? If so tty->driver_data wouldn't be valid.
- */
- if (tty_hung_up_p(filp))
- return;
-
- /*
- * No driver_data means that this close was probably issued after a
- * failed hvcs_open by the tty layer's release_dev() api and we can just
- * exit cleanly.
- */
- if (!tty->driver_data)
- return;
-
- hvcsd = tty->driver_data;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- if (--hvcsd->open_count == 0) {
-
- /*
- * This line is important because it tells hvcs_open that this
- * device needs to be re-configured the next time hvcs_open is
- * called.
- */
- hvcsd->tty->driver_data = NULL;
-
- /*
- * NULL this early so that the kernel_thread doesn't try to
- * execute any operations on the TTY even though it is obligated
- * to deliver any pending I/O to the hypervisor.
- */
- hvcsd->tty = NULL;
-
- /*
- * Block the close until all the buffered data has been
- * delivered.
- */
- while(hvcsd->chars_in_buffer) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * Give the kernel thread the hvcs_struct so that it can
- * try to deliver the remaining data but block the close
- * operation by spinning in this function so that other
- * tty operations have to wait.
- */
- yield();
- spin_lock_irqsave(&hvcsd->lock, flags);
- }
-
- hvcs_final_close(hvcsd);
-
- } else if (hvcsd->open_count < 0) {
- printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
- " is missmanaged.\n",
- hvcsd->vdev->unit_address, hvcsd->open_count);
- }
- kobjp = &hvcsd->kobj;
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- kobject_put(kobjp);
-}
-
-static void hvcs_hangup(struct tty_struct * tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int temp_open_count;
- struct kobject *kobjp;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- /* Preserve this so that we know how many kobject refs to put */
- temp_open_count = hvcsd->open_count;
-
- /*
- * Don't kobject put inside the spinlock because the destruction
- * callback may use the spinlock and it may get called before the
- * spinlock has been released. Get a pointer to the kobject and
- * kobject_put on that instead.
- */
- kobjp = &hvcsd->kobj;
-
- /* Calling this will drop any buffered data on the floor. */
- hvcs_final_close(hvcsd);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
- /*
- * We need to kobject_put() for every open_count we have since the
- * tty_hangup() function doesn't invoke a close per open connection on a
- * non-console device.
- */
- while(temp_open_count) {
- --temp_open_count;
- /*
- * The final put will trigger destruction of the hvcs_struct.
- * NOTE: If this hangup was signaled from user space then the
- * final put will never happen.
- */
- kobject_put(kobjp);
- }
-}
-
-/*
- * NOTE: This is almost always from_user since user level apps interact with the
- * /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by
- * hvcs_remove (which removes the target device and executes tty_hangup()) that
- * tty_hangup will allow hvcs_write time to complete execution before it
- * terminates our device.
- */
-static int hvcs_write(struct tty_struct *tty, int from_user,
- const unsigned char *buf, int count)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned int unit_address;
- unsigned char *charbuf;
- unsigned long flags;
- int total_sent = 0;
- int tosend = 0;
- int result = 0;
-
- /*
- * If they don't check the return code off of their open they may
- * attempt this even if there is no connected device.
- */
- if (!hvcsd)
- return -ENODEV;
-
- /* Reasonable size to prevent user level flooding */
- if (count > HVCS_MAX_FROM_USER) {
- printk(KERN_WARNING "HVCS write: count being truncated to"
- " HVCS_MAX_FROM_USER.\n");
- count = HVCS_MAX_FROM_USER;
- }
-
- if (!from_user)
- charbuf = (unsigned char *)buf;
- else {
- charbuf = kmalloc(count, GFP_KERNEL);
- if (!charbuf) {
- printk(KERN_WARNING "HVCS: write -ENOMEM.\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(charbuf, buf, count)) {
- kfree(charbuf);
- printk(KERN_WARNING "HVCS: write -EFAULT.\n");
- return -EFAULT;
- }
- }
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- /*
- * Somehow an open succedded but the device was removed or the
- * connection terminated between the vty-server and partner vty during
- * the middle of a write operation? This is a crummy place to do this
- * but we want to keep it all in the spinlock.
- */
- if (hvcsd->open_count <= 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (from_user)
- kfree(charbuf);
- return -ENODEV;
- }
-
- unit_address = hvcsd->vdev->unit_address;
-
- while (count > 0) {
- tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
- /*
- * No more space, this probably means that the last call to
- * hvcs_write() didn't succeed and the buffer was filled up.
- */
- if (!tosend)
- break;
-
- memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer],
- &charbuf[total_sent],
- tosend);
-
- hvcsd->chars_in_buffer += tosend;
-
- result = 0;
-
- /*
- * If this is true then we don't want to try writing to the
- * hypervisor because that is the kernel_threads job now. We'll
- * just add to the buffer.
- */
- if (!(hvcsd->todo_mask & HVCS_TRY_WRITE))
- /* won't send partial writes */
- result = hvc_put_chars(unit_address,
- &hvcsd->buffer[0],
- hvcsd->chars_in_buffer);
-
- /*
- * Since we know we have enough room in hvcsd->buffer for
- * tosend we record that it was sent regardless of whether the
- * hypervisor actually took it because we have it buffered.
- */
- total_sent+=tosend;
- count-=tosend;
- if (result == 0) {
- hvcsd->todo_mask |= HVCS_TRY_WRITE;
- hvcs_kick();
- break;
- }
-
- hvcsd->chars_in_buffer = 0;
- /*
- * Test after the chars_in_buffer reset otherwise this could
- * deadlock our writes if hvc_put_chars fails.
- */
- if (result < 0)
- break;
- }
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (from_user)
- kfree(charbuf);
-
- if (result == -1)
- return -EIO;
- else
- return total_sent;
-}
-
-/*
- * This is really asking how much can we guarentee that we can send or that we
- * absolutely WILL BUFFER if we can't send it. This driver MUST honor the
- * return value, hence the reason for hvcs_struct buffering.
- */
-static int hvcs_write_room(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int retval;
-
- if (!hvcsd || hvcsd->open_count <= 0)
- return 0;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static int hvcs_chars_in_buffer(struct tty_struct *tty)
-{
- struct hvcs_struct *hvcsd = tty->driver_data;
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = hvcsd->chars_in_buffer;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static struct tty_operations hvcs_ops = {
- .open = hvcs_open,
- .close = hvcs_close,
- .hangup = hvcs_hangup,
- .write = hvcs_write,
- .write_room = hvcs_write_room,
- .chars_in_buffer = hvcs_chars_in_buffer,
- .unthrottle = hvcs_unthrottle,
- .throttle = hvcs_throttle,
-};
-
-static int __init hvcs_module_init(void)
-{
- int rc;
- int num_ttys_to_alloc;
-
- printk(KERN_INFO "Initializing %s\n", hvcs_driver_string);
-
- /* Has the user specified an overload with an insmod param? */
- if (hvcs_parm_num_devs <= 0 ||
- (hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
- num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
- } else
- num_ttys_to_alloc = hvcs_parm_num_devs;
-
- hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc);
- if (!hvcs_tty_driver)
- return -ENOMEM;
-
- hvcs_tty_driver->owner = THIS_MODULE;
-
- hvcs_tty_driver->driver_name = hvcs_driver_name;
- hvcs_tty_driver->name = hvcs_device_node;
-
- /*
- * We'll let the system assign us a major number, indicated by leaving
- * it blank.
- */
-
- hvcs_tty_driver->minor_start = HVCS_MINOR_START;
- hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
-
- /*
- * We role our own so that we DONT ECHO. We can't echo because the
- * device we are connecting to already echoes by default and this would
- * throw us into a horrible recursive echo-echo-echo loop.
- */
- hvcs_tty_driver->init_termios = hvcs_tty_termios;
- hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
-
- tty_set_operations(hvcs_tty_driver, &hvcs_ops);
-
- /*
- * The following call will result in sysfs entries that denote the
- * dynamically assigned major and minor numbers for our devices.
- */
- if (tty_register_driver(hvcs_tty_driver)) {
- printk(KERN_ERR "HVCS: registration "
- " as a tty driver failed.\n");
- put_tty_driver(hvcs_tty_driver);
- return rc;
- }
-
- hvcs_structs_lock = SPIN_LOCK_UNLOCKED;
-
- hvcs_pi_lock = SPIN_LOCK_UNLOCKED;
- hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
- if (IS_ERR(hvcs_task)) {
- printk("khvcsd creation failed. Driver not loaded.\n");
- kfree(hvcs_pi_buff);
- put_tty_driver(hvcs_tty_driver);
- return -EIO;
- }
-
- rc = vio_register_driver(&hvcs_vio_driver);
-
- /*
- * This needs to be done AFTER the vio_register_driver() call or else
- * the kobjects won't be initialized properly.
- */
- hvcs_create_driver_attrs();
-
- printk(KERN_INFO "HVCS: driver module inserted.\n");
-
- return rc;
-}
-
-static void __exit hvcs_module_exit(void)
-{
- unsigned long flags;
-
- /*
- * This driver receives hvcs_remove callbacks for each device upon
- * module removal.
- */
-
- /*
- * This synchronous operation will wake the khvcsd kthread if it is
- * asleep and will return when khvcsd has terminated.
- */
- kthread_stop(hvcs_task);
-
- spin_lock_irqsave(&hvcs_pi_lock, flags);
- kfree(hvcs_pi_buff);
- hvcs_pi_buff = NULL;
- spin_unlock_irqrestore(&hvcs_pi_lock, flags);
-
- hvcs_remove_driver_attrs();
-
- vio_unregister_driver(&hvcs_vio_driver);
-
- tty_unregister_driver(hvcs_tty_driver);
-
- put_tty_driver(hvcs_tty_driver);
-
- printk(KERN_INFO "HVCS: driver module removed.\n");
-}
-
-module_init(hvcs_module_init);
-module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
- return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, const char * buf,
- size_t count)
-{
- /*
- * Don't need this feature at the present time because firmware doesn't
- * yet support multiple partners.
- */
- printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
- return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static DEVICE_ATTR(current_vty,
- S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, const char *buf,
- size_t count)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
-
- /* writing a '0' to this sysfs entry will result in the disconnect. */
- if (simple_strtol(buf, NULL, 0) != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- if (hvcsd->open_count > 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. "
- "The hvcs device node is still in use.\n");
- return -EPERM;
- }
-
- if (hvcsd->connected == 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. The"
- " vty-server is not connected to a vty.\n");
- return -EPERM;
- }
-
- hvcs_partner_free(hvcsd);
- printk(KERN_INFO "HVCS: Closed vty-server@%X and"
- " partner vty@%X:%d connection.\n",
- hvcsd->vdev->unit_address,
- hvcsd->p_unit_address,
- (unsigned int)hvcsd->p_partition_ID);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%d\n", hvcsd->connected);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
- hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static struct attribute *hvcs_attrs[] = {
- &dev_attr_partner_vtys.attr,
- &dev_attr_partner_clcs.attr,
- &dev_attr_current_vty.attr,
- &dev_attr_vterm_state.attr,
- NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
- .attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
- struct vio_dev *vdev = hvcsd->vdev;
- sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
- sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
- /* A 1 means it is updating, a 0 means it is done updating */
- return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
- size_t count)
-{
- if ((simple_strtol(buf, NULL, 0) != 1)
- && (hvcs_rescan_status != 0))
- return -EINVAL;
-
- hvcs_rescan_status = 1;
- printk(KERN_INFO "HVCS: rescanning partner info for all"
- " vty-servers.\n");
- hvcs_rescan_devices_list();
- hvcs_rescan_status = 0;
- return count;
-}
-static DRIVER_ATTR(rescan,
- S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_remove_file(driverfs, &driver_attr_rescan);
-}
static void set_params (i2ChanStrPtr, struct termios *);
static int set_modem_info(i2ChanStrPtr, unsigned int, unsigned int *);
-static int get_serial_info(i2ChanStrPtr, struct serial_struct __user *);
-static int set_serial_info(i2ChanStrPtr, struct serial_struct __user *);
+static int get_serial_info(i2ChanStrPtr, struct serial_struct *);
+static int set_serial_info(i2ChanStrPtr, struct serial_struct *);
-static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t ip2_ipl_read(struct file *, char *, size_t, loff_t *);
+static ssize_t ip2_ipl_write(struct file *, const char *, size_t, loff_t *);
static int ip2_ipl_ioctl(struct inode *, struct file *, UINT, ULONG);
static int ip2_ipl_open(struct inode *, struct file *);
-static int DumpTraceBuffer(char __user *, int);
-static int DumpFifoBuffer( char __user *, int);
+static int DumpTraceBuffer(char *, int);
+static int DumpFifoBuffer( char *, int);
static void ip2_init_board(int);
static unsigned short find_eisa_board(int);
/******************************************************************************/
static inline void
-service_all_boards(void)
+service_all_boards()
{
int i;
i2eBordStrPtr pB;
wait_queue_t wait;
i2ChanStrPtr pCh = DevTable[tty->index];
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
+ struct serial_icounter_struct *p_cuser; /* user space */
int rc = 0;
unsigned long flags;
- void __user *argp = (void __user *)arg;
if ( pCh == NULL ) {
return -ENODEV;
ip2trace (CHANN, ITRC_IOCTL, 2, 1, rc );
- rc = get_serial_info(pCh, argp);
+ rc = get_serial_info(pCh, (struct serial_struct *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 3, 1, rc );
- rc = set_serial_info(pCh, argp);
+ rc = set_serial_info(pCh, (struct serial_struct *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 6, 1, rc );
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 7, 1, rc );
- rc = get_user(arg,(unsigned long __user *) argp);
+ rc = get_user(arg,(unsigned long *) arg);
if (rc)
return rc;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL)
save_flags(flags);cli();
cnow = pCh->icount;
restore_flags(flags);
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
rc = put_user(cnow.cts, &p_cuser->cts);
rc = put_user(cnow.dsr, &p_cuser->dsr);
rc = put_user(cnow.rng, &p_cuser->rng);
/* standard Linux serial structure. */
/******************************************************************************/
static int
-get_serial_info ( i2ChanStrPtr pCh, struct serial_struct __user *retinfo )
+get_serial_info ( i2ChanStrPtr pCh, struct serial_struct *retinfo )
{
struct serial_struct tmp;
+ int rc;
+
+ if ( !retinfo ) {
+ return -EFAULT;
+ }
memset ( &tmp, 0, sizeof(tmp) );
tmp.type = pCh->pMyBord->channelBtypes.bid_value[(pCh->port_index & (IP2_PORTS_PER_BOARD-1))/16];
tmp.close_delay = pCh->ClosingDelay;
tmp.closing_wait = pCh->ClosingWaitTime;
tmp.custom_divisor = pCh->BaudDivisor;
- return copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ rc = copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ return rc;
}
/******************************************************************************/
/* change the IRQ, address or type of the port the ioctl fails. */
/******************************************************************************/
static int
-set_serial_info( i2ChanStrPtr pCh, struct serial_struct __user *new_info )
+set_serial_info( i2ChanStrPtr pCh, struct serial_struct *new_info )
{
struct serial_struct ns;
int old_flags, old_baud_divisor;
- if (copy_from_user(&ns, new_info, sizeof (ns)))
+ if ( !new_info ) {
return -EFAULT;
+ }
+
+ if (copy_from_user(&ns, new_info, sizeof (ns))) {
+ return -EFAULT;
+ }
/*
* We don't allow setserial to change IRQ, board address, type or baud
static
ssize_t
-ip2_ipl_read(struct file *pFile, char __user *pData, size_t count, loff_t *off )
+ip2_ipl_read(struct file *pFile, char *pData, size_t count, loff_t *off )
{
unsigned int minor = iminor(pFile->f_dentry->d_inode);
int rc = 0;
}
static int
-DumpFifoBuffer ( char __user *pData, int count )
+DumpFifoBuffer ( char *pData, int count )
{
#ifdef DEBUG_FIFO
int rc;
}
static int
-DumpTraceBuffer ( char __user *pData, int count )
+DumpTraceBuffer ( char *pData, int count )
{
#ifdef IP2DEBUG_TRACE
int rc;
int dumpcount;
int chunk;
- int *pIndex = (int __user *)pData;
+ int *pIndex = (int*)pData;
if ( count < (sizeof(int) * 6) ) {
return -EIO;
/* */
/******************************************************************************/
static ssize_t
-ip2_ipl_write(struct file *pFile, const char __user *pData, size_t count, loff_t *off)
+ip2_ipl_write(struct file *pFile, const char *pData, size_t count, loff_t *off)
{
#ifdef IP2DEBUG_IPL
printk (KERN_DEBUG "IP2IPL: write %p, %d bytes\n", pData, count );
{
unsigned int iplminor = iminor(pInode);
int rc = 0;
- void __user *argp = (void __user *)arg;
- ULONG __user *pIndex = argp;
+ ULONG *pIndex = (ULONG*)arg;
i2eBordStrPtr pB = i2BoardPtrTable[iplminor / 4];
i2ChanStrPtr pCh;
case 65: /* Board - ip2stat */
if ( pB ) {
- rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
+ rc = copy_to_user((char*)arg, (char*)pB, sizeof(i2eBordStr) );
rc = put_user(INB(pB->i2eStatus),
- (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
+ (ULONG*)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
} else {
rc = -ENODEV;
}
pCh = DevTable[cmd];
if ( pCh )
{
- rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+ rc = copy_to_user((char*)arg, (char*)pCh, sizeof(i2ChanStr) );
} else {
rc = -ENODEV;
}
{
int rv;
struct ipmi_addr addr;
- struct kernel_ipmi_msg msg;
+ unsigned char *msgdata;
if (req->addr_len > sizeof(struct ipmi_addr))
return -EINVAL;
if (copy_from_user(&addr, req->addr, req->addr_len))
return -EFAULT;
- msg.netfn = req->msg.netfn;
- msg.cmd = req->msg.cmd;
- msg.data_len = req->msg.data_len;
- msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!msg.data)
+ msgdata = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msgdata)
return -ENOMEM;
/* From here out we cannot return, we must jump to "out" for
goto out;
}
- if (copy_from_user(msg.data,
+ if (copy_from_user(msgdata,
req->msg.data,
req->msg.data_len))
{
goto out;
}
} else {
- msg.data_len = 0;
+ req->msg.data_len = 0;
}
+ req->msg.data = msgdata;
rv = ipmi_request_settime(user,
&addr,
req->msgid,
- &msg,
+ &(req->msg),
NULL,
0,
retries,
retry_time_ms);
out:
- kfree(msg.data);
+ kfree(msgdata);
return rv;
}
}
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
struct ipmi_ipmb_addr *ipmb_addr,
long msgid,
unsigned char ipmb_seq,
}
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
struct ipmi_lan_addr *lan_addr,
long msgid,
unsigned char ipmb_seq,
ipmi_smi_t intf,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
goto out_err;
}
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
for (m=0; m<smi_msg->data_size; m++)
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority)
{
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
int retries,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
unsigned char data[1];
struct ipmi_system_interface_addr si;
msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
msg->data_size = 11;
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
printk("Invalid command:");
int requeue;
int chan;
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
int m;
printk("Recv:");
for (m=0; m<msg->rsp_size; m++)
MC, which don't get resent. */
intf->handlers->sender(intf->send_info, smi_msg, 0);
-#ifdef DEBUG_MSGING
+#if DEBUG_MSGING
{
int m;
printk("Resend: ");
static void send_panic_events(char *str)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
ipmi_smi_t intf;
unsigned char data[16];
int i;
200 /* priority: INT_MAX >= x >= 0 */
};
-static int ipmi_init_msghandler(void)
+static __init int ipmi_init_msghandler(void)
{
int i;
return 0;
}
-static __init int ipmi_init_msghandler_mod(void)
-{
- ipmi_init_msghandler();
- return 0;
-}
-
static __exit void cleanup_ipmi(void)
{
int count;
}
module_exit(cleanup_ipmi);
-module_init(ipmi_init_msghandler_mod);
+module_init(ipmi_init_msghandler);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ipmi_alloc_recv_msg);
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
static int acpi_failure = 0;
/* For GPE-type interrupts. */
-void ipmi_acpi_gpe(void *context)
+u32 ipmi_acpi_gpe(void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
smi_event_handler(smi_info, 0);
out:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return 0;
}
static int acpi_gpe_irq_setup(struct smi_info *info)
#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
#endif
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout;
-#endif
-
static ipmi_user_t watchdog_user = NULL;
/* Default the timeout to 10 seconds. */
module_param(start_now, int, 0);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
"soon as the driver is loaded.");
-module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
/* Default state of the timer. */
static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
struct ipmi_recv_msg *recv_msg,
int *send_heartbeat_now)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
unsigned char data[6];
int rv;
struct ipmi_system_interface_addr addr;
static int ipmi_heartbeat(void)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
static void panic_halt_ipmi_heartbeat(void)
{
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
struct ipmi_system_interface_addr addr;
{
int rv;
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
rv = ipmi_heartbeat();
if (rv)
int rv = 0;
wait_queue_t wait;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count <= 0)
return 0;
/* Don't start the timer now, let it start on the
first heartbeat. */
ipmi_start_timer_on_heartbeat = 1;
- return nonseekable_open(ino, filep);
+ return(0);
default:
return (-ENODEV);
{
if (iminor(ino)==WATCHDOG_MINOR)
{
- if (!nowayout) {
- ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
- }
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+#endif
ipmi_wdog_open = 0;
}
static void isicom_tx(unsigned long _data);
static void isicom_start(struct tty_struct * tty);
-static unsigned char * tmp_buf;
+static unsigned char * tmp_buf = 0;
static DECLARE_MUTEX(tmp_buf_sem);
/* baud index mappings from linux defns to isi */
unsigned long t;
unsigned short word_count, base;
bin_frame frame;
- void __user *argp = (void __user *)arg;
/* exec_record exec_rec; */
- if(get_user(card, (int __user *)argp))
+ if(get_user(card, (int *)arg))
return -EFAULT;
if(card < 0 || card >= BOARD_COUNT)
return -EIO;
}
printk("-Done\n");
- return put_user(signature,(unsigned __user *)argp);
+ return put_user(signature,(unsigned int*)arg);
case MIOCTL_LOAD_FIRMWARE:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, argp, sizeof(bin_frame)))
+ if(copy_from_user(&frame, (void *) arg, sizeof(bin_frame)))
return -EFAULT;
if (WaitTillCardIsFree(base))
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, argp, sizeof(bin_header)))
+ if(copy_from_user(&frame, (void *) arg, sizeof(bin_header)))
return -EFAULT;
if (WaitTillCardIsFree(base))
return -EIO;
}
- if(copy_to_user(argp, &frame, sizeof(bin_frame)))
+ if(copy_to_user((void *) arg, &frame, sizeof(bin_frame)))
return -EFAULT;
return 0;
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
}
static int isicom_set_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+ struct serial_struct * info)
{
struct serial_struct newinfo;
unsigned long flags;
}
static int isicom_get_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+ struct serial_struct * info)
{
struct serial_struct out_info;
unsigned int cmd, unsigned long arg)
{
struct isi_port * port = (struct isi_port *) tty->driver_data;
- void __user *argp = (void __user *)arg;
int retval;
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return 0;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(arg, (unsigned long __user *) argp))
+ if(get_user(arg, (unsigned long *) arg))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
return 0;
case TIOCGSERIAL:
- return isicom_get_serial_info(port, argp);
+ return isicom_get_serial_info(port,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
- return isicom_set_serial_info(port, argp);
+ return isicom_set_serial_info(port,
+ (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
isicom_shutdown_port(port);
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
static int stli_brdinit(stlibrd_t *brdp);
static int stli_startbrd(stlibrd_t *brdp);
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
+static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp);
+static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp);
static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp);
static void stli_poll(unsigned long arg);
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts);
static long stli_mktiocm(unsigned long sigvalue);
static void stli_read(stlibrd_t *brdp, stliport_t *portp);
-static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp);
-static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp);
-static int stli_getbrdstats(combrd_t __user *bp);
-static int stli_getportstats(stliport_t *portp, comstats_t __user *cp);
+static int stli_getserial(stliport_t *portp, struct serial_struct *sp);
+static int stli_setserial(stliport_t *portp, struct serial_struct *sp);
+static int stli_getbrdstats(combrd_t *bp);
+static int stli_getportstats(stliport_t *portp, comstats_t *cp);
static int stli_portcmdstats(stliport_t *portp);
-static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
-static int stli_getportstruct(stliport_t __user *arg);
-static int stli_getbrdstruct(stlibrd_t __user *arg);
+static int stli_clrportstats(stliport_t *portp, comstats_t *cp);
+static int stli_getportstruct(unsigned long arg);
+static int stli_getbrdstruct(unsigned long arg);
static void *stli_memalloc(int len);
static stlibrd_t *stli_allocbrd(void);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j;
-#ifdef DEBUG
+#if DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stli_argbrds(void)
+static void stli_argbrds()
{
stlconf_t conf;
stlibrd_t *brdp;
int nrargs, i;
-#ifdef DEBUG
+#if DEBUG
printk("stli_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#ifdef DEBUG
+#if DEBUG
printk("stli_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
unsigned int minordev;
int brdnr, portnr, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
stliport_t *portp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
asyport_t aport;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_initopen(brdp=%x,portp=%x)\n", (int) brdp, (int) portp);
#endif
unsigned long flags;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_rawopen(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
unsigned long flags;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_rawclose(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_cmdwait(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
stlibrd_t *brdp;
asyport_t aport;
-#ifdef DEBUG
+#if DEBUG
printk("stli_setport(portp=%x)\n", (int) portp);
#endif
static void stli_delay(int len)
{
-#ifdef DEBUG
+#if DEBUG
printk("stli_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned long flags;
int rc, doclocal;
-#ifdef DEBUG
+#if DEBUG
printk("stli_waitcarrier(brdp=%x,portp=%x,filp=%x)\n",
(int) brdp, (int) portp, (int) filp);
#endif
unsigned int len, stlen, head, tail, size;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
static void stli_putchar(struct tty_struct *tty, unsigned char ch)
{
-#ifdef DEBUG
+#if DEBUG
printk("stli_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
unsigned char *buf, *shbuf;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_flushchars(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stli_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
+static int stli_getserial(stliport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
stlibrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
+static int stli_setserial(stliport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
int rc;
-#ifdef DEBUG
- printk("stli_setserial(portp=%p,sp=%p)\n", portp, sp);
+#if DEBUG
+ printk("stli_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
stlibrd_t *brdp;
unsigned int ival;
int rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stli_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) arg);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned __user *) arg)) == 0)
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0)
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
break;
case TIOCGSERIAL:
- rc = stli_getserial(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stli_getserial(portp, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = stli_setserial(portp, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stli_setserial(portp, (struct serial_struct *)arg);
break;
case STL_GETPFLAG:
- rc = put_user(portp->pflag, (unsigned __user *)argp);
+ rc = put_user(portp->pflag, (unsigned int *) arg);
break;
case STL_SETPFLAG:
- if ((rc = get_user(portp->pflag, (unsigned __user *)argp)) == 0)
+ if ((rc = get_user(portp->pflag, (unsigned int *) arg)) == 0)
stli_setport(portp);
break;
case COM_GETPORTSTATS:
- rc = stli_getportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stli_getportstats(portp, (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stli_clrportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stli_clrportstats(portp, (comstats_t *) arg);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
struct termios *tiosp;
asyport_t aport;
-#ifdef DEBUG
+#if DEBUG
printk("stli_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_throttle(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stli_unthrottle(tty=%x)\n", (int) tty);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk("stli_stop(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk("stli_start(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_dohangup(portp=%x)\n", (int) arg);
#endif
stlibrd_t *brdp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_hangup(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
unsigned long ftype, flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_flushbuffer(tty=%x)\n", (int) tty);
#endif
long arg;
/* long savestate, savetime; */
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stliport_t *portp;
unsigned long tend;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_waituntilsent(tty=%x,timeout=%x)\n", (int) tty, timeout);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
volatile unsigned char *bits;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_sendcmd(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
unsigned int head, tail, size;
unsigned int len, stlen;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_read(brdp=%x,portp=%d)\n",
(int) brdp, (int) portp);
#endif
unsigned long oldsigs;
int rc, donerx;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_hostcmd(brdp=%x,channr=%d)\n",
(int) brdp, channr);
#endif
static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tiosp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mkasyport(portp=%x,pp=%x,tiosp=%d)\n",
(int) portp, (int) pp, (int) tiosp);
#endif
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mkasysigs(sp=%x,dtr=%d,rts=%d)\n",
(int) sp, dtr, rts);
#endif
{
long tiocm;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_mktiocm(sigvalue=%x)\n", (int) sigvalue);
#endif
stliport_t *portp;
int i, panelnr, panelport;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initports(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_ecpenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpenable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATENABLE, (brdp->iobase + ECP_ATCONFR));
static void stli_ecpdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpdisable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_ATPAGESIZE);
static void stli_ecpreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_ecpintr(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpintr(brdp=%x)\n", (int) brdp);
#endif
outb(0x1, brdp->iobase);
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpeiinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecpeigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_EIPAGESIZE);
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_MCPAGESIZE);
static void stli_ecppciinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecppciinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_ecppcigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), board=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_PCIPAGESIZE);
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbenable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATENABLE), (brdp->iobase + ONB_ATCONFR));
static void stli_onbdisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbdisable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATDISABLE), (brdp->iobase + ONB_ATCONFR));
{
void *ptr;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
} else {
ptr = brdp->membase + (offset % ONB_ATPAGESIZE);
}
static void stli_onbreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbreset(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbeinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbeenable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbeenable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIENABLE, (brdp->iobase + ONB_EICONFR));
static void stli_onbedisable(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbedisable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_onbegetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % ONB_EIPAGESIZE);
static void stli_onbereset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_onbereset(brdp=%x)\n", (int) brdp);
#endif
static void stli_bbyinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_bbyinit(brdp=%d)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_ERR "stli_bbygetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
val = 0;
} else {
ptr = brdp->membase + (offset % BBY_PAGESIZE);
static void stli_bbyreset(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_bbyreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_stalinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalinit(brdp=%d)\n", (int) brdp);
#endif
{
void *ptr;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
+ ptr = 0;
} else {
ptr = brdp->membase + (offset % STAL_PAGESIZE);
}
{
volatile unsigned long *vecp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_stalreset(brdp=%x)\n", (int) brdp);
#endif
char *name;
int panelnr, nrports;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initecp(brdp=%x)\n", (int) brdp);
#endif
char *name;
int i;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initonb(brdp=%x)\n", (int) brdp);
#endif
stliport_t *portp;
int portnr, nrdevs, i, rc;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_startbrd(brdp=%x)\n", (int) brdp);
#endif
static int __init stli_brdinit(stlibrd_t *brdp)
{
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_brdinit(brdp=%x)\n", (int) brdp);
#endif
cdkonbsig_t onbsig, *onbsigp;
int i, foundit;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_eisamemprobe(brdp=%x)\n", (int) brdp);
#endif
if (! foundit) {
brdp->memaddr = 0;
- brdp->membase = NULL;
+ brdp->membase = 0;
printk(KERN_ERR "STALLION: failed to probe shared memory "
"region for %s in EISA slot=%d\n",
stli_brdnames[brdp->brdtype], (brdp->iobase >> 12));
return(0);
}
-static inline int stli_getbrdnr(void)
-{
- int i;
-
- for (i = 0; i < STL_MAXBRDS; i++) {
- if (!stli_brds[i]) {
- if (i >= stli_nrbrds)
- stli_nrbrds = i + 1;
- return i;
- }
- }
- return -1;
-}
-
/*****************************************************************************/
/*
* do is go probing around in the usual places hoping we can find it.
*/
-static inline int stli_findeisabrds(void)
+static inline int stli_findeisabrds()
{
stlibrd_t *brdp;
unsigned int iobase, eid;
int i;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_findeisabrds()\n");
#endif
* Find the next available board number that is free.
*/
+static inline int stli_getbrdnr()
+{
+ int i;
+
+ for (i = 0; (i < STL_MAXBRDS); i++) {
+ if (stli_brds[i] == (stlibrd_t *) NULL) {
+ if (i >= stli_nrbrds)
+ stli_nrbrds = i + 1;
+ return(i);
+ }
+ }
+ return(-1);
+}
+
/*****************************************************************************/
#ifdef CONFIG_PCI
{
stlibrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n",
brdtype, dev->bus->number, dev->devfn);
#endif
}
brdp->brdtype = brdtype;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "%s(%d): BAR[]=%lx,%lx,%lx,%lx\n", __FILE__, __LINE__,
pci_resource_start(devp, 0),
pci_resource_start(devp, 1),
* one as it is found.
*/
-static inline int stli_findpcibrds(void)
+static inline int stli_findpcibrds()
{
struct pci_dev *dev = NULL;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stli_findpcibrds()\n");
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlibrd_t *stli_allocbrd(void)
+static stlibrd_t *stli_allocbrd()
{
stlibrd_t *brdp;
* can find.
*/
-static inline int stli_initbrds(void)
+static inline int stli_initbrds()
{
stlibrd_t *brdp, *nxtbrdp;
stlconf_t *confp;
int i, j;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_initbrds()\n");
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp)
+static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
int brdnr, size, n;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memread(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp)
+static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
- char __user *chbuf;
+ char *chbuf;
int brdnr, size, n;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memwrite(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
if (fp->f_pos >= brdp->memsize)
return(0);
- chbuf = (char __user *) buf;
+ chbuf = (char *) buf;
size = MIN(count, (brdp->memsize - fp->f_pos));
save_flags(flags);
* Return the board stats structure to user app.
*/
-static int stli_getbrdstats(combrd_t __user *bp)
+static int stli_getbrdstats(combrd_t *bp)
{
stlibrd_t *brdp;
int i;
* what port to get stats for (used through board control device).
*/
-static int stli_getportstats(stliport_t *portp, comstats_t __user *cp)
+static int stli_getportstats(stliport_t *portp, comstats_t *cp)
{
stlibrd_t *brdp;
int rc;
- if (!portp) {
+ if (portp == (stliport_t *) NULL) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (!portp)
- return -ENODEV;
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
}
brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
if ((rc = stli_portcmdstats(portp)) < 0)
- return rc;
+ return(rc);
return copy_to_user(cp, &stli_comstats, sizeof(comstats_t)) ?
-EFAULT : 0;
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp)
+static int stli_clrportstats(stliport_t *portp, comstats_t *cp)
{
stlibrd_t *brdp;
int rc;
- if (!portp) {
+ if (portp == (stliport_t *) NULL) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (!portp)
- return -ENODEV;
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
}
brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
if (brdp->state & BST_STARTED) {
- if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0)
- return rc;
+ if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, 0, 0, 0)) < 0)
+ return(rc);
}
memset(&stli_comstats, 0, sizeof(comstats_t));
if (copy_to_user(cp, &stli_comstats, sizeof(comstats_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
* Return the entire driver ports structure to a user app.
*/
-static int stli_getportstruct(stliport_t __user *arg)
+static int stli_getportstruct(unsigned long arg)
{
stliport_t *portp;
- if (copy_from_user(&stli_dummyport, arg, sizeof(stliport_t)))
+ if (copy_from_user(&stli_dummyport, (void *)arg, sizeof(stliport_t)))
return -EFAULT;
portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
stli_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- if (copy_to_user(arg, portp, sizeof(stliport_t)))
+ if (portp == (stliport_t *) NULL)
+ return(-ENODEV);
+ if (copy_to_user((void *) arg, portp, sizeof(stliport_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stli_getbrdstruct(stlibrd_t __user *arg)
+static int stli_getbrdstruct(unsigned long arg)
{
stlibrd_t *brdp;
- if (copy_from_user(&stli_dummybrd, arg, sizeof(stlibrd_t)))
+ if (copy_from_user(&stli_dummybrd, (void *)arg, sizeof(stlibrd_t)))
return -EFAULT;
if ((stli_dummybrd.brdnr < 0) || (stli_dummybrd.brdnr >= STL_MAXBRDS))
- return -ENODEV;
+ return(-ENODEV);
brdp = stli_brds[stli_dummybrd.brdnr];
- if (!brdp)
- return -ENODEV;
- if (copy_to_user(arg, brdp, sizeof(stlibrd_t)))
+ if (brdp == (stlibrd_t *) NULL)
+ return(-ENODEV);
+ if (copy_to_user((void *) arg, brdp, sizeof(stlibrd_t)))
return -EFAULT;
- return 0;
+ return(0);
}
/*****************************************************************************/
{
stlibrd_t *brdp;
int brdnr, rc, done;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "stli_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n",
(int) ip, (int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- rc = stli_getportstats(NULL, argp);
+ rc = stli_getportstats((stliport_t *)NULL, (comstats_t *)arg);
done++;
break;
case COM_CLRPORTSTATS:
- rc = stli_clrportstats(NULL, argp);
+ rc = stli_clrportstats((stliport_t *)NULL, (comstats_t *)arg);
done++;
break;
case COM_GETBRDSTATS:
- rc = stli_getbrdstats(argp);
+ rc = stli_getbrdstats((combrd_t *) arg);
done++;
break;
case COM_READPORT:
- rc = stli_getportstruct(argp);
+ rc = stli_getportstruct(arg);
done++;
break;
case COM_READBOARD:
- rc = stli_getbrdstruct(argp);
+ rc = stli_getbrdstruct(arg);
done++;
break;
}
if (brdnr >= STL_MAXBRDS)
return(-ENODEV);
brdp = stli_brds[brdnr];
- if (!brdp)
+ if (brdp == (stlibrd_t *) NULL)
return(-ENODEV);
if (brdp->state == 0)
return(-ENODEV);
}
#endif
+extern int page_is_ram(unsigned long pagenr);
+
+static inline int page_is_allowed(unsigned long pagenr)
+{
+ #ifdef CONFIG_X86
+ if (pagenr <= 256)
+ return 1;
+ if (!page_is_ram(pagenr))
+ return 1;
+ printk("Access to 0x%lx by %s denied \n", pagenr << PAGE_SHIFT, current->comm);
+ return 0;
+ #else
+ return 1;
+ #endif
+}
+
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
unsigned long cursor;
cursor = from >> PAGE_SHIFT;
- while ((cursor << PAGE_SHIFT) < to) {
- if (!devmem_is_allowed(cursor))
+ while ( (cursor << PAGE_SHIFT) < to) {
+ if (!page_is_allowed(cursor))
return 0;
cursor++;
}
}
#endif
if (!range_is_allowed(realp, realp+count))
- return -EPERM;
+ return -EFAULT;
copied = copy_from_user(p, buf, count);
if (copied) {
ssize_t ret = written + (count - copied);
}
#endif
if (!range_is_allowed(p, p+count))
- return -EPERM;
+ return -EFAULT;
if (copy_to_user(buf, __va(p), count))
return -EFAULT;
read += count;
cursor = vma->vm_pgoff;
while ((cursor << PAGE_SHIFT) < offset + vma->vm_end-vma->vm_start) {
- if (!devmem_is_allowed(cursor))
- return -EPERM;
+ if (!page_is_allowed(cursor))
+ return -EFAULT;
cursor++;
}
return virtr + read;
}
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+ return -EPERM;
+
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+ if (count > (unsigned long) high_memory - p)
+ wrote = (unsigned long) high_memory - p;
+
+ written = do_write_mem((void*)p, p, buf, wrote, ppos);
+ if (written != wrote)
+ return written;
+ wrote = written;
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
+ ssize_t ret;
+
+ free_page((unsigned long)kbuf);
+ ret = wrote + virtr + (len - written);
+ return ret ? ret : -EFAULT;
+ }
+ }
+ len = vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
+ return virtr + wrote;
+}
+
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
static struct file_operations kmem_fops = {
.llseek = memory_lseek,
.read = read_kmem,
+ .write = write_kmem,
.mmap = mmap_kmem,
.open = open_kmem,
};
"CP-204J series",
};
-#ifdef CONFIG_PCI
static struct pci_device_id moxa_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C218, PCI_ANY_ID, PCI_ANY_ID,
0, 0, MOXA_BOARD_C218_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
-#endif /* CONFIG_PCI */
typedef struct _moxa_isa_board_conf {
int boardType;
static int verbose = 0;
static int ttymajor = MOXAMAJOR;
/* Variables for insmod */
-#ifdef MODULE
static int baseaddr[] = {0, 0, 0, 0};
static int type[] = {0, 0, 0, 0};
static int numports[] = {0, 0, 0, 0};
-#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
/*
* static functions:
*/
+static int moxa_get_PCI_conf(struct pci_dev *, int, moxa_board_conf *);
static void do_moxa_softint(void *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static void MoxaPortTxEnable(int);
static int MoxaPortResetBrkCnt(int);
static void MoxaPortSendBreak(int, int);
-static int moxa_get_serial_info(struct moxa_str *, struct serial_struct __user *);
-static int moxa_set_serial_info(struct moxa_str *, struct serial_struct __user *);
+static int moxa_get_serial_info(struct moxa_str *, struct serial_struct *);
+static int moxa_set_serial_info(struct moxa_str *, struct serial_struct *);
static void MoxaSetFifo(int port, int enable);
static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
-#ifdef CONFIG_PCI
-static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
-{
- board->baseAddr = pci_resource_start (p, 2);
- board->boardType = board_type;
- switch (board_type) {
- case MOXA_BOARD_C218_ISA:
- case MOXA_BOARD_C218_PCI:
- board->numPorts = 8;
- break;
-
- case MOXA_BOARD_CP204J:
- board->numPorts = 4;
- break;
- default:
- board->numPorts = 0;
- break;
- }
- board->busType = MOXA_BUS_TYPE_PCI;
- board->pciInfo.busNum = p->bus->number;
- board->pciInfo.devNum = p->devfn >> 3;
-
- return (0);
-}
-#endif /* CONFIG_PCI */
-
static int __init moxa_init(void)
{
int i, numBoards;
moxaDriver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(moxaDriver, &moxa_ops);
- moxaXmitBuff = NULL;
+ moxaXmitBuff = 0;
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
- ch->tty = NULL;
+ ch->tty = 0;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
ch->count = 0;
module_init(moxa_init);
module_exit(moxa_exit);
+static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
+{
+ board->baseAddr = pci_resource_start (p, 2);
+ board->boardType = board_type;
+ switch (board_type) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ board->numPorts = 8;
+ break;
+
+ case MOXA_BOARD_CP204J:
+ board->numPorts = 4;
+ break;
+ default:
+ board->numPorts = 0;
+ break;
+ }
+ board->busType = MOXA_BUS_TYPE_PCI;
+ board->pciInfo.busNum = p->bus->number;
+ board->pciInfo.devNum = p->devfn >> 3;
+
+ return (0);
+}
+
static void do_moxa_softint(void *private_)
{
struct moxa_str *ch = (struct moxa_str *) private_;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
ch->event = 0;
- ch->tty = NULL;
+ ch->tty = 0;
if (ch->blocked_open) {
if (ch->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
{
struct moxa_str *ch = (struct moxa_str *) tty->driver_data;
register int port;
- void __user *argp = (void __user *)arg;
int retval;
port = PORTNO(tty);
MoxaPortSendBreak(ch->port, arg);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(retval, (unsigned long __user *) argp))
+ if(get_user(retval, (unsigned long *) arg))
return -EFAULT;
arg = retval;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
ch->asyncflags |= ASYNC_CHECK_CD;
return (0);
case TIOCGSERIAL:
- return moxa_get_serial_info(ch, argp);
+ return (moxa_get_serial_info(ch, (struct serial_struct *) arg));
case TIOCSSERIAL:
- return moxa_set_serial_info(ch, argp);
+ return (moxa_set_serial_info(ch, (struct serial_struct *) arg));
default:
retval = MoxaDriverIoctl(cmd, arg, port);
}
ch->event = 0;
ch->count = 0;
ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
- ch->tty = NULL;
+ ch->tty = 0;
wake_up_interruptible(&ch->open_wait);
}
unsigned char *charptr, *flagptr;
unsigned long flags;
- ts = NULL;
+ ts = 0;
tp = ch->tty;
if (tp)
ts = tp->termios;
static void moxafunc(unsigned long, int, ushort);
static void wait_finish(unsigned long);
static void low_water_check(unsigned long);
-static int moxaloadbios(int, unsigned char __user *, int);
+static int moxaloadbios(int, unsigned char *, int);
static int moxafindcard(int);
-static int moxaload320b(int, unsigned char __user *, int);
-static int moxaloadcode(int, unsigned char __user *, int);
+static int moxaload320b(int, unsigned char *, int);
+static int moxaloadcode(int, unsigned char *, int);
static int moxaloadc218(int, unsigned long, int);
static int moxaloadc320(int, unsigned long, int, int *);
};
struct dl_str {
- char __user *buf;
+ char *buf;
int len;
int cardno;
};
int i;
int status;
int MoxaPortTxQueue(int), MoxaPortRxQueue(int);
- void __user *argp = (void __user *)arg;
if (port == QueryPort) {
if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) &&
}
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user(argp, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
+ if(copy_to_user((void *)arg, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
return -EFAULT;
return (0);
case MOXA_INIT_DRIVER:
return (0);
case MOXA_GETDATACOUNT:
moxaLog.tick = jiffies;
- if(copy_to_user(argp, &moxaLog, sizeof(mon_st)))
+ if(copy_to_user((void *)arg, &moxaLog, sizeof(mon_st)))
return -EFAULT;
return (0);
case MOXA_FLUSH_QUEUE:
temp_queue[i].outq = MoxaPortTxQueue(i);
}
}
- if(copy_to_user(argp, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
+ if(copy_to_user((void *)arg, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
return -EFAULT;
return (0);
case MOXA_GET_OQUEUE:
i = MoxaPortTxQueue(port);
- return put_user(i, (unsigned long __user *)argp);
+ return put_user(i, (unsigned long *) arg);
case MOXA_GET_IQUEUE:
i = MoxaPortRxQueue(port);
- return put_user(i, (unsigned long __user *)argp);
+ return put_user(i, (unsigned long *) arg);
case MOXA_GET_MAJOR:
- if(copy_to_user(argp, &ttymajor, sizeof(int)))
+ if(copy_to_user((void *)arg, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
i = 0;
- if(copy_to_user(argp, &i, sizeof(int)))
+ if(copy_to_user((void *)arg, &i, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GETMSTATUS:
else
GMStatus[i].cflag = moxaChannels[i].tty->termios->c_cflag;
}
- if(copy_to_user(argp, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
+ if(copy_to_user((void *)arg, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
return -EFAULT;
return 0;
default:
break;
}
- if(copy_from_user(&dltmp, argp, sizeof(struct dl_str)))
+ if(copy_from_user(&dltmp, (void *)arg, sizeof(struct dl_str)))
return -EFAULT;
if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS)
return -EINVAL;
}
static int moxa_get_serial_info(struct moxa_str *info,
- struct serial_struct __user *retinfo)
+ struct serial_struct *retinfo)
{
struct serial_struct tmp;
+ if (!retinfo)
+ return (-EFAULT);
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->port;
static int moxa_set_serial_info(struct moxa_str *info,
- struct serial_struct __user *new_info)
+ struct serial_struct *new_info)
{
struct serial_struct new_serial;
}
}
-static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
+static int moxaloadbios(int cardno, unsigned char *tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaload320b(int cardno, unsigned char __user *tmp, int len)
+static int moxaload320b(int cardno, unsigned char * tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
+static int moxaloadcode(int cardno, unsigned char * tmp, int len)
{
unsigned long baseAddr, ofsAddr;
int retval, port, i;
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %p file %p\n",
- inode, file);
+ "mwavedd::mwave_open, entry inode %x file %x\n",
+ (int) inode, (int) file);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_open, exit return retval %x\n", retval);
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %p file %p\n",
- inode, file);
+ "mwavedd::mwave_close, entry inode %x file %x\n",
+ (int) inode, (int) file);
PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
retval);
void __user *arg = (void __user *)ioarg;
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry inode %p file %p cmd %x arg %x\n",
- inode, file, iocmd, (int) ioarg);
+ "mwavedd::mwave_ioctl, entry inode %x file %x cmd %x arg %x\n",
+ (int) inode, (int) file, iocmd, (int) ioarg);
switch (iocmd) {
loff_t * ppos)
{
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
+ "mwavedd::mwave_read entry file %p, buf %p, count %x ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
{
PRINTK_5(TRACE_MWAVE,
"mwavedd::mwave_write entry file %p, buf %p,"
- " count %zx ppos %p\n",
+ " count %x ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
static irqreturn_t UartInterrupt(int irq, void *dev_id, struct pt_regs *regs)
{
PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ "tp3780i::UartInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
return IRQ_HANDLED;
}
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ "tp3780i::DspInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
PRINTK_2(TRACE_TP3780I,
pSettings->bPllBypass = TP_CFG_PllBypass;
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
- if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
+ if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", 0)) {
PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
- if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
+ if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", 0)) {
PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
goto exit_cleanup;
} else {
#define MOXA_GET_CUMAJOR (MOXA + 64)
#define MOXA_GETMSTATUS (MOXA + 65)
-#ifdef CONFIG_PCI
static struct pci_device_id mxser_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C168, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
MXSER_BOARD_C168_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-#endif /* CONFIG_PCI */
static int ioaddr[MXSER_BOARDS];
static int ttymajor = MXSERMAJOR;
static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
+static int mxser_get_PCI_conf(struct pci_dev *, int, struct mxser_hwconf *);
static void mxser_do_softint(void *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_startup(struct mxser_struct *);
static void mxser_shutdown(struct mxser_struct *);
static int mxser_change_speed(struct mxser_struct *, struct termios *old_termios);
-static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_get_lsr_info(struct mxser_struct *, unsigned int __user *);
+static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct *);
+static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct *);
+static int mxser_get_lsr_info(struct mxser_struct *, unsigned int *);
static void mxser_send_break(struct mxser_struct *, int);
static int mxser_tiocmget(struct tty_struct *, struct file *);
static int mxser_tiocmset(struct tty_struct *, struct file *, unsigned int, unsigned int);
mxsercfg[board] = *hwconf;
}
-#ifdef CONFIG_PCI
static int mxser_get_PCI_conf(struct pci_dev *pdev, int board_type, struct mxser_hwconf *hwconf)
{
int i;
}
return (0);
}
-#endif /* CONFIG_PCI */
static struct tty_operations mxser_ops = {
.open = mxser_open,
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
struct mxser_struct *info = (struct mxser_struct *) tty->driver_data;
int retval;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
+ struct serial_icounter_struct *p_cuser; /* user space */
unsigned long templ;
- void __user *argp = (void __user *)arg;
if (PORTNO(tty) == MXSER_PORTS)
return (mxser_ioctl_special(cmd, arg));
mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
case TIOCSSOFTCAR:
- if(get_user(templ, (unsigned long __user *) arg))
+ if(get_user(templ, (unsigned long *) arg))
return -EFAULT;
arg = templ;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return (0);
case TIOCGSERIAL:
- return mxser_get_serial_info(info, argp);
+ return (mxser_get_serial_info(info, (struct serial_struct *) arg));
case TIOCSSERIAL:
- return mxser_set_serial_info(info, argp);
+ return (mxser_set_serial_info(info, (struct serial_struct *) arg));
case TIOCSERGETLSR: /* Get line status register */
- return mxser_get_lsr_info(info, argp);
+ return (mxser_get_lsr_info(info, (unsigned int *) arg));
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
cli();
cnow = info->icount;
restore_flags(flags);
- p_cuser = argp;
+ p_cuser = (struct serial_icounter_struct *) arg;
if(put_user(cnow.cts, &p_cuser->cts))
return -EFAULT;
if(put_user(cnow.dsr, &p_cuser->dsr))
return -EFAULT;
return put_user(cnow.dcd, &p_cuser->dcd);
case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
+ return put_user(info->baud_base != 115200 ? 1 : 0, (int *) arg);
default:
return (-ENOIOCTLCMD);
}
static int mxser_ioctl_special(unsigned int cmd, unsigned long arg)
{
int i, result, status;
- void __user *argp = (void __user *)arg;
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user(argp, mxsercfg,
+ if(copy_to_user((struct mxser_hwconf *) arg, mxsercfg,
sizeof(struct mxser_hwconf) * 4))
return -EFAULT;
return 0;
case MOXA_GET_MAJOR:
- if(copy_to_user(argp, &ttymajor, sizeof(int)))
+ if(copy_to_user((int *) arg, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
result = 0;
- if(copy_to_user(argp, &result, sizeof(int)))
+ if(copy_to_user((int *) arg, &result, sizeof(int)))
return -EFAULT;
return 0;
if (mxvar_table[i].base)
result |= (1 << i);
}
- return put_user(result, (unsigned long __user *) argp);
+ return put_user(result, (unsigned long *) arg);
case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ if(copy_to_user((struct mxser_log *) arg, &mxvar_log, sizeof(mxvar_log)))
return -EFAULT;
return (0);
case MOXA_GETMSTATUS:
else
GMStatus[i].cts = 0;
}
- if(copy_to_user(argp, GMStatus,
+ if(copy_to_user((struct mxser_mstatus *) arg, GMStatus,
sizeof(struct mxser_mstatus) * MXSER_PORTS))
return -EFAULT;
return 0;
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
int pass_counter = 0;
int handled = 0;
- port = NULL;
+ port = 0;
for (i = 0; i < MXSER_BOARDS; i++) {
if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
port = dev_id;
/*
* and set the speed of the serial port
*/
- mxser_change_speed(info, NULL);
+ mxser_change_speed(info, 0);
info->flags |= ASYNC_INITIALIZED;
restore_flags(flags);
*/
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
info->IER = 0;
outb(0x00, info->base + UART_IER); /* disable all intrs */
* ------------------------------------------------------------
*/
static int mxser_get_serial_info(struct mxser_struct *info,
- struct serial_struct __user *retinfo)
+ struct serial_struct *retinfo)
{
struct serial_struct tmp;
}
static int mxser_set_serial_info(struct mxser_struct *info,
- struct serial_struct __user *new_info)
+ struct serial_struct *new_info)
{
struct serial_struct new_serial;
unsigned int flags;
if (info->flags & ASYNC_INITIALIZED) {
if (flags != (info->flags & ASYNC_SPD_MASK)) {
- mxser_change_speed(info, NULL);
+ mxser_change_speed(info, 0);
}
} else
retval = mxser_startup(info);
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int __user *value)
+static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int *value)
{
unsigned char status;
unsigned int result;
#endif
tty->disc_data = NULL;
if (tty == n_hdlc->backup_tty)
- n_hdlc->backup_tty = NULL;
+ n_hdlc->backup_tty = 0;
if (tty != n_hdlc->tty)
return;
if (n_hdlc->backup_tty) {
struct n_hdlc *n_hdlc = kmalloc(sizeof(*n_hdlc), GFP_KERNEL);
if (!n_hdlc)
- return NULL;
+ return 0;
memset(n_hdlc, 0, sizeof(*n_hdlc));
static inline unsigned char *alloc_buf(void)
{
+ unsigned char *p;
int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
- if (PAGE_SIZE != N_TTY_BUF_SIZE)
- return kmalloc(N_TTY_BUF_SIZE, prio);
- else
- return (unsigned char *)__get_free_page(prio);
+ if (PAGE_SIZE != N_TTY_BUF_SIZE) {
+ p = kmalloc(N_TTY_BUF_SIZE, prio);
+ if (p)
+ memset(p, 0, N_TTY_BUF_SIZE);
+ } else
+ p = (unsigned char *)get_zeroed_page(prio);
+
+ return p;
}
static inline void free_buf(unsigned char *buf)
NVRAM_MINOR);
goto out;
}
- if (!create_proc_read_entry("driver/nvram", 0, NULL, nvram_read_proc,
+ if (!create_proc_read_entry("driver/nvram", 0, 0, nvram_read_proc,
NULL)) {
printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
ret = -ENOMEM;
static void __exit
nvram_cleanup_module(void)
{
- remove_proc_entry("driver/nvram", NULL);
+ remove_proc_entry("driver/nvram", 0);
misc_deregister(&nvram_dev);
}
* device at any one time.
*/
-static int button_read (struct file *filp, char __user *buffer,
+static int button_read (struct file *filp, char *buffer,
size_t count, loff_t *ppos)
{
interruptible_sleep_on (&button_wait_queue);
static void button_sequence_finished (unsigned long parameters);
static irqreturn_t button_handler (int irq, void *dev_id, struct pt_regs *regs);
+static int button_read (struct file *filp, char *buffer,
+ size_t count, loff_t *ppos);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
int button_del_callback (void (*callback) (void));
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
-static int write_block(unsigned long p, const char __user *buf, int count);
+static int write_block(unsigned long p, const char *buf, int count);
+static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg);
+static ssize_t flash_read(struct file *file, char *buf, size_t count, loff_t * ppos);
+static ssize_t flash_write(struct file *file, const char *buf, size_t count, loff_t * ppos);
+static loff_t flash_llseek(struct file *file, loff_t offset, int orig);
#define KFLASH_SIZE 1024*1024 //1 Meg
#define KFLASH_SIZE4 4*1024*1024 //4 Meg
return 0;
}
-static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
- loff_t *ppos)
+static ssize_t flash_read(struct file *file, char *buf, size_t size, loff_t * ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
int ret = 0;
if (flashdebug)
- printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
- "buffer=%p, count=0x%X.\n", p, buf, count);
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, buffer=%p, count=0x%X.\n",
+ p, buf, count);
if (count)
ret = -ENXIO;
return ret;
}
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t size, loff_t * ppos)
+static ssize_t flash_write(struct file *file, const char *buf, size_t size, loff_t * ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
break;
}
if (flashdebug)
- printk(KERN_DEBUG "flash_write: writing offset %lX, "
- "from buf %p, bytes left %X.\n", p, buf,
- count - written);
+ printk(KERN_DEBUG "flash_write: writing offset %lX, from buf "
+ "%p, bytes left %X.\n", p, buf, count - written);
/*
* write_block will limit write to space left in this block
/*
* write_block will limit number of bytes written to the space in this block
*/
-static int write_block(unsigned long p, const char __user *buf, int count)
+static int write_block(unsigned long p, const char *buf, int count)
{
volatile unsigned int c1;
volatile unsigned int c2;
#define CHA 0x00 /* channel A offset */
#define CHB 0x40 /* channel B offset */
-
-/*
- * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it.
- */
-#undef PVR
#define RXFIFO 0
#define TXFIFO 0
static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
int i = 0;
+ unsigned char status;
/* wait for command completion */
- while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
+ while ((status = read_reg(info, (unsigned char)(channel+STAR)) & BIT2)) {
udelay(1);
if (i++ == 1000)
return FALSE;
} else {
time = jiffies;
}
-#elif defined (__sparc_v9__)
- unsigned long tick = tick_ops->get_tick();
-
- time = (unsigned int) tick;
- num ^= (tick >> 32UL);
#else
time = jiffies;
#endif
}
static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
sysctl_poolsize = random_state->poolinfo.POOLBYTES;
- ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, filp, buffer, lenp);
if (ret || !write ||
(sysctl_poolsize == random_state->poolinfo.POOLBYTES))
return ret;
* sysctl system call, it is returned as 16 bytes of binary data.
*/
static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
- return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
+ return proc_dostring(&fake_table, write, filp, buffer, lenp);
}
static int uuid_strategy(ctl_table *table, int __user *name, int nlen,
return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
}
#endif
-
-/*
- * Get a random word:
- */
-unsigned int get_random_int(void)
-{
- unsigned int val = 0;
-
- if (!exec_shield_randomize)
- return 0;
-
-#ifdef CONFIG_X86_HAS_TSC
- rdtscl(val);
-#endif
- val += current->pid + jiffies + (int)&val;
-
- /*
- * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
- * every second, from the entropy pool (and thus creates a limited
- * drain on it), and uses halfMD4Transform within the second. We
- * also spice it with the TSC (if available), jiffies, PID and the
- * stack address:
- */
- return secure_ip_id(val);
-}
-
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
-{
- unsigned long range = end - len - start;
- if (end <= start + len)
- return 0;
- return PAGE_ALIGN(get_random_int() % range + start);
-}
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
}
static inline int rc_set_serial_info(struct riscom_port * port,
- struct serial_struct __user * newinfo)
+ struct serial_struct * newinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
}
static inline int rc_get_serial_info(struct riscom_port * port,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
{
struct riscom_port *port = (struct riscom_port *)tty->driver_data;
- void __user *argp = (void __user *)arg;
int retval;
if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
rc_send_break(port, arg ? arg*(HZ/10) : HZ/4);
break;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned int *) arg);
case TIOCSSOFTCAR:
- if (get_user(arg,(unsigned __user *) argp))
+ if (get_user(arg,(unsigned int *) arg))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
break;
case TIOCGSERIAL:
- return rc_get_serial_info(port, argp);
+ return rc_get_serial_info(port, (struct serial_struct *) arg);
case TIOCSSERIAL:
- return rc_set_serial_info(port, argp);
+ return rc_set_serial_info(port, (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
unsigned m = iminor(file->f_dentry->d_inode);
size_t i;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data+i))
unsigned m = iminor(file->f_dentry->d_inode);
int value;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
value = scx200_gpio_get(m);
if (put_user(value ? '1' : '0', buf))
return -EFAULT;
unsigned m = iminor(inode);
if (m > 63)
return -EINVAL;
- return nonseekable_open(inode, file);
+ return 0;
}
static int scx200_gpio_release(struct inode *inode, struct file *file)
/************************* End of Includes **************************/
/***************************** Prototypes ***************************/
+/* Helper functions */
+static __inline__ volatile struct a2232status *a2232stat(unsigned int board,
+ unsigned int portonboard);
+static __inline__ volatile struct a2232memory *a2232mem (unsigned int board);
+static __inline__ void a2232_receive_char( struct a2232_port *port,
+ int ch, int err );
/* The interrupt service routine */
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp);
/* Initialize the port structures */
static struct zorro_dev *zd_a2232[MAX_A2232_BOARDS];
/***************************** End of Global variables **************/
-/* Helper functions */
-
-static inline volatile struct a2232memory *a2232mem(unsigned int board)
-{
- return (volatile struct a2232memory *)ZTWO_VADDR(zd_a2232[board]->resource.start);
-}
-
-static inline volatile struct a2232status *a2232stat(unsigned int board,
- unsigned int portonboard)
-{
- volatile struct a2232memory *mem = a2232mem(board);
- return &(mem->Status[portonboard]);
-}
-
-static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
-{
-/* Mostly stolen from other drivers.
- Maybe one could implement a more efficient version by not only
- transferring one character at a time.
-*/
- struct tty_struct *tty = port->gs.tty;
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return;
-
- tty->flip.count++;
-
-#if 0
- switch(err) {
- case TTY_BREAK:
- break;
- case TTY_PARITY:
- break;
- case TTY_OVERRUN:
- break;
- case TTY_FRAME:
- break;
- }
-#endif
-
- *tty->flip.flag_buf_ptr++ = err;
- *tty->flip.char_buf_ptr++ = ch;
- tty_flip_buffer_push(tty);
-}
-
/***************************** Functions ****************************/
/*** BEGIN OF REAL_DRIVER FUNCTIONS ***/
}
/*** END OF FUNCTIONS EXPECTED BY TTY DRIVER STRUCTS ***/
+/* Return a pointer to the status block of PORTONBOARD on BOARD within
+ * the board's shared memory image. */
+static __inline__ volatile struct a2232status *a2232stat(unsigned int board, unsigned int portonboard)
+{
+	volatile struct a2232memory *mem = a2232mem(board);
+	return &(mem->Status[portonboard]);
+}
+
+/* Map BOARD's Zorro resource base address to the virtual address of
+ * its shared memory. */
+static __inline__ volatile struct a2232memory *a2232mem (unsigned int board)
+{
+	return (volatile struct a2232memory *) ZTWO_VADDR( zd_a2232[board]->resource.start );
+}
+
+/* Push one received character CH (with error flag ERR) into PORT's
+ * tty flip buffer; the character is dropped if the flip buffer is
+ * already full. */
+static __inline__ void a2232_receive_char( struct a2232_port *port,
+					int ch, int err )
+{
+/* Mostly stolen from other drivers.
+   Maybe one could implement a more efficient version by not only
+   transferring one character at a time.
+*/
+	struct tty_struct *tty = port->gs.tty;
+
+	if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+		return;
+
+	tty->flip.count++;
+
+#if 0
+	switch(err) {
+	case TTY_BREAK:
+		break;
+	case TTY_PARITY:
+		break;
+	case TTY_OVERRUN:
+		break;
+	case TTY_FRAME:
+		break;
+	}
+#endif
+
+	*tty->flip.flag_buf_ptr++ = err;
+	*tty->flip.char_buf_ptr++ = ch;
+	tty_flip_buffer_push(tty);
+}
+
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp)
{
#if A2232_IOBUFLEN != 256
--- /dev/null
+/*
+ * C-Brick Serial Port (and console) driver for SGI Altix machines.
+ *
+ * This driver is NOT suitable for talking to the l1-controller for
+ * anything other than 'console activities' --- please use the l1
+ * driver for that.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/sysrq.h>
+#include <linux/circ_buf.h>
+#include <linux/serial_reg.h>
+#include <asm/uaccess.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn2/sn_private.h>
+
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+static char sysrq_serial_str[] = "\eSYS";
+static char *sysrq_serial_ptr = sysrq_serial_str;
+static unsigned long sysrq_requested;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+/* minor device number */
+#define SN_SAL_MINOR 64
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 128
+
+/* number of characters we can transmit to the SAL console at a time */
+#define SN_SAL_MAX_CHARS 120
+
+#define SN_SAL_EVENT_WRITE_WAKEUP 0
+
+/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
+ * avoid losing chars, (always has to be a power of 2) */
+#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
+
+#define SN_SAL_UART_FIFO_DEPTH 16
+#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
+
+/* we don't kmalloc/get_free_page these as we want them available
+ * before either of those are initialized */
+static char sn_xmit_buff_mem[SN_SAL_BUFFER_SIZE];
+
+struct volatile_circ_buf {
+ char *cb_buf;
+ int cb_head;
+ int cb_tail;
+};
+
+static struct volatile_circ_buf xmit = { .cb_buf = sn_xmit_buff_mem };
+static char sn_tmp_buffer[SN_SAL_BUFFER_SIZE];
+
+static struct tty_struct *sn_sal_tty;
+
+static struct timer_list sn_sal_timer;
+static int sn_sal_event; /* event type for task queue */
+
+static int sn_sal_is_asynch;
+static int sn_sal_irq;
+static spinlock_t sn_sal_lock = SPIN_LOCK_UNLOCKED;
+static int sn_total_tx_count;
+static int sn_total_rx_count;
+
+static void sn_sal_tasklet_action(unsigned long data);
+static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0);
+
+static unsigned long sn_interrupt_timeout;
+
+extern u64 master_node_bedrock_address;
+
+#undef DEBUG
+#ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
+#define DPRINTF(x...) sn_debug_printf(x)
+#else
+#define DPRINTF(x...) do { } while (0)
+#endif
+
+struct sn_sal_ops {
+ int (*sal_puts)(const char *s, int len);
+ int (*sal_getc)(void);
+ int (*sal_input_pending)(void);
+ void (*sal_wakeup_transmit)(void);
+};
+
+/* This is the pointer used. It is assigned to point to one of
+ * the tables below.
+ */
+static struct sn_sal_ops *sn_func;
+
+/* Prototypes */
+static int snt_hw_puts(const char *, int);
+static int snt_poll_getc(void);
+static int snt_poll_input_pending(void);
+static int snt_sim_puts(const char *, int);
+static int snt_sim_getc(void);
+static int snt_sim_input_pending(void);
+static int snt_intr_getc(void);
+static int snt_intr_input_pending(void);
+static void sn_intr_transmit_chars(void);
+
+/* A table for polling */
+static struct sn_sal_ops poll_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_poll_getc,
+ .sal_input_pending = snt_poll_input_pending
+};
+
+/* A table for the simulator */
+static struct sn_sal_ops sim_ops = {
+ .sal_puts = snt_sim_puts,
+ .sal_getc = snt_sim_getc,
+ .sal_input_pending = snt_sim_input_pending
+};
+
+/* A table for interrupts enabled */
+static struct sn_sal_ops intr_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_intr_getc,
+ .sal_input_pending = snt_intr_input_pending,
+ .sal_wakeup_transmit = sn_intr_transmit_chars
+};
+
+
+/* the console does output in two distinctly different ways:
+ * synchronous and asynchronous (buffered). initially, early_printk
+ * does synchronous output. any data written goes directly to the SAL
+ * to be output (incidentally, it is internally buffered by the SAL)
+ * after interrupts and timers are initialized and available for use,
+ * the console init code switches to asynchronous output. this is
+ * also the earliest opportunity to begin polling for console input.
+ * after console initialization, console output and tty (serial port)
+ * output is buffered and sent to the SAL asynchronously (either by
+ * timer callback or by UART interrupt) */
+
+
+/* routines for running the console in polling mode */
+
+/* Hand LEN bytes of S to the SAL console synchronously; returns the
+ * count of characters the SAL accepted. */
+static int
+snt_hw_puts(const char *s, int len)
+{
+	/* looking at the PROM source code, putb calls the flush
+	 * routine, so if we send characters in FIFO sized chunks, it
+	 * should go out by the next time the timer gets called */
+	return ia64_sn_console_putb(s, len);
+}
+
+/* Fetch one character from the SAL console (polled mode). */
+static int
+snt_poll_getc(void)
+{
+	int ch;
+	ia64_sn_console_getc(&ch);
+	return ch;
+}
+
+/* Nonzero iff the SAL reports console input waiting (polled mode). */
+static int
+snt_poll_input_pending(void)
+{
+	int status, input;
+
+	status = ia64_sn_console_check(&input);
+	return !status && input;
+}
+
+
+/* routines for running the console on the simulator */
+
+/* Write COUNT bytes of STR straight to the simulated UART's TX
+ * register (bypassing the SAL); always reports COUNT written. */
+static int
+snt_sim_puts(const char *str, int count)
+{
+	int counter = count;
+
+#ifdef FLAG_DIRECT_CONSOLE_WRITES
+	/* This is an easy way to pre-pend the output to know whether the output
+	 * was done via sal or directly */
+	writeb('[', master_node_bedrock_address + (UART_TX << 3));
+	writeb('+', master_node_bedrock_address + (UART_TX << 3));
+	writeb(']', master_node_bedrock_address + (UART_TX << 3));
+	writeb(' ', master_node_bedrock_address + (UART_TX << 3));
+#endif /* FLAG_DIRECT_CONSOLE_WRITES */
+	while (counter > 0) {
+		writeb(*str, master_node_bedrock_address + (UART_TX << 3));
+		counter--;
+		str++;
+	}
+
+	return count;
+}
+
+/* Read one character from the simulated UART's RX register. */
+static int
+snt_sim_getc(void)
+{
+	return readb(master_node_bedrock_address + (UART_RX << 3));
+}
+
+/* Nonzero iff the simulated UART's line-status register has the
+ * data-ready bit set. */
+static int
+snt_sim_input_pending(void)
+{
+	return readb(master_node_bedrock_address + (UART_LSR << 3)) & UART_LSR_DR;
+}
+
+
+/* routines for an interrupt driven console (normal) */
+
+/* Read one buffered character from the SAL (interrupt-driven mode). */
+static int
+snt_intr_getc(void)
+{
+	return ia64_sn_console_readc();
+}
+
+/* Nonzero iff the SAL's interrupt status has the receive bit set. */
+static int
+snt_intr_input_pending(void)
+{
+	return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
+}
+
+/* The early printk (possible setup) and function call */
+
+/* early_printk output hook: on first use, lazily selects the
+ * simulator or polled ops table and runs early_sn_setup(), then
+ * emits COUNT bytes of S synchronously. */
+void
+early_printk_sn_sal(const char *s, unsigned count)
+{
+	extern void early_sn_setup(void);
+
+	if (!sn_func) {
+		if (IS_RUNNING_ON_SIMULATOR())
+			sn_func = &sim_ops;
+		else
+			sn_func = &poll_ops;
+
+		early_sn_setup();
+	}
+	sn_func->sal_puts(s, count);
+}
+
+#ifdef DEBUG
+/* this is as "close to the metal" as we can get, used when the driver
+ * itself may be broken */
+static int
+sn_debug_printf(const char *fmt, ...)
+{
+ static char printk_buf[1024];
+ int printed_len;
+ va_list args;
+
+ va_start(args, fmt);
+ printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ early_printk_sn_sal(printk_buf, printed_len);
+ va_end(args);
+ return printed_len;
+}
+#endif /* DEBUG */
+
+/*
+ * Interrupt handling routines.
+ */
+
+/* Record EVENT (a SN_SAL_EVENT_* bit) in sn_sal_event and schedule
+ * the tasklet to service it outside interrupt context. */
+static void
+sn_sal_sched_event(int event)
+{
+	sn_sal_event |= (1 << event);
+	tasklet_schedule(&sn_sal_tasklet);
+}
+
+/* sn_receive_chars can be called before sn_sal_tty is initialized. in
+ * that case, its only use is to trigger sysrq and kdb */
+/* Drain pending console input via the current ops table into the tty
+ * flip buffer.  Caller holds sn_sal_lock; *flags holds its saved irq
+ * flags, and the lock is dropped/re-taken around handle_sysrq(). */
+static void
+sn_receive_chars(struct pt_regs *regs, unsigned long *flags)
+{
+	int ch;
+
+	while (sn_func->sal_input_pending()) {
+		ch = sn_func->sal_getc();
+		if (ch < 0) {
+			printk(KERN_ERR "sn_serial: An error occured while "
+			       "obtaining data from the console (0x%0x)\n", ch);
+			break;
+		}
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+		if (sysrq_requested) {
+			unsigned long sysrq_timeout = sysrq_requested + HZ*5;
+
+			sysrq_requested = 0;
+			if (ch && time_before(jiffies, sysrq_timeout)) {
+				spin_unlock_irqrestore(&sn_sal_lock, *flags);
+				handle_sysrq(ch, regs, NULL);
+				spin_lock_irqsave(&sn_sal_lock, *flags);
+				/* don't record this char */
+				continue;
+			}
+		}
+		if (ch == *sysrq_serial_ptr) {
+			if (!(*++sysrq_serial_ptr)) {
+				sysrq_requested = jiffies;
+				sysrq_serial_ptr = sysrq_serial_str;
+			}
+		}
+		else
+			sysrq_serial_ptr = sysrq_serial_str;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+		/* record the character to pass up to the tty layer */
+		if (sn_sal_tty) {
+			*sn_sal_tty->flip.char_buf_ptr = ch;
+			sn_sal_tty->flip.char_buf_ptr++;
+			sn_sal_tty->flip.count++;
+			if (sn_sal_tty->flip.count == TTY_FLIPBUF_SIZE)
+				break;
+		}
+		sn_total_rx_count++;
+	}
+
+	if (sn_sal_tty)
+		tty_flip_buffer_push((struct tty_struct *)sn_sal_tty);
+}
+
+
+/* synch_flush_xmit must be called with sn_sal_lock */
+/* Synchronously push everything in the circular xmit buffer out
+ * through the current ops table, advancing the tail as bytes are
+ * accepted. */
+static void
+synch_flush_xmit(void)
+{
+	int xmit_count, tail, head, loops, ii;
+	int result;
+	char *start;
+
+	if (xmit.cb_head == xmit.cb_tail)
+		return;	/* Nothing to do. */
+
+	head = xmit.cb_head;
+	tail = xmit.cb_tail;
+	start = &xmit.cb_buf[tail];
+
+	/* twice around gets the tail to the end of the buffer and
+	 * then to the head, if needed */
+	loops = (head < tail) ? 2 : 1;
+
+	for (ii = 0; ii < loops; ii++) {
+		xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+		if (xmit_count > 0) {
+			result = sn_func->sal_puts((char *)start, xmit_count);
+			if (!result)
+				DPRINTF("\n*** synch_flush_xmit failed to flush\n");
+			if (result > 0) {
+				xmit_count -= result;
+				sn_total_tx_count += result;
+				tail += result;
+				tail &= SN_SAL_BUFFER_SIZE - 1;
+				xmit.cb_tail = tail;
+				start = (char *)&xmit.cb_buf[tail];
+			}
+		}
+	}
+}
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+/* Polled-mode transmitter: send one contiguous chunk of the xmit
+ * buffer to the SAL, then ask the tty layer for more output once the
+ * buffer is nearly drained. */
+static void
+sn_poll_transmit_chars(void)
+{
+	int xmit_count, tail, head;
+	int result;
+	char *start;
+
+	BUG_ON(!sn_sal_is_asynch);
+
+	if (xmit.cb_head == xmit.cb_tail ||
+	    (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+		/* Nothing to do. */
+		return;
+	}
+
+	head = xmit.cb_head;
+	tail = xmit.cb_tail;
+	start = &xmit.cb_buf[tail];
+
+	xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+	if (xmit_count == 0)
+		DPRINTF("\n*** empty xmit_count\n");
+
+	/* use the ops, as we could be on the simulator */
+	result = sn_func->sal_puts((char *)start, xmit_count);
+	if (!result)
+		DPRINTF("\n*** error in synchronous sal_puts\n");
+	/* XXX chadt clean this up */
+	if (result > 0) {
+		xmit_count -= result;
+		sn_total_tx_count += result;
+		tail += result;
+		tail &= SN_SAL_BUFFER_SIZE - 1;
+		xmit.cb_tail = tail;
+		start = &xmit.cb_buf[tail];
+	}
+
+	/* if there's few enough characters left in the xmit buffer
+	 * that we could stand for the upper layer to send us some
+	 * more, ask for it. */
+	if (sn_sal_tty)
+		if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+			sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+/* Interrupt-mode transmitter: drain the whole xmit buffer through
+ * ia64_sn_console_xmit_chars(), then ask the tty layer for more
+ * output once the buffer is nearly empty. */
+static void
+sn_intr_transmit_chars(void)
+{
+	int xmit_count, tail, head, loops, ii;
+	int result;
+	char *start;
+
+	BUG_ON(!sn_sal_is_asynch);
+
+	if (xmit.cb_head == xmit.cb_tail ||
+	    (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+		/* Nothing to do. */
+		return;
+	}
+
+	head = xmit.cb_head;
+	tail = xmit.cb_tail;
+	start = &xmit.cb_buf[tail];
+
+	/* twice around gets the tail to the end of the buffer and
+	 * then to the head, if needed */
+	loops = (head < tail) ? 2 : 1;
+
+	for (ii = 0; ii < loops; ii++) {
+		xmit_count = (head < tail) ?
+			(SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+		if (xmit_count > 0) {
+			result = ia64_sn_console_xmit_chars((char *)start, xmit_count);
+#ifdef DEBUG
+			if (!result)
+				DPRINTF("`");
+#endif
+			if (result > 0) {
+				xmit_count -= result;
+				sn_total_tx_count += result;
+				tail += result;
+				tail &= SN_SAL_BUFFER_SIZE - 1;
+				xmit.cb_tail = tail;
+				start = &xmit.cb_buf[tail];
+			}
+		}
+	}
+
+	/* if there's few enough characters left in the xmit buffer
+	 * that we could stand for the upper layer to send us some
+	 * more, ask for it. */
+	if (sn_sal_tty)
+		if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+			sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+/* Console UART interrupt handler: ask the SAL for the interrupt
+ * cause, then receive and/or transmit under sn_sal_lock. */
+static irqreturn_t
+sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* this call is necessary to pass the interrupt back to the
+	 * SAL, since it doesn't intercept the UART interrupts
+	 * itself */
+	int status = ia64_sn_console_intr_status();
+	unsigned long flags;
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	if (status & SAL_CONSOLE_INTR_RECV)
+		sn_receive_chars(regs, &flags);
+	if (status & SAL_CONSOLE_INTR_XMIT)
+		sn_intr_transmit_chars();
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+	return IRQ_HANDLED;
+}
+
+
+/* returns the console irq if interrupt is successfully registered,
+ * else 0 */
+/* Route the console UART vector to a CPU on the console's node and
+ * install sn_sal_interrupt on the resulting irq. */
+static int
+sn_sal_connect_interrupt(void)
+{
+	cpuid_t intr_cpuid;
+	unsigned int intr_cpuloc;
+	nasid_t console_nasid;
+	unsigned int console_irq;
+	int result;
+
+	console_nasid = ia64_sn_get_console_nasid();
+	intr_cpuid = first_cpu(node_to_cpumask(nasid_to_cnodeid(console_nasid)));
+	intr_cpuloc = cpu_physical_id(intr_cpuid);
+	console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR);
+
+	result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR);
+	BUG_ON(result != SGI_UART_VECTOR);
+
+	result = request_irq(console_irq, sn_sal_interrupt, SA_INTERRUPT, "SAL console driver", &sn_sal_tty);
+	if (result >= 0)
+		return console_irq;
+
+	printk(KERN_WARNING "sn_serial: console proceeding in polled mode\n");
+	return 0;
+}
+
+/* Tasklet body: service deferred events — currently only the
+ * write-wakeup event, which notifies the line discipline and any
+ * write waiters. */
+static void
+sn_sal_tasklet_action(unsigned long data)
+{
+	unsigned long flags;
+
+	if (sn_sal_tty) {
+		spin_lock_irqsave(&sn_sal_lock, flags);
+		if (sn_sal_tty) {
+			if (test_and_clear_bit(SN_SAL_EVENT_WRITE_WAKEUP, &sn_sal_event)) {
+				if ((sn_sal_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && sn_sal_tty->ldisc.write_wakeup)
+					(sn_sal_tty->ldisc.write_wakeup)((struct tty_struct *)sn_sal_tty);
+				wake_up_interruptible((wait_queue_head_t *)&sn_sal_tty->write_wait);
+			}
+		}
+		spin_unlock_irqrestore(&sn_sal_lock, flags);
+	}
+}
+
+
+/*
+ * This function handles polled mode.
+ */
+/* Timer callback: while no irq is registered (sn_sal_irq == 0), poll
+ * for input, push buffered output, and re-arm the timer. */
+static void
+sn_sal_timer_poll(unsigned long dummy)
+{
+	unsigned long flags;
+
+	if (!sn_sal_irq) {
+		spin_lock_irqsave(&sn_sal_lock, flags);
+		sn_receive_chars(NULL, &flags);
+		sn_poll_transmit_chars();
+		spin_unlock_irqrestore(&sn_sal_lock, flags);
+		mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+	}
+}
+
+
+/*
+ * User-level console routines
+ */
+
+/* tty open(): latch the first opener's tty_struct into sn_sal_tty so
+ * the receive path can deliver characters to it. */
+static int
+sn_sal_open(struct tty_struct *tty, struct file *filp)
+{
+	unsigned long flags;
+
+	DPRINTF("sn_sal_open: sn_sal_tty = %p, tty = %p, filp = %p\n",
+		sn_sal_tty, tty, filp);
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	if (!sn_sal_tty)
+		sn_sal_tty = tty;
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+	return 0;
+}
+
+
+/* We're keeping all our resources. We're keeping interrupts turned
+ * on. Maybe just let the tty layer finish its stuff...? GMSH
+ */
+/* tty close(): on the last close, flush driver and ldisc buffers and
+ * detach sn_sal_tty. */
+static void
+sn_sal_close(struct tty_struct *tty, struct file * filp)
+{
+	if (tty->count == 1) {
+		unsigned long flags;
+		tty->closing = 1;
+		if (tty->driver->flush_buffer)
+			tty->driver->flush_buffer(tty);
+		if (tty->ldisc.flush_buffer)
+			tty->ldisc.flush_buffer(tty);
+		tty->closing = 0;
+		spin_lock_irqsave(&sn_sal_lock, flags);
+		sn_sal_tty = NULL;
+		spin_unlock_irqrestore(&sn_sal_lock, flags);
+	}
+}
+
+
+/* tty write(): copy COUNT bytes from BUF into the circular xmit
+ * buffer — staging through sn_tmp_buffer when FROM_USER — then kick
+ * the transmitter unless output is stopped.  Returns the number of
+ * bytes accepted, or -EFAULT if the first user-space copy faults
+ * entirely. */
+static int
+sn_sal_write(struct tty_struct *tty, int from_user,
+	     const unsigned char *buf, int count)
+{
+	int c, ret = 0;
+	unsigned long flags;
+
+	if (from_user) {
+		while (1) {
+			int c1;
+			c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail,
+					      SN_SAL_BUFFER_SIZE);
+
+			if (count < c)
+				c = count;
+			if (c <= 0)
+				break;
+
+			c -= copy_from_user(sn_tmp_buffer, buf, c);
+			if (!c) {
+				if (!ret)
+					ret = -EFAULT;
+				break;
+			}
+
+			/* Turn off interrupts and see if the xmit buffer has
+			 * moved since the last time we looked.
+			 */
+			spin_lock_irqsave(&sn_sal_lock, flags);
+			c1 = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+			if (c1 < c)
+				c = c1;
+
+			memcpy(xmit.cb_buf + xmit.cb_head, sn_tmp_buffer, c);
+			xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+			spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+			buf += c;
+			count -= c;
+			ret += c;
+		}
+	}
+	else {
+		/* The buffer passed in isn't coming from userland,
+		 * so cut out the middleman (sn_tmp_buffer).
+		 */
+		spin_lock_irqsave(&sn_sal_lock, flags);
+		while (1) {
+			c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+			if (count < c)
+				c = count;
+			if (c <= 0) {
+				break;
+			}
+			memcpy(xmit.cb_buf + xmit.cb_head, buf, c);
+			xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+			buf += c;
+			count -= c;
+			ret += c;
+		}
+		spin_unlock_irqrestore(&sn_sal_lock, flags);
+	}
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	if (xmit.cb_head != xmit.cb_tail && !(tty && (tty->stopped || tty->hw_stopped)))
+		if (sn_func->sal_wakeup_transmit)
+			sn_func->sal_wakeup_transmit();
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+	return ret;
+}
+
+
+/* tty put_char(): append CH to the xmit buffer if there is room and
+ * kick the transmitter; the character is silently dropped when the
+ * buffer is full. */
+static void
+sn_sal_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	if (CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) != 0) {
+		xmit.cb_buf[xmit.cb_head] = ch;
+		xmit.cb_head = (xmit.cb_head + 1) & (SN_SAL_BUFFER_SIZE-1);
+		if ( sn_func->sal_wakeup_transmit )
+			sn_func->sal_wakeup_transmit();
+	}
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+/* tty flush_chars(): kick the transmitter if anything is queued. */
+static void
+sn_sal_flush_chars(struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE))
+		if (sn_func->sal_wakeup_transmit)
+			sn_func->sal_wakeup_transmit();
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+/* tty write_room(): free space left in the circular xmit buffer. */
+static int
+sn_sal_write_room(struct tty_struct *tty)
+{
+	unsigned long flags;
+	int space;
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	space = CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+	return space;
+}
+
+
+/* tty chars_in_buffer(): bytes queued in the xmit buffer, unsent. */
+static int
+sn_sal_chars_in_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+	int space;
+
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	space = CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+	DPRINTF("<%d>", space);
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+	return space;
+}
+
+
+/* tty flush_buffer(): discard all queued output, then wake write
+ * waiters and notify the line discipline. */
+static void
+sn_sal_flush_buffer(struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	/* drop everything */
+	spin_lock_irqsave(&sn_sal_lock, flags);
+	xmit.cb_head = xmit.cb_tail = 0;
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+	/* wake up tty level */
+	wake_up_interruptible(&tty->write_wait);
+	if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup)
+		(tty->ldisc.write_wakeup)(tty);
+}
+
+
+/* tty hangup(): just drop any queued output. */
+static void
+sn_sal_hangup(struct tty_struct *tty)
+{
+	sn_sal_flush_buffer(tty);
+}
+
+
+/* tty wait_until_sent(): intentionally a no-op — the SAL owns the
+ * actual transmit FIFO. */
+static void
+sn_sal_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+	/* this is SAL's problem */
+	DPRINTF("<sn_serial: should wait until sent>");
+}
+
+
+/*
+ * sn_sal_read_proc
+ *
+ * Console /proc interface
+ */
+/* Report the console nasid, irq, and tx/rx byte totals in one line;
+ * single-shot output (*eof is always set). */
+
+static int
+sn_sal_read_proc(char *page, char **start, off_t off, int count,
+		 int *eof, void *data)
+{
+	int len = 0;
+	off_t begin = 0;
+
+	len += sprintf(page, "sn_serial: nasid:%ld irq:%d tx:%d rx:%d\n",
+		       ia64_sn_get_console_nasid(), sn_sal_irq,
+		       sn_total_tx_count, sn_total_rx_count);
+	*eof = 1;
+
+	if (off >= len+begin)
+		return 0;
+	*start = page + (off-begin);
+
+	return count < begin+len-off ? count : begin+len-off;
+}
+
+
+/* tty layer entry points for the SAL console device. */
+static struct tty_operations sn_sal_driver_ops = {
+	.open = sn_sal_open,
+	.close = sn_sal_close,
+	.write = sn_sal_write,
+	.put_char = sn_sal_put_char,
+	.flush_chars = sn_sal_flush_chars,
+	.write_room = sn_sal_write_room,
+	.chars_in_buffer = sn_sal_chars_in_buffer,
+	.hangup = sn_sal_hangup,
+	.wait_until_sent = sn_sal_wait_until_sent,
+	.read_proc = sn_sal_read_proc,
+};
+/* Allocated by alloc_tty_driver() in sn_sal_module_init(). */
+static struct tty_driver *sn_sal_driver;
+
+/* sn_sal_init wishlist:
+ * - allocate sn_tmp_buffer
+ * - fix up the tty_driver struct
+ * - turn on receive interrupts
+ * - do any termios twiddling once and for all
+ */
+
+/*
+ * Boot-time initialization code
+ */
+
+/* Switch console output from synchronous SAL calls to the buffered
+ * (asynchronous) path, using a timer to poll for input and drain the
+ * buffer until interrupts become available.  Safe to call more than
+ * once (no-op after the first). */
+static void __init
+sn_sal_switch_to_asynch(void)
+{
+	unsigned long flags;
+
+	/* without early_printk, we may be invoked late enough to race
+	 * with other cpus doing console IO at this point, however
+	 * console interrupts will never be enabled */
+	spin_lock_irqsave(&sn_sal_lock, flags);
+
+	if (sn_sal_is_asynch) {
+		spin_unlock_irqrestore(&sn_sal_lock, flags);
+		return;
+	}
+
+	DPRINTF("sn_serial: switch to asynchronous console\n");
+
+	/* early_printk invocation may have done this for us */
+	if (!sn_func) {
+		if (IS_RUNNING_ON_SIMULATOR())
+			sn_func = &sim_ops;
+		else
+			sn_func = &poll_ops;
+	}
+
+	/* we can't turn on the console interrupt (as request_irq
+	 * calls kmalloc, which isn't set up yet), so we rely on a
+	 * timer to poll for input and push data from the console
+	 * buffer.
+	 */
+	init_timer(&sn_sal_timer);
+	sn_sal_timer.function = sn_sal_timer_poll;
+
+	if (IS_RUNNING_ON_SIMULATOR())
+		sn_interrupt_timeout = 6;
+	else {
+		/* 960cps / 16 char FIFO = 60HZ
+		 * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
+		sn_interrupt_timeout = HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
+	}
+	mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+
+	sn_sal_is_asynch = 1;
+	spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+/* Try to move the console from timer polling to interrupt-driven
+ * input.  If sn_sal_connect_interrupt() fails (returns 0) we silently
+ * stay in polled mode. */
+static void __init
+sn_sal_switch_to_interrupts(void)
+{
+ int irq;
+
+ DPRINTF("sn_serial: switching to interrupt driven console\n");
+
+ irq = sn_sal_connect_interrupt();
+ if (irq) {
+ unsigned long flags;
+ spin_lock_irqsave(&sn_sal_lock, flags);
+
+ /* sn_sal_irq is a global variable. When it's set to
+ * a non-zero value, we stop polling for input (since
+ * interrupts should now be enabled). */
+ sn_sal_irq = irq;
+ sn_func = &intr_ops;
+
+ /* turn on receive interrupts */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+/* Module/initcall entry point: allocate and register the tty driver
+ * for the SAL console on sn2 platforms, ensure asynchronous operation,
+ * and (off the simulator) attempt to enable interrupt-driven input.
+ * Returns 0 on success or a negative errno. */
+static int __init
+sn_sal_module_init(void)
+{
+ int retval;
+
+ DPRINTF("sn_serial: sn_sal_module_init\n");
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ /* single-port driver */
+ sn_sal_driver = alloc_tty_driver(1);
+ if ( !sn_sal_driver )
+ return -ENOMEM;
+
+ sn_sal_driver->owner = THIS_MODULE;
+ sn_sal_driver->driver_name = "sn_serial";
+ sn_sal_driver->name = "ttyS";
+ sn_sal_driver->major = TTY_MAJOR;
+ sn_sal_driver->minor_start = SN_SAL_MINOR;
+ sn_sal_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ sn_sal_driver->subtype = SERIAL_TYPE_NORMAL;
+ sn_sal_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
+
+ tty_set_operations(sn_sal_driver, &sn_sal_driver_ops);
+
+ /* when this driver is compiled in, the console initialization
+ * will have already switched us into asynchronous operation
+ * before we get here through the module initcalls */
+ sn_sal_switch_to_asynch();
+
+ /* at this point (module_init) we can try to turn on interrupts */
+ if (!IS_RUNNING_ON_SIMULATOR())
+ sn_sal_switch_to_interrupts();
+
+ sn_sal_driver->init_termios = tty_std_termios;
+ sn_sal_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+
+ if ((retval = tty_register_driver(sn_sal_driver))) {
+ printk(KERN_ERR "sn_serial: Unable to register tty driver\n");
+ /* drop the reference taken by alloc_tty_driver() so the
+ * driver struct is not leaked on the failure path */
+ put_tty_driver(sn_sal_driver);
+ return retval;
+ }
+ return 0;
+}
+
+
+/* Module teardown: stop the poll timer, then unregister and release
+ * the tty driver allocated in sn_sal_module_init(). */
+static void __exit
+sn_sal_module_exit(void)
+{
+ del_timer_sync(&sn_sal_timer);
+ tty_unregister_driver(sn_sal_driver);
+ put_tty_driver(sn_sal_driver);
+}
+
+module_init(sn_sal_module_init);
+module_exit(sn_sal_module_exit);
+
+/*
+ * Kernel console definitions
+ */
+
+#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
+/*
+ * Print a string to the SAL console. The console_lock must be held
+ * when we get here.
+ *
+ * Three cases are discriminated so that output is never lost, even
+ * from oops/panic/kdb context:
+ *  1) sn_sal_lock already held (we may be interrupting its holder):
+ *     flush and write directly without taking the lock;
+ *  2) interrupt context: take the lock only around the flush, then
+ *     write directly;
+ *  3) normal context: go through the regular sn_sal_write() path.
+ * In every case '\r' is emitted before each '\n'.
+ */
+static void
+sn_sal_console_write(struct console *co, const char *s, unsigned count)
+{
+ unsigned long flags;
+ const char *s1;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ /* somebody really wants this output, might be an
+ * oops, kdb, panic, etc. make sure they get it. */
+ if (spin_is_locked(&sn_sal_lock)) {
+ synch_flush_xmit();
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_func->sal_puts(s, s1 - s);
+ sn_func->sal_puts("\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_func->sal_puts(s, count);
+ }
+ else if (in_interrupt()) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ synch_flush_xmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_func->sal_puts(s, s1 - s);
+ sn_func->sal_puts("\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_func->sal_puts(s, count);
+ }
+ else {
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ sn_sal_write(NULL, 0, s, s1 - s);
+ sn_sal_write(NULL, 0, "\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ sn_sal_write(NULL, 0, s, count);
+ }
+}
+
+/* console->device hook: hand the tty layer our driver and the
+ * console's own index. */
+static struct tty_driver *
+sn_sal_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return sn_sal_driver;
+}
+
+/* console->setup hook: nothing to configure (SAL owns the UART
+ * parameters), so always succeed. */
+static int __init
+sn_sal_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+
+/* kernel console descriptor; .index = -1 lets the console core pick
+ * the line from the command-line console= argument */
+static struct console sal_console = {
+ .name = "ttyS",
+ .write = sn_sal_console_write,
+ .device = sn_sal_console_device,
+ .setup = sn_sal_console_setup,
+ .index = -1
+};
+
+/* console initcall: on sn2 hardware, switch to asynchronous mode
+ * (safe to call again from sn_sal_module_init - it is idempotent)
+ * and register the SAL console with the console core. */
+static int __init
+sn_sal_serial_console_init(void)
+{
+ if (ia64_platform_is("sn2")) {
+ sn_sal_switch_to_asynch();
+ DPRINTF("sn_sal_serial_console_init : register console\n");
+ register_console(&sal_console);
+ }
+ return 0;
+}
+console_initcall(sn_sal_serial_console_init);
+
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */
{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { 0 }
+ { 0, 0, 0, 0 }
};
#define SONYPI_BUF_SIZE 128
/*
* There is a bunch of documentation about the card, jumpers, config
* settings, restrictions, cables, device names and numbers in
- * Documentation/specialix.txt
+ * ../../Documentation/specialix.txt
*/
#include <linux/config.h>
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = NULL;
+ port->tty = 0;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
static inline int sx_set_serial_info(struct specialix_port * port,
- struct serial_struct __user * newinfo)
+ struct serial_struct * newinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
int change_speed;
unsigned long flags;
+ int error;
+ error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp));
+ if (error)
+ return error;
+
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
static inline int sx_get_serial_info(struct specialix_port * port,
- struct serial_struct __user *retinfo)
+ struct serial_struct * retinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
+ int error;
+ error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp));
+ if (error)
+ return error;
+
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_CIRRUS;
tmp.line = port - sx_port;
unsigned int cmd, unsigned long arg)
{
struct specialix_port *port = (struct specialix_port *)tty->driver_data;
+ int error;
int retval;
- void __user *argp = (void __user *)arg;
if (sx_paranoia_check(port, tty->name, "sx_ioctl"))
return -ENODEV;
sx_send_break(port, arg ? arg*(HZ/10) : HZ/4);
return 0;
case TIOCGSOFTCAR:
- if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp))
- return -EFAULT;
+ error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long));
+ if (error)
+ return error;
+ put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long *) arg);
return 0;
case TIOCSSOFTCAR:
- if (get_user(arg, (unsigned long __user *) argp))
- return -EFAULT;
+ get_user(arg, (unsigned long *) arg);
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return 0;
case TIOCGSERIAL:
- return sx_get_serial_info(port, argp);
+ return sx_get_serial_info(port, (struct serial_struct *) arg);
case TIOCSSERIAL:
- return sx_set_serial_info(port, argp);
+ return sx_set_serial_info(port, (struct serial_struct *) arg);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = NULL;
+ port->tty = 0;
wake_up_interruptible(&port->open_wait);
}
static int stl_brdinit(stlbrd_t *brdp);
static int stl_initports(stlbrd_t *brdp, stlpanel_t *panelp);
static int stl_mapirq(int irq, char *name);
-static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp);
-static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp);
-static int stl_getbrdstats(combrd_t __user *bp);
-static int stl_getportstats(stlport_t *portp, comstats_t __user *cp);
-static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp);
-static int stl_getportstruct(stlport_t __user *arg);
-static int stl_getbrdstruct(stlbrd_t __user *arg);
+static int stl_getserial(stlport_t *portp, struct serial_struct *sp);
+static int stl_setserial(stlport_t *portp, struct serial_struct *sp);
+static int stl_getbrdstats(combrd_t *bp);
+static int stl_getportstats(stlport_t *portp, comstats_t *cp);
+static int stl_clrportstats(stlport_t *portp, comstats_t *cp);
+static int stl_getportstruct(unsigned long arg);
+static int stl_getbrdstruct(unsigned long arg);
static int stl_waitcarrier(stlport_t *portp, struct file *filp);
static void stl_delay(int len);
static void stl_eiointr(stlbrd_t *brdp);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j, k;
-#ifdef DEBUG
+#if DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stl_argbrds(void)
+static void stl_argbrds()
{
stlconf_t conf;
stlbrd_t *brdp;
int nrargs, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlbrd_t *stl_allocbrd(void)
+static stlbrd_t *stl_allocbrd()
{
stlbrd_t *brdp;
unsigned int minordev;
int brdnr, panelnr, portnr, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
unsigned long flags;
int rc, doclocal;
-#ifdef DEBUG
+#if DEBUG
printk("stl_waitcarrier(portp=%x,filp=%x)\n", (int) portp, (int) filp);
#endif
stlport_t *portp;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
static void stl_delay(int len)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned char *chbuf;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
unsigned int len;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_flushchars(tty=%x)\n", (int) tty);
#endif
stlport_t *portp;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int size;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp)
+static int stl_getserial(stlport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
stlbrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp)
+static int stl_setserial(stlport_t *portp, struct serial_struct *sp)
{
struct serial_struct sio;
-#ifdef DEBUG
+#if DEBUG
printk("stl_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
stlport_t *portp;
unsigned int ival;
int rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stl_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) argp);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if (get_user(ival, (unsigned int __user *) arg))
- return -EFAULT;
- tty->termios->c_cflag =
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(int))) == 0) {
+ get_user(ival, (unsigned int *) arg);
+ tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
+ }
break;
case TIOCGSERIAL:
- rc = stl_getserial(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stl_getserial(portp, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = stl_setserial(portp, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = stl_setserial(portp, (struct serial_struct *) arg);
break;
case COM_GETPORTSTATS:
- rc = stl_getportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_getportstats(portp, (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stl_clrportstats(portp, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_clrportstats(portp, (comstats_t *) arg);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
stlport_t *portp;
struct termios *tiosp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_throttle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_unthrottle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_stop(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_start(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_hangup(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_flushbuffer(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stlport_t *portp;
unsigned long tend;
-#ifdef DEBUG
+#if DEBUG
printk("stl_waituntilsent(tty=%x,timeout=%d)\n", (int) tty, timeout);
#endif
{
stlport_t *portp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#ifdef DEBUG
+#if DEBUG
printk("stl_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
int i;
int handled = 0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_intr(irq=%d,regs=%x)\n", irq, (int) regs);
#endif
portp = private;
-#ifdef DEBUG
+#if DEBUG
printk("stl_offintr(portp=%x)\n", (int) portp);
#endif
{
int rc, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_mapirq(irq=%d,name=%s)\n", irq, name);
#endif
stlport_t *portp;
int chipmask, i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initports(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
char *name;
int rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initeio(brdp=%x)\n", (int) brdp);
#endif
int panelnr, banknr, i;
char *name;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initech(brdp=%x)\n", (int) brdp);
#endif
{
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_brdinit(brdp=%x)\n", (int) brdp);
#endif
* Find the next available board number that is free.
*/
-static inline int stl_getbrdnr(void)
+static inline int stl_getbrdnr()
{
int i;
{
stlbrd_t *brdp;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n", brdtype,
devp->bus->number, devp->devfn);
#endif
* Different Stallion boards use the BAR registers in different ways,
* so set up io addresses based on board type.
*/
-#ifdef DEBUG
+#if DEBUG
printk("%s(%d): BAR[]=%x,%x,%x,%x IRQ=%x\n", __FILE__, __LINE__,
pci_resource_start(devp, 0), pci_resource_start(devp, 1),
pci_resource_start(devp, 2), pci_resource_start(devp, 3), devp->irq);
*/
-static inline int stl_findpcibrds(void)
+static inline int stl_findpcibrds()
{
struct pci_dev *dev = NULL;
int i, rc;
-#ifdef DEBUG
+#if DEBUG
printk("stl_findpcibrds()\n");
#endif
* since the initial search and setup is too different.
*/
-static inline int stl_initbrds(void)
+static inline int stl_initbrds()
{
stlbrd_t *brdp;
stlconf_t *confp;
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_initbrds()\n");
#endif
* Return the board stats structure to user app.
*/
-static int stl_getbrdstats(combrd_t __user *bp)
+static int stl_getbrdstats(combrd_t *bp)
{
stlbrd_t *brdp;
stlpanel_t *panelp;
* what port to get stats for (used through board control device).
*/
-static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
+static int stl_getportstats(stlport_t *portp, comstats_t *cp)
{
unsigned char *head, *tail;
unsigned long flags;
- if (!portp) {
+ if (portp == (stlport_t *) NULL) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp)
+static int stl_clrportstats(stlport_t *portp, comstats_t *cp)
{
- if (!portp) {
+ if (portp == (stlport_t *) NULL) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Return the entire driver ports structure to a user app.
*/
-static int stl_getportstruct(stlport_t __user *arg)
+static int stl_getportstruct(unsigned long arg)
{
stlport_t *portp;
- if (copy_from_user(&stl_dummyport, arg, sizeof(stlport_t)))
+ if (copy_from_user(&stl_dummyport, (void *) arg, sizeof(stlport_t)))
return -EFAULT;
portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
stl_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- return copy_to_user(arg, portp, sizeof(stlport_t)) ? -EFAULT : 0;
+ if (portp == (stlport_t *) NULL)
+ return(-ENODEV);
+ return copy_to_user((void *)arg, portp,
+ sizeof(stlport_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stl_getbrdstruct(stlbrd_t __user *arg)
+static int stl_getbrdstruct(unsigned long arg)
{
stlbrd_t *brdp;
- if (copy_from_user(&stl_dummybrd, arg, sizeof(stlbrd_t)))
+ if (copy_from_user(&stl_dummybrd, (void *) arg, sizeof(stlbrd_t)))
return -EFAULT;
if ((stl_dummybrd.brdnr < 0) || (stl_dummybrd.brdnr >= STL_MAXBRDS))
- return -ENODEV;
+ return(-ENODEV);
brdp = stl_brds[stl_dummybrd.brdnr];
- if (!brdp)
+ if (brdp == (stlbrd_t *) NULL)
return(-ENODEV);
- return copy_to_user(arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
+ return copy_to_user((void *)arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
{
int brdnr, rc;
- void __user *argp = (void __user *)arg;
-#ifdef DEBUG
+#if DEBUG
printk("stl_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n", (int) ip,
(int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- rc = stl_getportstats(NULL, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_getportstats((stlport_t *) NULL,
+ (comstats_t *) arg);
break;
case COM_CLRPORTSTATS:
- rc = stl_clrportstats(NULL, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(comstats_t))) == 0)
+ rc = stl_clrportstats((stlport_t *) NULL,
+ (comstats_t *) arg);
break;
case COM_GETBRDSTATS:
- rc = stl_getbrdstats(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(combrd_t))) == 0)
+ rc = stl_getbrdstats((combrd_t *) arg);
break;
case COM_READPORT:
- rc = stl_getportstruct(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(stlport_t))) == 0)
+ rc = stl_getportstruct(arg);
break;
case COM_READBOARD:
- rc = stl_getbrdstruct(argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(stlbrd_t))) == 0)
+ rc = stl_getbrdstruct(arg);
break;
default:
rc = -ENOIOCTLCMD;
int chipmask, i, j;
int nrchips, uartaddr, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#ifdef DEBUG
+#if DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" cor1=%x cor2=%x cor3=%x cor4=%x cor5=%x\n",
unsigned char msvr1, msvr2;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char sreron, sreroff;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
#endif
save_flags(flags);
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
struct tty_struct *tty;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
struct tty_struct *tty;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400flush(portp=%x)\n", (int) portp);
#endif
static int stl_cd1400datastate(stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400datastate(portp=%x)\n", (int) portp);
#endif
{
unsigned char svrtype;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400eiointr(panelp=%x,iobase=%x)\n",
(int) panelp, iobase);
#endif
{
unsigned char svrtype;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400echintr(panelp=%x,iobase=%x)\n", (int) panelp,
iobase);
#endif
char *head, *tail;
unsigned char ioack, srer;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400txisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned char status;
char ch;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400rxisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned int ioack;
unsigned char misr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_cd1400mdmisr(panelp=%x)\n", (int) panelp);
#endif
int chipmask, i;
int nrchips, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198panelinit(brdp=%x,panelp=%x)\n",
(int) brdp, (int) panelp);
#endif
static void stl_sc26198portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#ifdef DEBUG
+#if DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" mr0=%x mr1=%x mr2=%x clk=%x\n", mr0, mr1, mr2, clk);
unsigned char iopioron, iopioroff;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char imr;
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
unsigned long flags;
unsigned char mr0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
unsigned long flags;
unsigned char mr0;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198flush(portp=%x)\n", (int) portp);
#endif
unsigned long flags;
unsigned char sr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198datastate(portp=%x)\n", (int) portp);
#endif
{
int i;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198wait(portp=%x)\n", (int) portp);
#endif
int len, stlen;
char *head, *tail;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198txisr(portp=%x)\n", (int) portp);
#endif
struct tty_struct *tty;
unsigned int len, buflen, ioaddr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198rxisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
{
unsigned char cir, ipr, xisr;
-#ifdef DEBUG
+#if DEBUG
printk("stl_sc26198otherisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
#endif
-#ifdef CONFIG_PCI
static struct pci_device_id sx_pci_tbl[] = {
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, sx_pci_tbl);
-#endif /* CONFIG_PCI */
/* Configurable options:
(Don't be too sure that it'll work if you toggle them) */
unsigned int cmd, unsigned long arg)
{
int rc = 0;
- int __user *descr = (int __user *)arg;
- int i;
+ int *descr = (int *)arg, i;
static struct sx_board *board = NULL;
int nbytes, offset;
unsigned long data;
get_user (data, descr++);
while (nbytes && data) {
for (i=0;i<nbytes;i += SX_CHUNK_SIZE) {
- if (copy_from_user(tmp, (char __user *)data+i,
+ if (copy_from_user(tmp, (char *)data + i,
(i + SX_CHUNK_SIZE >
nbytes) ? nbytes - i :
SX_CHUNK_SIZE)) {
{
int rc;
struct sx_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
int ival;
/* func_enter2(); */
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned __user *) argp);
+ (unsigned int *) arg);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned __user *) argp)) == 0) {
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
}
break;
case TIOCGSERIAL:
- rc = gs_getserial(&port->gs, argp);
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = gs_getserial(&port->gs, (struct serial_struct *) arg);
break;
case TIOCSSERIAL:
- rc = gs_setserial(&port->gs, argp);
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = gs_setserial(&port->gs, (struct serial_struct *) arg);
break;
default:
rc = -ENOIOCTLCMD;
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
+ info->xmit_buf = 0;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
shutdown(info);
tty->closing = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = NULL;/* tty layer will release tty struct */
+ info->tty = 0; /* tty layer will release tty struct */
if(info->count)
info->count--;
}
}
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = NULL;
+ info->memory_base = 0;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = NULL;
+ info->lcr_base = 0;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
#define TMCS 0x64
#define TEPR 0x65
-/*
- * FIXME: DAR here clashed with asm-ppc/reg.h and asm-sh/.../dma.h
- */
-#undef DAR
/* DMA Controller Register macros */
#define DAR 0x80
#define DARL 0x80
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = NULL;/* tty layer will release tty struct */
+ info->tty = 0; /* tty layer will release tty struct */
if(info->count)
info->count--;
}
shutdown(info);
tty->closing = 0;
- info->tty = NULL;
+ info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
+ info->tty = 0;
wake_up_interruptible(&info->open_wait);
}
if (info->tx_buf) {
kfree(info->tx_buf);
- info->tx_buf = NULL;
+ info->tx_buf = 0;
}
spin_lock_irqsave(&info->lock,flags);
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = NULL;
+ info->memory_base = 0;
}
if (info->sca_base) {
iounmap(info->sca_base - info->sca_offset);
- info->sca_base=NULL;
+ info->sca_base=0;
}
if (info->statctrl_base) {
iounmap(info->statctrl_base - info->statctrl_offset);
- info->statctrl_base=NULL;
+ info->statctrl_base=0;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = NULL;
+ info->lcr_base = 0;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
u32 speed = info->params.clock_speed;
info->params.clock_speed = 3686400;
- info->tty = NULL;
+ info->tty = 0;
/* assume failure */
info->init_error = DiagStatus_DmaFailure;
init_ti_parallel(minor);
parport_release(table[minor].dev);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
if (count == 0)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
parport_claim_or_block(table[minor].dev);
while (n < count) {
printk(TPQIC02_NAME ": ll_do_qic_cmd(%x, %ld) failed\n", cmd, (long) timeout);
return -EIO;
}
-#ifdef OBSOLETE
+#if OBSOLETE
/* wait for ready since it may not be active immediately after reading status */
while ((inb_p(QIC02_STAT_PORT) & QIC02_STAT_READY) != 0)
cpu_relax();
if (stat != TE_OK)
return stat;
-#ifdef OBSOLETE
+#if OBSOLETE
/************* not needed iff rd_status() would wait for ready!!!!!! **********/
if (wait_for_ready(TIM_S) != TE_OK) { /*** not sure this is needed ***/
tpqputs(TPQD_ALWAYS, "wait_for_ready failed in start_dma");
release_region(QIC02_TAPE_PORT, QIC02_TAPE_PORT_RANGE);
if (buffaddr)
free_pages((unsigned long) buffaddr, get_order(TPQBUF_SIZE));
- buffaddr = NULL; /* Better to cause a panic than overwite someone else */
+ buffaddr = 0; /* Better to cause a panic than overwite someone else */
status_zombie = YES;
} /* qic02_release_resources */
ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
static unsigned int tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-static int ptmx_open(struct inode *, struct file *);
static int tty_release(struct inode *, struct file *);
int tty_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg);
static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
return 0;
}
static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
return -EIO;
}
.fasync = tty_fasync,
};
-#ifdef CONFIG_UNIX98_PTYS
-static struct file_operations ptmx_fops = {
- .llseek = no_llseek,
- .read = tty_read,
- .write = tty_write,
- .poll = tty_poll,
- .ioctl = tty_ioctl,
- .open = ptmx_open,
- .release = tty_release,
- .fasync = tty_fasync,
-};
-#endif
-
static struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
struct tty_struct * tty;
struct inode *inode;
+ /* Can't seek (pread) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
tty = (struct tty_struct *)file->private_data;
inode = file->f_dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
struct tty_struct * tty;
struct inode *inode = file->f_dentry->d_inode;
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (p) {
ssize_t res;
+ /* Can't seek (pwrite) on ttys. */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
res = vfs_write(p, buf, count, &p->f_pos);
fput(p);
return res;
{
struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
- int devpts_master, devpts;
+ int devpts_master;
int idx;
char buf[64];
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
- devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
- devpts_master = pty_master && devpts;
+ devpts_master = pty_master && (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM);
o_tty = tty->link;
#ifdef TTY_PARANOIA_CHECK
#ifdef CONFIG_UNIX98_PTYS
/* Make this pty number available for reallocation */
- if (devpts) {
+ if (devpts_master) {
down(&allocated_ptys_lock);
idr_remove(&allocated_ptys, idx);
up(&allocated_ptys_lock);
dev_t device = inode->i_rdev;
unsigned short saved_flags = filp->f_flags;
- nonseekable_open(inode, filp);
retry_open:
noctty = filp->f_flags & O_NOCTTY;
index = -1;
return -ENODEV;
}
- driver = get_tty_driver(device, &index);
- if (!driver)
- return -ENODEV;
+#ifdef CONFIG_UNIX98_PTYS
+ if (device == MKDEV(TTYAUX_MAJOR,2)) {
+ int idr_ret;
+
+ /* find a device that is not in use. */
+ down(&allocated_ptys_lock);
+ if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+ up(&allocated_ptys_lock);
+ return -ENOMEM;
+ }
+ idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+ if (idr_ret < 0) {
+ up(&allocated_ptys_lock);
+ if (idr_ret == -EAGAIN)
+ return -ENOMEM;
+ return -EIO;
+ }
+ if (index >= pty_limit) {
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return -EIO;
+ }
+ up(&allocated_ptys_lock);
+
+ driver = ptm_driver;
+ retval = init_dev(driver, index, &tty);
+ if (retval) {
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return retval;
+ }
+
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ if (devpts_pty_new(tty->link))
+ retval = -ENOMEM;
+ } else
+#endif
+ {
+ driver = get_tty_driver(device, &index);
+ if (!driver)
+ return -ENODEV;
got_driver:
- retval = init_dev(driver, index, &tty);
- if (retval)
- return retval;
+ retval = init_dev(driver, index, &tty);
+ if (retval)
+ return retval;
+ }
filp->private_data = tty;
file_move(filp, &tty->tty_files);
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
+
+#ifdef CONFIG_UNIX98_PTYS
+ if (index != -1) {
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ }
+#endif
+
release_dev(filp);
if (retval != -ERESTARTSYS)
return retval;
return 0;
}
-#ifdef CONFIG_UNIX98_PTYS
-static int ptmx_open(struct inode * inode, struct file * filp)
-{
- struct tty_struct *tty;
- int retval;
- int index;
- int idr_ret;
-
- nonseekable_open(inode, filp);
-
- /* find a device that is not in use. */
- down(&allocated_ptys_lock);
- if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
- return -ENOMEM;
- }
- idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
- if (idr_ret < 0) {
- up(&allocated_ptys_lock);
- if (idr_ret == -EAGAIN)
- return -ENOMEM;
- return -EIO;
- }
- if (index >= pty_limit) {
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return -EIO;
- }
- up(&allocated_ptys_lock);
-
- retval = init_dev(ptm_driver, index, &tty);
- if (retval)
- goto out;
-
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- filp->private_data = tty;
- file_move(filp, &tty->tty_files);
-
- retval = -ENOMEM;
- if (devpts_pty_new(tty->link))
- goto out1;
-
- check_tty_count(tty, "tty_open");
- retval = ptm_driver->open(tty, filp);
- if (!retval)
- return 0;
-out1:
- release_dev(filp);
-out:
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return retval;
-}
-#endif
-
static int tty_release(struct inode * inode, struct file * filp)
{
lock_kernel();
class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
#ifdef CONFIG_UNIX98_PTYS
- cdev_init(&ptmx_cdev, &ptmx_fops);
+ cdev_init(&ptmx_cdev, &tty_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
panic("Couldn't register /dev/ptmx driver\n");
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/pm.h>
-#include <linux/font.h>
#include <asm/io.h>
#include <asm/system.h>
#define max_font_size 65536
-int con_font_get(int currcons, struct console_font_op *op)
+int con_font_op(int currcons, struct console_font_op *op)
{
- struct console_font font;
int rc = -EINVAL;
- int c;
+ int size = max_font_size, set;
+ u8 *temp = NULL;
+ struct console_font_op old_op;
if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
+ goto quit;
+ memcpy(&old_op, op, sizeof(old_op));
+ if (op->op == KD_FONT_OP_SET) {
+ if (!op->data)
+ return -EINVAL;
+ if (op->charcount > 512)
+ goto quit;
+ if (!op->height) { /* Need to guess font height [compat] */
+ int h, i;
+ u8 __user *charmap = op->data;
+ u8 tmp;
+
+ /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
+ so that we can get rid of this soon */
+ if (!(op->flags & KD_FONT_FLAG_OLD))
+ goto quit;
+ rc = -EFAULT;
+ for (h = 32; h > 0; h--)
+ for (i = 0; i < op->charcount; i++) {
+ if (get_user(tmp, &charmap[32*i+h-1]))
+ goto quit;
+ if (tmp)
+ goto nonzero;
+ }
+ rc = -EINVAL;
+ goto quit;
+ nonzero:
+ rc = -EINVAL;
+ op->height = h;
+ }
+ if (op->width > 32 || op->height > 32)
+ goto quit;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+ set = 1;
+ } else if (op->op == KD_FONT_OP_GET)
+ set = 0;
+ else {
+ acquire_console_sem();
+ rc = sw->con_font_op(vc_cons[currcons].d, op);
+ release_console_sem();
+ return rc;
+ }
if (op->data) {
- font.data = kmalloc(max_font_size, GFP_KERNEL);
- if (!font.data)
+ temp = kmalloc(size, GFP_KERNEL);
+ if (!temp)
return -ENOMEM;
- } else
- font.data = NULL;
-
- acquire_console_sem();
- if (sw->con_font_get)
- rc = sw->con_font_get(vc_cons[currcons].d, &font);
- else
- rc = -ENOSYS;
- release_console_sem();
-
- if (rc)
- goto out;
-
- c = (font.width+7)/8 * 32 * font.charcount;
-
- if (op->data && font.charcount > op->charcount)
- rc = -ENOSPC;
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- if (font.width > op->width || font.height > op->height)
- rc = -ENOSPC;
- } else {
- if (font.width != 8)
- rc = -EIO;
- else if ((op->height && font.height > op->height) ||
- font.height > 32)
- rc = -ENOSPC;
+ if (set && copy_from_user(temp, op->data, size)) {
+ rc = -EFAULT;
+ goto quit;
+ }
+ op->data = temp;
}
- if (rc)
- goto out;
-
- if (op->data && copy_to_user(op->data, font.data, c))
- rc = -EFAULT;
-
-out:
- kfree(font.data);
- return rc;
-}
-
-int con_font_set(int currcons, struct console_font_op *op)
-{
- struct console_font font;
- int rc = -EINVAL;
- int size;
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
- if (!op->data)
- return -EINVAL;
- if (op->charcount > 512)
- return -EINVAL;
- if (!op->height) { /* Need to guess font height [compat] */
- int h, i;
- u8 __user *charmap = op->data;
- u8 tmp;
-
- /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
- so that we can get rid of this soon */
- if (!(op->flags & KD_FONT_FLAG_OLD))
- return -EINVAL;
- for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++) {
- if (get_user(tmp, &charmap[32*i+h-1]))
- return -EFAULT;
- if (tmp)
- goto nonzero;
- }
- return -EINVAL;
- nonzero:
- op->height = h;
- }
- if (op->width <= 0 || op->width > 32 || op->height > 32)
- return -EINVAL;
- size = (op->width+7)/8 * 32 * op->charcount;
- if (size > max_font_size)
- return -ENOSPC;
- font.charcount = op->charcount;
- font.height = op->height;
- font.width = op->width;
- font.data = kmalloc(size, GFP_KERNEL);
- if (!font.data)
- return -ENOMEM;
- if (copy_from_user(font.data, op->data, size)) {
- kfree(font.data);
- return -EFAULT;
- }
acquire_console_sem();
- if (sw->con_font_set)
- rc = sw->con_font_set(vc_cons[currcons].d, &font, op->flags);
- else
- rc = -ENOSYS;
+ rc = sw->con_font_op(vc_cons[currcons].d, op);
release_console_sem();
- kfree(font.data);
- return rc;
-}
-
-int con_font_default(int currcons, struct console_font_op *op)
-{
- struct console_font font = {.width = op->width, .height = op->height};
- char name[MAX_FONT_NAME];
- char *s = name;
- int rc;
-
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
- if (!op->data)
- s = NULL;
- else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
- return -EFAULT;
- else
- name[MAX_FONT_NAME - 1] = 0;
- acquire_console_sem();
- if (sw->con_font_default)
- rc = sw->con_font_default(vc_cons[currcons].d, &font, s);
- else
- rc = -ENOSYS;
- release_console_sem();
- if (!rc) {
- op->width = font.width;
- op->height = font.height;
+ op->data = old_op.data;
+ if (!rc && !set) {
+ int c = (op->width+7)/8 * 32 * op->charcount;
+
+ if (op->data && op->charcount > old_op.charcount)
+ rc = -ENOSPC;
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ if (op->width > old_op.width ||
+ op->height > old_op.height)
+ rc = -ENOSPC;
+ } else {
+ if (op->width != 8)
+ rc = -EIO;
+ else if ((old_op.height && op->height > old_op.height) ||
+ op->height > 32)
+ rc = -ENOSPC;
+ }
+ if (!rc && op->data && copy_to_user(op->data, temp, c))
+ rc = -EFAULT;
}
+quit: if (temp)
+ kfree(temp);
return rc;
}
-int con_font_copy(int currcons, struct console_font_op *op)
-{
- int con = op->height;
- struct vc_data *vc;
- int rc;
-
- if (vt_cons[currcons]->vc_mode != KD_TEXT)
- return -EINVAL;
-
- acquire_console_sem();
- vc = vc_cons[currcons].d;
- if (!sw->con_font_copy)
- rc = -ENOSYS;
- else if (con < 0 || !vc_cons_allocated(con))
- rc = -ENOTTY;
- else if (con == vc->vc_num) /* nothing to do */
- rc = 0;
- else
- rc = sw->con_font_copy(vc, con);
- release_console_sem();
- return rc;
-}
-
-int con_font_op(int currcons, struct console_font_op *op)
-{
- switch (op->op) {
- case KD_FONT_OP_SET:
- return con_font_set(currcons, op);
- case KD_FONT_OP_GET:
- return con_font_get(currcons, op);
- case KD_FONT_OP_SET_DEFAULT:
- return con_font_default(currcons, op);
- case KD_FONT_OP_COPY:
- return con_font_copy(currcons, op);
- }
- return -ENOSYS;
-}
-
/*
* Interface exported to selection and vcs.
*/
op.width = 8;
op.height = 0;
op.charcount = 256;
- op.data = up;
+ op.data = (char *) arg;
return con_font_op(fg_console, &op);
}
op.width = 8;
op.height = 32;
op.charcount = 256;
- op.data = up;
+ op.data = (char *) arg;
return con_font_op(fg_console, &op);
}
Say N if you are unsure.
-config IXP2000_WATCHDOG
- tristate "IXP2000 Watchdog"
- depends on WATCHDOG && ARCH_IXP2000
- help
- Say Y here if to include support for the watchdog timer
- in the Intel IXP2000(2400, 2800, 2850) network processors.
- This driver can be built as a module by choosing M. The module
- will be called ixp2000_wdt.
-
- Say N if you are unsure.
-
config SA1100_WATCHDOG
tristate "SA1100/PXA2xx watchdog"
depends on WATCHDOG && ( ARCH_SA1100 || ARCH_PXA )
obj-$(CONFIG_PCIPCWATCHDOG) += pcwd_pci.o
obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
-obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
/* Activate */
acq_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int acq_close(struct inode *inode, struct file *file)
static ssize_t
advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
*/
advwdt_ping();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t ali_write(struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
ali_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/*
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
return -EBUSY;
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return 0;
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
eurwdt_timeout = WDT_TIMEOUT; /* initial timeout */
/* Activate the WDT */
eurwdt_activate_timer();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
*/
tco_timer_keepalive ();
tco_timer_start ();
- return nonseekable_open(inode, file);
+ return 0;
}
static int i8xx_tco_release (struct inode *inode, struct file *file)
static ssize_t i8xx_tco_write (struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
static ssize_t
ibwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
/* Activate */
ibwdt_ping();
spin_unlock(&ibwdt_lock);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
indydog_alive = 1;
printk(KERN_INFO "Started watchdog timer.\n");
- return nonseekable_open(inode, file);
+ return 0;
}
static int indydog_release(struct inode *inode, struct file *file)
static ssize_t indydog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* Refresh the timer. */
if (len) {
indydog_ping();
+++ /dev/null
-/*
- * drivers/watchdog/ixp2000_wdt.c
- *
- * Watchdog driver for Intel IXP2000 network processors
- *
- * Adapted from the IXP4xx watchdog driver by Lennert Buytenhek.
- * The original version carries these notices:
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/init.h>
-
-#include <asm/hardware.h>
-#include <asm/bitops.h>
-#include <asm/uaccess.h>
-
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int nowayout = 1;
-#else
-static int nowayout = 0;
-#endif
-static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
-static unsigned long wdt_status;
-
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-
-static unsigned long wdt_tick_rate;
-
-static void
-wdt_enable(void)
-{
- ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE);
- ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE);
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
- ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE);
-}
-
-static void
-wdt_disable(void)
-{
- ixp2000_reg_write(IXP2000_T4_CTL, 0);
-}
-
-static void
-wdt_keepalive(void)
-{
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
-}
-
-static int
-ixp2000_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- wdt_enable();
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t
-ixp2000_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- wdt_keepalive();
- }
-
- return len;
-}
-
-
-static struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "IXP2000 Watchdog",
-};
-
-static int
-ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOIOCTLCMD;
- int time;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
- if (ret)
- break;
-
- if (time <= 0 || time > 60) {
- ret = -EINVAL;
- break;
- }
-
- heartbeat = time;
- wdt_keepalive();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static int
-ixp2000_wdt_release(struct inode *inode, struct file *file)
-{
- if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
- wdt_disable();
- } else {
- printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
- "timer will not stop\n");
- }
-
- clear_bit(WDT_IN_USE, &wdt_status);
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- return 0;
-}
-
-
-static struct file_operations ixp2000_wdt_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = ixp2000_wdt_write,
- .ioctl = ixp2000_wdt_ioctl,
- .open = ixp2000_wdt_open,
- .release = ixp2000_wdt_release,
-};
-
-static struct miscdevice ixp2000_wdt_miscdev =
-{
- .minor = WATCHDOG_MINOR,
- .name = "IXP2000 Watchdog",
- .fops = &ixp2000_wdt_fops,
-};
-
-static int __init ixp2000_wdt_init(void)
-{
- wdt_tick_rate = (*IXP2000_T1_CLD * HZ)/ 256;;
-
- return misc_register(&ixp2000_wdt_miscdev);
-}
-
-static void __exit ixp2000_wdt_exit(void)
-{
- misc_deregister(&ixp2000_wdt_miscdev);
-}
-
-module_init(ixp2000_wdt_init);
-module_exit(ixp2000_wdt_exit);
-
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net">);
-MODULE_DESCRIPTION("IXP2000 Network Processor Watchdog");
-
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
-
-module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
wdt_enable();
- return nonseekable_open(inode, file);
+ return 0;
}
static ssize_t
ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character */
if(count){
zf_timer_on();
- return nonseekable_open(inode, file);
+ return 0;
}
static int zf_close(struct inode *inode, struct file *file)
mixcomwd_timer_alive=0;
}
}
- return nonseekable_open(inode, file);
+ return 0;
}
static int mixcomwd_release(struct inode *inode, struct file *file)
static ssize_t mixcomwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos) {
+ return -ESPIPE;
+ }
+
if(len)
{
if (!nowayout) {
static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
/* Activate */
pcwd_start();
pcwd_keepalive();
- return nonseekable_open(inode, file);
+ return(0);
}
static int pcwd_close(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (pcwd_get_temperature(&temperature))
return -EFAULT;
if (!supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcwd_temp_close(struct inode *inode, struct file *file)
static ssize_t pcipcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
pcipcwd_start();
pcipcwd_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcipcwd_release(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (pcipcwd_get_temperature(&temperature))
return -EFAULT;
if (!pcipcwd_private.supports_temp)
return -ENODEV;
- return nonseekable_open(inode, file);
+ return 0;
}
static int pcipcwd_temp_release(struct inode *inode, struct file *file)
static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
*/
static int sa1100dog_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
if (test_and_set_bit(1,&sa1100wdt_users))
return -EBUSY;
static ssize_t sa1100dog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
static int fop_open(struct inode * inode, struct file * file)
{
- nonseekable_open(inode, file);
-
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
static int sc1200wdt_open(struct inode *inode, struct file *file)
{
- nonseekable_open(inode, file);
-
/* allow one at a time */
if (down_trylock(&open_sem))
return -EBUSY;
static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
static int fop_open(struct inode * inode, struct file * file)
{
- nonseekable_open(inode, file);
-
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
return -EBUSY;
scx200_wdt_enable();
- return nonseekable_open(inode, file);
+ return 0;
}
static int scx200_wdt_release(struct inode *inode, struct file *file)
static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* check for a magic close character */
if (len)
{
sh_wdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
* Activate timer
*/
softdog_keepalive();
- return nonseekable_open(inode, file);
+ return 0;
}
static int softdog_release(struct inode *inode, struct file *file)
static ssize_t softdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
static ssize_t
wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
*/
wdt_ping();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
+ /* We can't seek */
+ if(ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
/* Good, fire up the show */
wdt_startup();
- return nonseekable_open(inode, file);
+ return 0;
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
* Activate
*/
wafwdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
static int
static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if(count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdt_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
if (wdt_get_temperature(&temperature))
return -EFAULT;
static int wdt_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
/**
ret = 0;
#endif
- nonseekable_open(inode, file);
return ret;
}
static ssize_t
watchdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
/*
* Refresh the timer.
*/
__module_get(THIS_MODULE);
wdt977_start();
- return nonseekable_open(inode, file);
+ return 0;
}
static int wdt977_release(struct inode *inode, struct file *file)
* write of data will do, as we we don't define content meaning.
*/
-static ssize_t wdt977_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t wdt977_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
int status;
int new_options, retval = -EINVAL;
int new_timeout;
- union {
- struct watchdog_info __user *ident;
- int __user *i;
- } uarg;
-
- uarg.i = (int __user *)arg;
switch(cmd)
{
return -ENOIOCTLCMD;
case WDIOC_GETSUPPORT:
- return copy_to_user(uarg.ident, &ident,
+ return copy_to_user((struct watchdog_info *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
wdt977_get_status(&status);
- return put_user(status, uarg.i);
+ return put_user(status, (int *) arg);
case WDIOC_GETBOOTSTATUS:
- return put_user(0, uarg.i);
+ return put_user(0, (int *) arg);
case WDIOC_KEEPALIVE:
wdt977_keepalive();
return 0;
case WDIOC_SETOPTIONS:
- if (get_user (new_options, uarg.i))
+ if (get_user (new_options, (int *) arg))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
return retval;
case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, uarg.i))
+ if (get_user(new_timeout, (int *) arg))
return -EFAULT;
if (wdt977_set_timeout(new_timeout))
/* Fall */
case WDIOC_GETTIMEOUT:
- return put_user(timeout, uarg.i);
+ return put_user(timeout, (int *)arg);
}
}
static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdtpci_start();
- return nonseekable_open(inode, file);
+ return 0;
}
/**
{
int temperature;
+ /* Can't seek (pread) on this device */
+ if (ptr != &file->f_pos)
+ return -ESPIPE;
+
if (wdtpci_get_temperature(&temperature))
return -EFAULT;
static int wdtpci_temp_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return 0;
}
/**
/*********************** cpufreq_sysctl interface ********************/
static int
cpufreq_procctl(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char buf[16], *p;
int cpu = (long) ctl->extra1;
unsigned int len, left = *lenp;
- if (!left || (*ppos && !write) || !cpu_online(cpu)) {
+ if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) {
*lenp = 0;
return 0;
}
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static int __init soc_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
struct soc *s;
int cards = 0;
static int __init socal_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
struct socal *s;
int cards = 0;
/*
- * Parse the EFI PCDP table to locate the console device.
- *
- * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2002, 2003, 2004 Hewlett-Packard Co.
+ * Khalid Aziz <khalid_aziz@hp.com>
* Alex Williamson <alex.williamson@hp.com>
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Parse the EFI PCDP table to locate the console device.
*/
#include <linux/acpi.h>
/*
+ * Copyright (C) 2002, 2004 Hewlett-Packard Co.
+ * Khalid Aziz <khalid_aziz@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
* Definitions for PCDP-defined console devices
*
* v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf
* v2.0: http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf
- *
- * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
- * Khalid Aziz <khalid.aziz@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define PCDP_CONSOLE 0
/* Allocate space for two transmit and two receive buffer
* descriptors in the DP ram.
*/
- data->dp_addr = cpm_dpalloc(sizeof(cbd_t) * 4, 8);
-
+ data->dp_addr = m8xx_cpm_dpram_offset(m8xx_cpm_dpalloc(sizeof(cbd_t)
+ * 4));
+
/* ptr to i2c area */
data->i2c = (i2c8xx_t *)&(((immap_t *)IMAP_ADDR)->im_i2c);
}
config SENSORS_VIA686A
tristate "VIA686A"
- depends on I2C && PCI && EXPERIMENTAL
+ depends on I2C && EXPERIMENTAL
select I2C_SENSOR
select I2C_ISA
help
config BLK_DEV_IDE_PMAC
bool "Builtin PowerMac IDE support"
- depends on PPC_PMAC && IDE=y
+ depends on PPC_PMAC
help
This driver provides support for the built-in IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
do_end_request = 1;
} else if (sense_key == ILLEGAL_REQUEST ||
sense_key == DATA_PROTECT) {
+ /*
+ * check if this was a write protected media
+ */
+ if (rq_data_dir(rq) == WRITE) {
+ printk("ide-cd: media marked write protected\n");
+ set_disk_ro(drive->disk, 1);
+ }
+
/* No point in retrying after an illegal
request or data protect error.*/
ide_dump_status (drive, "command error", stat);
* sg request
*/
if (rq->bio) {
- int mask = drive->queue->dma_alignment;
- unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
-
- info->cmd = rq_data_dir(rq);
+ if (rq->data_len & 3) {
+ printk("%s: block pc not aligned, len=%d\n", drive->name, rq->data_len);
+ cdrom_end_request(drive, 0);
+ return ide_stopped;
+ }
info->dma = drive->using_dma;
-
- /*
- * check if dma is safe
- */
- if ((rq->data_len & mask) || (addr & mask))
- info->dma = 0;
+ info->cmd = rq_data_dir(rq);
}
/* Start sending the command to the drive. */
int nslots;
blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
- blk_queue_dma_alignment(drive->queue, 31);
+ blk_queue_dma_alignment(drive->queue, 3);
drive->queue->unplug_delay = (1 * HZ) / 1000;
if (!drive->queue->unplug_delay)
drive->queue->unplug_delay = 1;
nslots = ide_cdrom_probe_capabilities (drive);
/*
- * set correct block size
+ * set correct block size and read-only for non-ram media
*/
+ set_disk_ro(drive->disk, !CDROM_CONFIG_FLAGS(drive)->ram);
blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
#if 0
{
struct block_device *bdev = inode->i_bdev;
ide_drive_t *drive = bdev->bd_disk->private_data;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
if (err == -EINVAL) {
struct cdrom_info *info = drive->driver_data;
- err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
+ err = cdrom_ioctl(&info->devinfo, inode, cmd, arg);
}
return err;
}
unsigned int cmd, unsigned long arg)
{
struct block_device *bdev = inode->i_bdev;
- return generic_ide_ioctl(file, bdev, cmd, arg);
+ return generic_ide_ioctl(bdev, cmd, arg);
}
static int idedisk_media_changed(struct gendisk *disk)
ide_drive_t *drive = bdev->bd_disk->private_data;
idefloppy_floppy_t *floppy = drive->driver_data;
void __user *argp = (void __user *)arg;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
int prevent = (arg) ? 1 : 0;
idefloppy_pc_t pc;
if (err != -EINVAL)
idetape_tape_t *tape = drive->driver_data;
ssize_t bytes_read,temp, actually_read = 0, rc;
+ if (ppos != &file->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ return -ENXIO;
+ }
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 3)
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count);
idetape_tape_t *tape = drive->driver_data;
ssize_t retval, actually_written = 0;
+ if (ppos != &file->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ return -ENXIO;
+ }
+
/* The drive is write protected. */
if (tape->write_prot)
return -EACCES;
idetape_pc_t pc;
int retval;
- nonseekable_open(inode, filp);
#if IDETAPE_DEBUG_LOG
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_open\n");
#endif /* IDETAPE_DEBUG_LOG */
{
struct block_device *bdev = inode->i_bdev;
ide_drive_t *drive = bdev->bd_disk->private_data;
- int err = generic_ide_ioctl(file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(bdev, cmd, arg);
if (err == -EINVAL)
err = idetape_blkdev_ioctl(drive, cmd, arg);
return err;
return ide_do_drive_cmd(drive, &rq, ide_head_wait);
}
-int generic_ide_ioctl(struct file *file, struct block_device *bdev,
- unsigned int cmd, unsigned long arg)
+int generic_ide_ioctl(struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
{
ide_drive_t *drive = bdev->bd_disk->private_data;
ide_settings_t *setting;
case CDROMEJECT:
case CDROMCLOSETRAY:
- return scsi_cmd_ioctl(file, bdev->bd_disk, cmd, p);
+ return scsi_cmd_ioctl(bdev->bd_disk, cmd, p);
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
/*
- * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
+ * linux/drivers/ide/pci/hpt366.c Version 0.34 Sept 17, 2002
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
- * Portions Copyright (C) 2003 Red Hat Inc
*
* Thanks to HighPoint Technologies for their assistance, and hardware.
* Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
* Reset the hpt366 on error, reset on dma
* Fix disabling Fast Interrupt hpt366.
* Mike Waychison <crlf@sun.com>
- *
- * Added support for 372N clocking and clock switching. The 372N needs
- * different clocks on read/write. This requires overloading rw_disk and
- * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
- * keeping me sane.
- * Alan Cox <alan@redhat.com>
- *
*/
class_rev &= 0xff;
switch(dev->device) {
- /* Remap new 372N onto 372 */
- case PCI_DEVICE_ID_TTI_HPT372N:
- class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
case PCI_DEVICE_ID_TTI_HPT374:
class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
case PCI_DEVICE_ID_TTI_HPT371:
return mode;
}
-/*
- * Note for the future; the SATA hpt37x we must set
- * either PIO or UDMA modes 0,4,5
- */
-
static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
return __ide_dma_end(drive);
}
-/**
- * hpt372n_set_clock - perform clock switching dance
- * @drive: Drive to switch
- * @mode: Switching mode (0x21 for write, 0x23 otherwise)
- *
- * Switch the DPLL clock on the HPT372N devices. This is a
- * right mess.
- */
-
-static void hpt372n_set_clock(ide_drive_t *drive, int mode)
-{
- ide_hwif_t *hwif = HWIF(drive);
-
- /* FIXME: should we check for DMA active and BUG() */
- /* Tristate the bus */
- outb(0x80, hwif->dma_base+0x73);
- outb(0x80, hwif->dma_base+0x77);
-
- /* Switch clock and reset channels */
- outb(mode, hwif->dma_base+0x7B);
- outb(0xC0, hwif->dma_base+0x79);
-
- /* Reset state machines */
- outb(0x37, hwif->dma_base+0x70);
- outb(0x37, hwif->dma_base+0x74);
-
- /* Complete reset */
- outb(0x00, hwif->dma_base+0x79);
-
- /* Reconnect channels to bus */
- outb(0x00, hwif->dma_base+0x73);
- outb(0x00, hwif->dma_base+0x77);
-}
-
-/**
- * hpt372n_rw_disk - wrapper for I/O
- * @drive: drive for command
- * @rq: block request structure
- * @block: block number
- *
- * This is called when a disk I/O is issued to the 372N instead
- * of the default functionality. We need it because of the clock
- * switching
- *
- */
-
-static ide_startstop_t hpt372n_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block)
-{
- int wantclock;
-
- if(rq_data_dir(rq) == READ)
- wantclock = 0x21;
- else
- wantclock = 0x23;
-
- if(HWIF(drive)->config_data != wantclock)
- {
- hpt372n_set_clock(drive, wantclock);
- HWIF(drive)->config_data = wantclock;
- }
- return __ide_do_rw_disk(drive, rq, block);
-}
-
/*
* Since SUN Cobalt is attempting to do this operation, I should disclose
* this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
u16 freq;
u32 pll;
u8 reg5bh;
+
+#if 1
u8 reg5ah = 0;
- unsigned long dmabase = pci_resource_start(dev, 4);
- u8 did, rid;
- int is_372n = 0;
-
pci_read_config_byte(dev, 0x5a, ®5ah);
/* interrupt force enable */
pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
-
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
+#endif
/*
* default to pci clock. make sure MA15/16 are set to output
/*
* set up the PLL. we need to adjust it so that it's stable.
* freq = Tpll * 192 / Tpci
- *
- * Todo. For non x86 should probably check the dword is
- * set to 0xABCDExxx indicating the BIOS saved f_CNT
*/
pci_read_config_word(dev, 0x78, &freq);
freq &= 0x1FF;
-
- /*
- * The 372N uses different PCI clock information and has
- * some other complications
- * On PCI33 timing we must clock switch
- * On PCI66 timing we must NOT use the PCI clock
- *
- * Currently we always set up the PLL for the 372N
- */
-
- pci_set_drvdata(dev, NULL);
-
- if(is_372n)
- {
- printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
- if(freq < 0x55)
- pll = F_LOW_PCI_33;
- else if(freq < 0x70)
- pll = F_LOW_PCI_40;
- else if(freq < 0x7F)
- pll = F_LOW_PCI_50;
+ if (freq < 0xa0) {
+ pll = F_LOW_PCI_33;
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
else
- pll = F_LOW_PCI_66;
-
- printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll);
-
- /* We always use the pll not the PCI clock on 372N */
- }
- else
- {
- if(freq < 0x9C)
- pll = F_LOW_PCI_33;
- else if(freq < 0xb0)
- pll = F_LOW_PCI_40;
- else if(freq <0xc8)
- pll = F_LOW_PCI_50;
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
+ printk("HPT37X: using 33MHz PCI clock\n");
+ } else if (freq < 0xb0) {
+ pll = F_LOW_PCI_40;
+ } else if (freq < 0xc8) {
+ pll = F_LOW_PCI_50;
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, NULL);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
else
- pll = F_LOW_PCI_66;
-
- if (pll == F_LOW_PCI_33) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
- printk("HPT37X: using 33MHz PCI clock\n");
- } else if (pll == F_LOW_PCI_40) {
- /* Unsupported */
- } else if (pll == F_LOW_PCI_50) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, NULL);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) fifty_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- printk("HPT37X: using 50MHz PCI clock\n");
- } else {
- if (hpt_minimum_revision(dev,8))
- {
- printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
- }
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
- printk("HPT37X: using 66MHz PCI clock\n");
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ printk("HPT37X: using 50MHz PCI clock\n");
+ } else {
+ pll = F_LOW_PCI_66;
+ if (hpt_minimum_revision(dev,8))
+ {
+ printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
+ pci_set_drvdata(dev, NULL);
}
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
+ else
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
+ printk("HPT37X: using 66MHz PCI clock\n");
}
/*
if (pci_get_drvdata(dev))
goto init_hpt37X_done;
- if (hpt_minimum_revision(dev,8))
- {
- printk(KERN_ERR "HPT374: Only 33MHz PCI timings are supported.\n");
- return -EOPNOTSUPP;
- }
/*
* adjust PLL based upon PCI clock, enable it, and wait for
* stabilization.
{
struct pci_dev *dev = hwif->pci_dev;
u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
- u8 did, rid;
- unsigned long dmabase = hwif->dma_base;
- int is_372n = 0;
-
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
-
+
hwif->tuneproc = &hpt3xx_tune_drive;
hwif->speedproc = &hpt3xx_tune_chipset;
hwif->quirkproc = &hpt3xx_quirkproc;
hwif->intrproc = &hpt3xx_intrproc;
hwif->maskproc = &hpt3xx_maskproc;
-
- if(is_372n)
- hwif->rw_disk = &hpt372n_rw_disk;
/*
* The HPT37x uses the CBLID pins as outputs for MA15/MA16
u8 pin1 = 0, pin2 = 0;
unsigned int class_rev;
char *chipset_names[] = {"HPT366", "HPT366", "HPT368",
- "HPT370", "HPT370A", "HPT372",
- "HPT372N" };
+ "HPT370", "HPT370A", "HPT372"};
if (PCI_FUNC(dev->devfn) & 1)
return;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
- if(dev->device == PCI_DEVICE_ID_TTI_HPT372N)
- class_rev = 6;
-
- if(class_rev <= 6)
- d->name = chipset_names[class_rev];
+ strcpy(d->name, chipset_names[class_rev]);
switch(class_rev) {
- case 6:
case 5:
case 4:
case 3: ide_setup_pci_device(dev, d);
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
- { PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl);
.channels = 2, /* 4 */
.autodma = AUTODMA,
.bootable = OFF_BOARD,
- },{ /* 5 */
- .name = "HPT372N",
- .init_setup = init_setup_hpt37x,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- .channels = 2, /* 4 */
- .autodma = AUTODMA,
- .bootable = OFF_BOARD,
}
};
if (!pmif->mediabay) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(IDE_WAKEUP_DELAY);
}
/* Sanitize drive timings */
/* This is necessary to enable IDE when net-booting */
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(IDE_WAKEUP_DELAY);
}
/* Setup MMIO ops */
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
- depends on IEEE1394 && SCSI && (PCI || BROKEN)
+ depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to your IEEE
1394 bus. SBP-2 devices include harddrives and DVD devices.
static int dv1394_fasync(int fd, struct file *file, int on)
{
/* I just copied this code verbatim from Alan Cox's mouse driver example
- (Documentation/DocBook/) */
+ (linux/Documentation/DocBook/) */
struct video_card *video = file_to_video_card(file);
if (file->f_op->ioctl != dv1394_ioctl)
return -EFAULT;
- if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
+ if (copy_from_user(&dv32, (void *)arg, sizeof(dv32)))
return -EFAULT;
dv.api_version = dv32.api_version;
dv32.n_clear_frames = dv.n_clear_frames;
dv32.dropped_frames = dv.dropped_frames;
- if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
+ if (copy_to_user((struct dv1394_status32 *)arg, &dv32, sizeof(dv32)))
ret = -EFAULT;
}
return 0;
}
-static inline void purge_partial_datagram(struct list_head *old)
-{
- struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
- struct list_head *lh, *n;
-
- list_for_each_safe(lh, n, &pd->frag_info) {
- struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
- list_del(lh);
- kfree(fi);
- }
- list_del(old);
- kfree_skb(pd->skb);
- kfree(pd);
-}
/******************************************
* 1394 bus activity functions
return 0;
}
+static inline void purge_partial_datagram(struct list_head *old)
+{
+ struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+ struct list_head *lh, *n;
+
+ list_for_each_safe(lh, n, &pd->frag_info) {
+ struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+ list_del(lh);
+ kfree(fi);
+ }
+ list_del(old);
+ kfree_skb(pd->skb);
+ kfree(pd);
+}
+
static inline int is_datagram_complete(struct list_head *lh, int dg_size)
{
struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
#include "raw1394.h"
#include "raw1394-private.h"
-#define int2ptr(x) ((void __user *)(unsigned long)x)
+#if BITS_PER_LONG == 64
+#define int2ptr(x) ((void __user *)x)
#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
+#else
+#define int2ptr(x) ((void __user *)(u32)x)
+#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
+#endif
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define RAW1394_DEBUG
if (another_host) {
DBGMSG("another hosts entry is valid -> SUCCESS");
if (copy_to_user(int2ptr(req->req.recvb),
- &addr->start,sizeof(u64))) {
+ int2ptr(&addr->start),sizeof(u64))) {
printk(KERN_ERR "raw1394: arm_register failed "
" address-range-entry is invalid -> EFAULT !!!\n");
vfree(addr->addr_space_buffer);
case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
{
struct video1394_wait v;
- unsigned int *psizes = NULL;
+ struct video1394_queue_variable qv;
struct dma_iso_ctx *d;
+ qv.packet_sizes = NULL;
+
if (copy_from_user(&v, argp, sizeof(v)))
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+ unsigned int *psizes;
int buf_size = d->nb_cmd * sizeof(unsigned int);
- struct video1394_queue_variable __user *p = argp;
- unsigned int __user *qv;
- if (get_user(qv, &p->packet_sizes))
+ if (copy_from_user(&qv, argp, sizeof(qv)))
return -EFAULT;
psizes = kmalloc(buf_size, GFP_KERNEL);
if (!psizes)
return -ENOMEM;
- if (copy_from_user(psizes, qv, buf_size)) {
+ if (copy_from_user(psizes, qv.packet_sizes, buf_size)) {
kfree(psizes);
return -EFAULT;
}
+
+ qv.packet_sizes = psizes;
}
spin_lock_irqsave(&d->lock,flags);
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
- if (psizes)
- kfree(psizes);
+ if (qv.packet_sizes)
+ kfree(qv.packet_sizes);
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
initialize_dma_it_prg_var_packet_queue(
- d, v.buffer, psizes,
+ d, v.buffer, qv.packet_sizes,
ohci);
}
}
}
- if (psizes)
- kfree(psizes);
+ if (qv.packet_sizes)
+ kfree(qv.packet_sizes);
return 0;
static int video1394_wr_wait32(unsigned int fd, unsigned int cmd, unsigned long arg,
struct file *file)
{
- struct video1394_wait32 __user *argp = (void __user *)arg;
struct video1394_wait32 wait32;
struct video1394_wait wait;
mm_segment_t old_fs;
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, argp, sizeof(wait32)))
+ if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
- if (copy_to_user(argp, &wait32, sizeof(wait32)))
+ if (copy_to_user((struct video1394_wait32 *)arg, &wait32, sizeof(wait32)))
ret = -EFAULT;
}
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
+ if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
struct video1394_queue_variable {
unsigned int channel;
unsigned int buffer;
- unsigned int __user * packet_sizes; /* Buffer of size:
+ unsigned int* packet_sizes; /* Buffer of size:
buf_size / packet_size */
};
return count;
}
-#elif defined(__x86_64__)
+#elif __x86_64__
#define GET_TIME(x) rdtscl(x)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__)
+#elif __alpha__
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "PCC"
char name[64];
char phys[32];
char type;
- volatile s8 reset;
- volatile s8 layout;
+ volatile char reset;
+ volatile char layout;
};
/*
if (pc110pad_used++)
return 0;
- pc110pad_interrupt(0,NULL,NULL);
- pc110pad_interrupt(0,NULL,NULL);
- pc110pad_interrupt(0,NULL,NULL);
+ pc110pad_interrupt(0,0,0);
+ pc110pad_interrupt(0,0,0);
+ pc110pad_interrupt(0,0,0);
outb(PC110PAD_ON, pc110pad_io + 2);
pc110pad_count = 0;
outb(PC110PAD_OFF, pc110pad_io + 2);
- if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", NULL))
+ if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", 0))
{
release_region(pc110pad_io, 4);
printk(KERN_ERR "pc110pad: Unable to get irq %d.\n", pc110pad_irq);
outb(PC110PAD_OFF, pc110pad_io + 2);
- free_irq(pc110pad_irq, NULL);
+ free_irq(pc110pad_irq, 0);
release_region(pc110pad_io, 4);
}
struct sk_buff *skb;
size_t copied;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!cdev->ap.applid)
return -ENODEV;
struct sk_buff *skb;
u16 mlen;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!cdev->ap.applid)
return -ENODEV;
if ((file->private_data = capidev_alloc()) == 0)
return -ENOMEM;
- return nonseekable_open(inode, file);
+ return 0;
}
static int
if ((len = strlen(inf->info_start)) <= count) {
if (copy_to_user(buf, inf->info_start, len))
return -EFAULT;
- *off += len;
+ file->f_pos += len;
return (len);
}
return (0);
(struct divert_info **) filep->private_data = &divert_info_head;
spin_unlock_irqrestore( &divert_info_lock, flags );
/* start_divert(); */
- return nonseekable_open(ino, filep);
+ return (0);
} /* isdn_divert_open */
/*******************/
card = kmalloc(sizeof(*card), GFP_KERNEL);
if (!card)
- return NULL;
+ return 0;
memset(card, 0, sizeof(*card));
cinfo = kmalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL);
if (!cinfo) {
kfree(card);
- return NULL;
+ return 0;
}
memset(cinfo, 0, sizeof(*cinfo) * nr_controllers);
err_kfree:
kfree(p);
err:
- return NULL;
+ return 0;
}
void avmcard_dma_free(avmcard_dmainfo *p)
int str_length;
int *str_msg;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
if (!file->private_data) {
for (;;) {
while (
filep->private_data = NULL;
- return nonseekable_open(ino, filep);
+ return (0);
}
static int maint_close(struct inode *ino, struct file *filep)
if (*off)
return 0;
+ if (off != &file->f_pos)
+ return -ESPIPE;
divas_get_version(tmpbuf);
if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
static int divas_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return (0);
}
static int divas_close(struct inode *inode, struct file *file)
-/* $Id: platform.h,v 1.37.4.1 2004/07/28 14:47:21 armin Exp $
+/* $Id: platform.h,v 1.37 2004/03/20 17:44:29 armin Exp $
*
* platform.h
*
*/
static __inline__ void diva_os_sleep(dword mSec)
{
- msleep(mSec);
+ unsigned long timeout = HZ * mSec / 1000 + 1;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
}
static __inline__ void diva_os_wait(dword mSec)
{
config HISAX_TELESPCI
bool "Teles PCI"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
config HISAX_NETJET
bool "NETjet card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the NetJet from Traverse
Technologies.
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
- depends on PCI && (BROKEN || !(SPARC64 || PPC))
+ depends on PCI
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
config HISAX_FRITZ_PCIPNP
tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.
return(0);
}
-#ifdef CONFIG_PCI
static struct pci_dev *dev_avm __initdata = NULL;
-#endif
#ifdef __ISAPNP__
static struct pnp_card *pnp_avm_c __initdata = NULL;
#endif
printk(KERN_INFO "FritzPnP: no ISA PnP present\n");
}
#endif
-#ifdef CONFIG_PCI
+#if CONFIG_PCI
if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
cs->irq = dev_avm->irq;
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
}
}
-#ifdef CONFIG_PCI
#include <linux/pci.h>
static struct pci_device_id hisax_pci_tbl[] __initdata = {
};
MODULE_DEVICE_TABLE(pci, hisax_pci_tbl);
-#endif /* CONFIG_PCI */
module_init(HiSax_init);
module_exit(HiSax_exit);
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Eicon Technology for documents and information
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Elsa GmbH for documents and information
*
byteout(cs->hw.hfcD.addr | 1, reg);
}
ret = bytein(cs->hw.hfcD.addr);
-#ifdef HFC_REG_DEBUG
+#if HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
debugl1(cs, "t3c RD %02x %02x", reg, ret);
#endif
}
if (data)
byteout(cs->hw.hfcD.addr, value);
-#ifdef HFC_REG_DEBUG
+#if HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != HFCD_DATA_NODEB))
debugl1(cs, "t3c W%c %02x %02x", data ? 'D' : 'C', reg, value);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
}
bcs->hw.tiger.s_tot += s_cnt;
if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs,"tiger write_raw: c%d %p-%p %d/%d %d %x", bcs->channel,
- buf, p, s_cnt, cnt,
+ debugl1(bcs->cs,"tiger write_raw: c%d %x-%x %d/%d %d %x", bcs->channel,
+ (u_int)buf, (u_int)p, s_cnt, cnt,
bcs->hw.tiger.sendcnt, bcs->cs->hw.njet.irqstat0);
if (bcs->cs->debug & L1_DEB_HSCX_FIFO)
printframe(bcs->cs, bcs->hw.tiger.sp, s_cnt, "snd");
cs->bcs[1].hw.tiger.s_end = cs->bcs[0].hw.tiger.s_end;
memset(cs->bcs[0].hw.tiger.send, 0xff, NETJET_DMA_TXSIZE * sizeof(unsigned int));
- debugl1(cs, "tiger: send buf %p - %p", cs->bcs[0].hw.tiger.send,
- cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1);
+ debugl1(cs, "tiger: send buf %x - %x", (u_int)cs->bcs[0].hw.tiger.send,
+ (u_int)(cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1));
outl(virt_to_bus(cs->bcs[0].hw.tiger.send),
cs->hw.njet.base + NETJET_DMA_READ_START);
outl(virt_to_bus(cs->bcs[0].hw.tiger.s_irq),
"HiSax: No memory for tiger.rec\n");
return;
}
- debugl1(cs, "tiger: rec buf %p - %p", cs->bcs[0].hw.tiger.rec,
- cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1);
+ debugl1(cs, "tiger: rec buf %x - %x", (u_int)cs->bcs[0].hw.tiger.rec,
+ (u_int)(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1));
cs->bcs[1].hw.tiger.rec = cs->bcs[0].hw.tiger.rec;
memset(cs->bcs[0].hw.tiger.rec, 0xff, NETJET_DMA_RXSIZE * sizeof(unsigned int));
outl(virt_to_bus(cs->bcs[0].hw.tiger.rec),
st5481_usb_device_ctrl_msg(adapter, FFMSK_D, 0xfc, NULL, NULL);
st5481_in_mode(d_in, L1_MODE_HDLC);
-#ifdef LOOPBACK
+#if LOOPBACK
// Turn loopback on (data sent on B and D looped back)
st5481_usb_device_ctrl_msg(cs, LBB, 0x04, NULL, NULL);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * Documentation/isdn/HiSax.cert
+ * ../../../Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
#
config HYSDN
tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && PCI && BROKEN_ON_SMP
+ depends on m && PROC_FS && BROKEN_ON_SMP
help
Say Y here if you have one of Hypercope's active PCI ISDN cards
Champ, Ergo and Metro. You will then get a module called hysdn.
}
}
detach_capi_ctr(ctrl);
- ctrl->driverdata = NULL;
+ ctrl->driverdata = 0;
kfree(card->hyctrlinfo);
***********************************************************/
-int hycapi_init(void)
+int hycapi_init()
{
int i;
for(i=0;i<CAPI_MAXAPPL;i++) {
/* write conf file -> boot or send cfg line to card */
/****************************************************/
static ssize_t
-hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
+hysdn_conf_write(struct file *file, const char *buf, size_t count, loff_t * off)
{
struct conf_writedata *cnf;
int i;
uchar ch, *cp;
+ if (&file->f_pos != off) /* fs error check */
+ return (-ESPIPE);
if (!count)
return (0); /* nothing to handle */
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
-hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t * off)
+hysdn_conf_read(struct file *file, char *buf, size_t count, loff_t * off)
{
char *cp;
int i;
+ if (off != &file->f_pos) /* fs error check */
+ return -ESPIPE;
+
if (file->f_mode & FMODE_READ) {
if (!(cp = file->private_data))
return (-EFAULT); /* should never happen */
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return nonseekable_open(ino, filep);
+ return (0);
} /* hysdn_conf_open */
/***************************/
/* write log file -> set log level bits */
/****************************************/
static ssize_t
-hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
+hysdn_log_write(struct file *file, const char *buf, size_t count, loff_t * off)
{
ulong u = 0;
int found = 0;
long base = 10;
hysdn_card *card = (hysdn_card *) file->private_data;
+ if (&file->f_pos != off) /* fs error check */
+ return (-ESPIPE);
+
if (count > (sizeof(valbuf) - 1))
count = sizeof(valbuf) - 1; /* limit length */
if (copy_from_user(valbuf, buf, count))
/* read log file */
/******************/
static ssize_t
-hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t * off)
+hysdn_log_read(struct file *file, char *buf, size_t count, loff_t * off)
{
struct log_data *inf;
int len;
if ((len = strlen(inf->log_start)) <= count) {
if (copy_to_user(buf, inf->log_start, len))
return -EFAULT;
- *off += len;
+ file->f_pos += len;
return (len);
}
return (0);
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return nonseekable_open(ino, filep);
+ return (0);
} /* hysdn_log_open */
/*******************************************************************************/
int retval;
char *p;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
lock_kernel();
if (minor == ISDN_MINOR_STATUS) {
if (!file->private_data) {
int chidx;
int retval;
+ if (off != &file->f_pos)
+ return -ESPIPE;
+
if (minor == ISDN_MINOR_STATUS)
return -EPERM;
if (!dev->drivers)
}
#endif
out:
- nonseekable_open(ino, filep);
return retval;
}
unsigned long expires = 0;
int tmp = 0;
int period = lp->cisco_keepalive_period;
- s8 debserint = lp->cisco_debserint;
+ char debserint = lp->cisco_debserint;
int rc = 0;
if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
* stuff needed to support the Linux X.25 PLP code on top of devices that
* can provide a lab_b service using the concap_proto mechanism.
* This module supports a network interface wich provides lapb_sematics
- * -- as defined in Documentation/networking/x25-iface.txt -- to
+ * -- as defined in ../../Documentation/networking/x25-iface.txt -- to
* the upper layer and assumes that the lower layer provides a reliable
* data link service by means of the concap_device_ops callbacks.
*
}
/* process a frame handed over to us from linux network layer. First byte
- semantics as defined in Documentation/networking/x25-iface.txt
+ semantics as defined in ../../Documentation/networking/x25-iface.txt
*/
int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
- depends on ISDN_I4L && ISA && (BROKEN || !PPC)
+ depends on ISDN_I4L && ISA
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,
card->bar0 + TPAM_PAGE_REGISTER);
/* write the value */
- writel(val, card->bar0 + (((unsigned long)addr) & TPAM_PAGE_SIZE));
+ writel(val, card->bar0 + (((u32)addr) & TPAM_PAGE_SIZE));
}
/*
events; also, the PowerBook button device will be enabled so you can
change the screen brightness.
+# SWIM-3 floppy controller option for 32-bit PowerMacs; PPC_PMAC64 is
+# excluded because the swim3 driver does not support 64-bit PowerMacs.
+config MAC_FLOPPY
+	bool "Support for PowerMac floppy"
+	depends on PPC_PMAC && !PPC_PMAC64
+	help
+	  If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
+	  floppy controller, say Y here. Most commonly found in PowerMacs.
+
config MAC_SERIAL
tristate "Support for PowerMac serial ports (OBSOLETE DRIVER)"
depends on PPC_PMAC && BROKEN
G5 machines.
config ANSLCD
- tristate "Support for ANS LCD display"
+ bool "Support for ANS LCD display"
depends on ADB_CUDA && PPC_PMAC
endmenu
static __inline__ void adb_wait_ms(unsigned int ms)
{
if (current->pid && adb_probe_task_pid &&
- adb_probe_task_pid == current->pid)
- msleep(ms);
- else
+ adb_probe_task_pid == current->pid) {
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1 + ms * HZ / 1000);
+ } else
mdelay(ms);
}
write_lock_irq(&adb_handler_lock);
}
ret = 0;
- adb_handler[index].handler = NULL;
+ adb_handler[index].handler = 0;
}
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
#define FLAG_POWER_FROM_FN 0x00000002
#define FLAG_EMU_FWDEL_DOWN 0x00000004
-static struct adbhid *adbhid[16];
+static struct adbhid *adbhid[16] = { 0 };
static void adbhid_probe(void);
if (adbhid[id]->keycode)
kfree(adbhid[id]->keycode);
kfree(adbhid[id]);
- adbhid[id] = NULL;
+ adbhid[id] = 0;
}
}
static ssize_t __pmac
-anslcd_write( struct file * file, const char __user * buf,
+anslcd_write( struct file * file, const char * buf,
size_t count, loff_t *ppos )
{
- const char __user *p = buf;
+ const char * p = buf;
int i;
#ifdef DEBUG
anslcd_ioctl( struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg )
{
- char ch, __user *temp;
+ char ch, *temp;
#ifdef DEBUG
printk(KERN_DEBUG "LCD: ioctl(%d,%d)\n",cmd,arg);
anslcd_write_byte_ctrl ( 0x02 );
return 0;
case ANSLCD_SENDCTRL:
- temp = (char __user *) arg;
+ temp = (char *) arg;
__get_user(ch, temp);
for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */
anslcd_write_byte_ctrl ( ch );
"* Welcome to *" /* Line #2 */
"********************"; /* Line #4 */
-static int __init
+int __init
anslcd_init(void)
{
int a;
return 0;
}
-static void __exit
-anslcd_exit(void)
-{
- misc_deregister(&anslcd_dev);
- iounmap(anslcd_ptr);
-}
+__initcall(anslcd_init);
-module_init(anslcd_init);
-module_exit(anslcd_exit);
req->data[i] = req->data[i+1];
--req->nbytes;
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
local_irq_save(flags);
if (in_8(&adb->intr.r) != 0)
- macio_adb_interrupt(0, NULL, NULL);
+ macio_adb_interrupt(0, 0, 0);
local_irq_restore(flags);
}
static void rxdma_start(struct mac_serial * info, int current);
static void rxdma_to_tty(struct mac_serial * info);
+#ifndef MIN
+/* NOTE(review): plain textual min macro — each argument is evaluated
+ * twice, so it is unsafe for arguments with side effects (e.g.
+ * MIN(i++, j)). It replaces the single-evaluation min()/min_t() calls in
+ * the hunks below; confirm no caller passes a side-effecting expression. */
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
* lock it in case the copy_from_user blocks while swapping in a page,
if (from_user) {
down(&tmp_buf_sem);
while (1) {
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(count,
+ MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0)
break;
break;
}
spin_lock_irqsave(&info->lock, flags);
- c = min_t(int, c, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
info->xmit_head = ((info->xmit_head + c) &
(SERIAL_XMIT_SIZE-1));
} else {
while (1) {
spin_lock_irqsave(&info->lock, flags);
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = MIN(count,
+ MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0) {
spin_unlock_irqrestore(&info->lock, flags);
break;
} else if (char_time == 0)
char_time = 1;
if (timeout)
- char_time = min_t(unsigned long, char_time, timeout);
+ char_time = MIN(char_time, timeout);
while ((read_zsreg(info->zs_channel, 1) & ALL_SNT) == 0) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(char_time);
#endif /* CONFIG_BLK_DEV_IDE */
return -ENODEV;
}
-EXPORT_SYMBOL(check_media_bay);
int __pmac check_media_bay_by_base(unsigned long base, int what)
{
/* Force an immediate detect */
set_mb_power(bay, 0);
- msleep(MB_POWER_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
bay->content_id = MB_NO;
bay->last_value = bay->ops->content(bay);
bay->value_count = MS_TO_HZ(MB_STABLE_DELAY);
bay->state = mb_empty;
do {
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
media_bay_step(i);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
bay->sleeping = 1;
set_mb_power(bay, 0);
up(&bay->lock);
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
mdev->ofdev.dev.power_state = state;
}
return 0;
/* Force MB power to 0 */
down(&bay->lock);
set_mb_power(bay, 0);
- msleep(MB_POWER_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
if (bay->ops->content(bay) != bay->content_id) {
printk("mediabay%d: content changed during sleep...\n", bay->index);
up(&bay->lock);
bay->cd_retry = 0;
#endif
do {
- msleep(MB_POLL_DELAY);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
media_bay_step(bay->index);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
return nw;
}
-static int start_fcu(void)
-{
-	unsigned char buf = 0xff;
-	int rc;
-
-	rc = fan_write_reg(0xe, &buf, 1);
-	if (rc < 0)
-		return -EIO;
-	rc = fan_write_reg(0x2e, &buf, 1);
-	if (rc < 0)
-		return -EIO;
-	return 0;
-}
-
+/* NOTE(review): this patch deletes start_fcu() entirely (its caller in the
+ * control loop is removed by a later hunk). The FCU start-up writes to
+ * registers 0xe and 0x2e no longer happen — confirm the fan control unit
+ * is started by other means, otherwise fans may stay unmanaged. */
static int set_rpm_fan(int fan, int rpm)
{
unsigned char buf[2];
down(&driver_lock);
- if (start_fcu() < 0) {
- printk(KERN_ERR "kfand: failed to start FCU\n");
- up(&driver_lock);
- goto out;
- }
-
/* Set the PCI fan once for now */
set_pwm_fan(SLOTS_FAN_PWM_ID, SLOTS_FAN_DEFAULT_PWM);
schedule_timeout(HZ - elapsed);
}
- out:
DBG("main_control_loop ended\n");
ctrl_task = 0;
req->complete = 1;
return -EINVAL;
}
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
* disable_irq(), would that work on m68k ? --BenH
*/
local_irq_save(flags);
- cuda_interrupt(0, NULL, NULL);
+ cuda_interrupt(0, 0, 0);
local_irq_restore(flags);
}
}
#endif /* CONFIG_PMAC_PBOOK */
/* Create /proc/pmu */
- proc_pmu_root = proc_mkdir("pmu", NULL);
+ proc_pmu_root = proc_mkdir("pmu", 0);
if (proc_pmu_root) {
int i;
proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root,
}
if (pmu_state == idle)
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
udelay(10);
}
return -EINVAL;
}
- req->next = NULL;
+ req->next = 0;
req->sent = 0;
req->complete = 0;
return;
if (disable_poll)
return;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
}
void __openfirmware
/* Kicks ADB read when PMU is suspended */
adb_int_pending = 1;
do {
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
} while (pmu_suspended && (adb_int_pending || pmu_state != idle
|| req_awaiting_reply));
}
if (!via)
return;
while((pmu_state != idle && pmu_state != locked) || !req->complete)
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
}
/* This function loops until the PMU is idle and prevents it from
spin_unlock_irqrestore(&pmu_lock, flags);
if (req_awaiting_reply)
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
spin_lock_irqsave(&pmu_lock, flags);
if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
#ifdef SUSPEND_USES_PMU
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
- req_awaiting_reply = NULL;
+ req_awaiting_reply = 0;
if (len <= 2)
req->reply_len = 0;
else {
pmu_irq_stats[1]++;
adb_int_pending = 1;
spin_unlock_irqrestore(&pmu_lock, flags);
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
if (n->list.next == 0)
return -ENOENT;
list_del(&n->list);
- n->list.next = NULL;
+ n->list.next = 0;
return 0;
}
/* Force a poll of ADB interrupts */
adb_int_pending = 1;
- via_pmu_interrupt(0, NULL, NULL);
+ via_pmu_interrupt(0, 0, 0);
/* Restart jiffies & scheduling */
wakeup_decrementer();
lock_kernel();
if (pp != 0) {
- file->private_data = NULL;
+ file->private_data = 0;
spin_lock_irqsave(&all_pvt_lock, flags);
list_del(&pp->list);
spin_unlock_irqrestore(&all_pvt_lock, flags);
u_int cmd, u_long arg)
{
struct pmu_private *pp = filp->private_data;
- __u32 __user *argp = (__u32 __user *)arg;
int error;
switch (cmd) {
sleep_in_progress = 0;
return error;
case PMU_IOC_CAN_SLEEP:
- return put_user((u32)can_sleep, argp);
+ return put_user((u32)can_sleep, (__u32 *)arg);
#ifdef CONFIG_PMAC_BACKLIGHT
/* Backlight should have its own device or go via
error = get_backlight_level();
if (error < 0)
return error;
- return put_user(error, argp);
+ return put_user(error, (__u32 *)arg);
case PMU_IOC_SET_BACKLIGHT:
{
__u32 value;
if (sleep_in_progress)
return -EBUSY;
- error = get_user(value, argp);
+ error = get_user(value, (__u32 *)arg);
if (!error)
error = set_backlight_level(value);
return error;
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT */
case PMU_IOC_GET_MODEL:
- return put_user(pmu_kind, argp);
+ return put_user(pmu_kind, (__u32 *)arg);
case PMU_IOC_HAS_ADB:
- return put_user(pmu_has_adb, argp);
+ return put_user(pmu_has_adb, (__u32 *)arg);
}
return -EINVAL;
}
/*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
};
-int pmu_probe(void)
+int pmu_probe()
{
if (macintosh_config->adb_type == MAC_ADB_PB1) {
pmu_kind = PMU_68K_V1;
}
static void
-recv_byte(void)
+recv_byte()
{
char c;
}
static void
-pmu_start(void)
+pmu_start()
{
unsigned long flags;
struct adb_request *req;
}
void
-pmu_poll(void)
+pmu_poll()
{
unsigned long flags;
if (uptodate)
multipath_end_bh_io(mp_bh, uptodate);
- else if ((bio->bi_rw & (1 << BIO_RW_AHEAD)) == 0) {
+ else {
/*
* oops, IO error:
*/
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_sector);
multipath_reschedule_retry(mp_bh);
- } else
- multipath_end_bh_io(mp_bh, 0);
+ }
rdev_dec_pending(rdev, conf->mddev);
return 0;
}
" to another IO path\n",
bdevname(bio->bi_bdev,b),
(unsigned long long)bio->bi_sector);
- *bio = *(mp_bh->master_bio);
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
- bio->bi_end_io = multipath_end_request;
- bio->bi_private = mp_bh;
generic_make_request(bio);
}
}
*/
#define RAID5_DEBUG 0
#define RAID5_PARANOIA 1
-#if RAID5_PARANOIA && defined(CONFIG_SMP)
+#if RAID5_PARANOIA && CONFIG_SMP
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
#define RAID6_DEBUG 0 /* Extremely verbose printk */
#define RAID6_PARANOIA 1 /* Check spinlocks */
#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
-#if RAID6_PARANOIA && defined(CONFIG_SMP)
+#if RAID6_PARANOIA && CONFIG_SMP
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
config DVB_B2C2_SKYSTAR
tristate "Technisat Skystar2 PCI"
- depends on DVB_CORE && PCI
+ depends on DVB_CORE
help
Support for the Skystar2 PCI DVB card by Technisat, which
is equipped with the FlexCopII chipset by B2C2.
neq |= f->maskandnotmode[i] & xor;
}
- if (f->doneq && !neq)
+ if (f->doneq & !neq)
return 0;
return feed->cb.sec (feed->feed.sec.secbuf, feed->feed.sec.seclen,
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
- /*
- * For this command, the pointer is actually an integer
- * argument.
- */
- parg = (void *) arg;
+ parg = NULL;
break;
case _IOC_READ: /* some v4l ioctls are marked wrong ... */
case _IOC_WRITE:
#include "dvb_functions.h"
-static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
+static inline __u32 iov_crc32( __u32 c, struct iovec *iov, unsigned int cnt )
{
unsigned int j;
for (j = 0; j < cnt; j++)
/* Check CRC32, we've got it in our skb already. */
unsigned short ulen = htons(priv->ule_sndu_len);
unsigned short utype = htons(priv->ule_sndu_type);
- struct kvec iov[4] = {
+ struct iovec iov[4] = {
{ &ulen, sizeof ulen },
{ &utype, sizeof utype },
{ NULL, 0 },
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
-#define DVB_MAJOR 212
+#define DVB_MAJOR 250
#define DVB_DEVICE_VIDEO 0
#define DVB_DEVICE_AUDIO 1
*/
+
+#define __KERNEL_SYSCALLS__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
loff_t filesize;
char *dp;
- fd = sys_open(fn, 0, 0);
+ fd = open(fn, 0, 0);
if (fd == -1) {
printk("%s: unable to open '%s'.\n", __FUNCTION__, fn);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0 || filesize < SP8870_FIRMWARE_OFFSET + SP8870_FIRMWARE_SIZE) {
printk("%s: firmware filesize to small '%s'\n", __FUNCTION__, fn);
sys_close(fd);
return -EIO;
}
- sys_lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
- if (sys_read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
+ lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
+ if (read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
printk("%s: failed to read '%s'.\n",__FUNCTION__, fn);
vfree(dp);
sys_close(fd);
next 0x4000 loaded. This may change in future versions.
*/
+#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
// Load the firmware
set_fs(get_ds());
- fd = sys_open(sp887x_firmware, 0, 0);
+ fd = open(sp887x_firmware, 0, 0);
if (fd < 0) {
printk(KERN_WARNING "%s: Unable to open firmware %s\n", __FUNCTION__,
sp887x_firmware);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0) {
printk(KERN_WARNING "%s: Firmware %s is empty\n", __FUNCTION__,
sp887x_firmware);
// read it!
// read the first 16384 bytes from the file
// ignore the first 10 bytes
- sys_lseek(fd, 10, 0);
- if (sys_read(fd, firmware, fw_size) != fw_size) {
+ lseek(fd, 10, 0);
+ if (read(fd, firmware, fw_size) != fw_size) {
printk(KERN_WARNING "%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
*/
+#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/unistd.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
// Load the firmware
set_fs(get_ds());
- fd = sys_open(tda1004x_firmware, 0, 0);
+ fd = open(tda1004x_firmware, 0, 0);
if (fd < 0) {
printk("%s: Unable to open firmware %s\n", __FUNCTION__,
tda1004x_firmware);
return -EIO;
}
- filesize = sys_lseek(fd, 0L, 2);
+ filesize = lseek(fd, 0L, 2);
if (filesize <= 0) {
printk("%s: Firmware %s is empty\n", __FUNCTION__,
tda1004x_firmware);
}
// read it!
- sys_lseek(fd, fw_offset, 0);
- if (sys_read(fd, firmware, fw_size) != fw_size) {
+ lseek(fd, fw_offset, 0);
+ if (read(fd, firmware, fw_size) != fw_size) {
printk("%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_DEV && PCI
+ depends on VIDEO_DEV
---help---
Choose Y here if you have this radio card. This card may also be
found as Gemtek PCI FM.
if ((i=aci_rw_cmd(ACI_READ_TUNERSTATION, -1, -1))<0)
return i;
-#ifdef DEBUG
+#if DEBUG
printk("check_sig: 0x%x\n", i);
#endif
if (i & 0x80) {
if ((i=aci_rds_cmd(RDS_RXVALUE, &buf, 1))<0)
return i;
-#ifdef DEBUG
+#if DEBUG
printk("rds-signal: %d\n", buf);
#endif
if (buf > 15) {
unsigned long *freq = arg;
pcm20->freq = *freq;
i=pcm20_setfreq(pcm20, pcm20->freq);
-#ifdef DEBUG
+#if DEBUG
printk("First view (setfreq): 0x%x\n", i);
#endif
return i;
struct saa7134_buf *buf)
{
struct saa7134_buf *next = NULL;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
struct saa7134_dmaqueue *q,
unsigned int state)
{
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
dprintk("buffer_finish %p\n",q->curr);
{
struct saa7134_buf *buf,*next = NULL;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
BUG_ON(NULL != q->curr);
enum v4l2_field cap = V4L2_FIELD_ANY;
enum v4l2_field ov = V4L2_FIELD_ANY;
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
#include <linux/videodev.h>
#include <linux/spinlock.h>
#include <linux/sem.h>
-#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <asm/io.h>
{NULL, 0, 0, 0},
};
+/* Output cursor/state for the legacy read_proc-style helpers added below
+ * (print_procfs / zoran_procfs_output / zoran_read_proc), which this patch
+ * reintroduces in place of the seq_file implementation. */
+struct procfs_io {
+	char *buffer;		/* next write position in the caller's page */
+	char *end;		/* end of usable space (caller leaves a safety margin) */
+	int neof;		/* bumped when output hit 'end' (i.e. NOT at end of data) */
+	int count;		/* reader's offset: number of print calls to skip */
+	int count_current;	/* print calls seen so far while skipping/emitting */
+};
+
static void
setparam (struct zoran *zr,
char *name,
}
}
-static int zoran_show(struct seq_file *p, void *v)
+static int
+print_procfs (struct procfs_io *io,
+ const char *fmt,
+ ...)
{
- struct zoran *zr = p->private;
+ va_list args;
int i;
- seq_printf(p, "ZR36067 registers:\n");
- for (i = 0; i < 0x130; i += 16)
- seq_printf(p, "%03X %08X %08X %08X %08X \n", i,
- btread(i), btread(i+4), btread(i+8), btread(i+12));
- return 0;
+ if (io->buffer >= io->end) {
+ io->neof++;
+ return 0;
+ }
+ if (io->count > io->count_current++)
+ return 0;
+ va_start(args, fmt);
+ i = vsprintf(io->buffer, fmt, args);
+ io->buffer += i;
+ va_end(args);
+ return i;
}
-static int zoran_open(struct inode *inode, struct file *file)
+static void
+zoran_procfs_output (struct procfs_io *io,
+ void *data)
{
- struct zoran *data = PDE(inode)->data;
- return single_open(file, zoran_show, data);
+ int i;
+ struct zoran *zr;
+ zr = (struct zoran *) data;
+
+ print_procfs(io, "ZR36067 registers:");
+ for (i = 0; i < 0x130; i += 4) {
+ if (!(i % 16)) {
+ print_procfs(io, "\n%03X", i);
+ };
+ print_procfs(io, " %08X ", btread(i));
+ };
+ print_procfs(io, "\n");
}
-static ssize_t zoran_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
+/*
+ * Legacy /proc read_proc callback: renders the ZR36067 register dump into
+ * 'buffer' via zoran_procfs_output(). 'offset' and '*start' follow the old
+ * procfs convention where *start carries the number of consumed items cast
+ * to a pointer — presumably intentional, but verify against fs/proc users.
+ * NOTE(review): the trailing 'return 0;' after 'return nbytes;' is
+ * unreachable dead code.
+ */
+static int
+zoran_read_proc (char *buffer,
+		 char **start,
+		 off_t offset,
+		 int size,
+		 int *eof,
+		 void *data)
+{
+	struct procfs_io io;
+	int nbytes;
+
+	io.buffer = buffer;
+	io.end = buffer + size - 128;	// Just to make it a little bit safer
+	io.count = offset;
+	io.count_current = 0;
+	io.neof = 0;
+	zoran_procfs_output(&io, data);
+	*start = (char *) (io.count_current - io.count);
+	nbytes = (int) (io.buffer - buffer);
+	*eof = !io.neof;
+	return nbytes;
+
+	return 0;
+}
+
+static int
+zoran_write_proc (struct file *file,
+ const char __user *buffer,
+ unsigned long count,
+ void *data)
{
- struct zoran *zr = PDE(file->f_dentry->d_inode)->data;
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
+ struct zoran *zr;
if (count > 32768) /* Stupidity filter */
return -EINVAL;
+ zr = (struct zoran *) data;
+
string = sp = vmalloc(count + 1);
if (!string) {
dprintk(1,
return -EFAULT;
}
string[count] = 0;
- dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%zu zr=%p\n",
- ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, zr);
+ dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%lu data=%x\n",
+ ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, (int) data);
ldelim = " \t\n";
tdelim = "=";
line = strpbrk(sp, ldelim);
return count;
}
-
-static struct file_operations zoran_operations = {
- .open = zoran_open,
- .read = seq_read,
- .write = zoran_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
#endif
int
snprintf(name, 7, "zoran%d", zr->id);
if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) {
+ zr->zoran_proc->read_proc = zoran_read_proc;
+ zr->zoran_proc->write_proc = zoran_write_proc;
zr->zoran_proc->data = zr;
zr->zoran_proc->owner = THIS_MODULE;
- zr->zoran_proc->proc_fops = &zoran_operations;
dprintk(2,
KERN_INFO
"%s: procfs entry /proc/%s allocated. data=%p\n",
char name[8];
snprintf(name, 7, "zoran%d", zr->id);
- if (zr->zoran_proc)
+ if (zr->zoran_proc) {
remove_proc_entry(name, NULL);
+ }
zr->zoran_proc = NULL;
#endif
}
-# $Id: Kconfig,v 1.6 2004/08/09 13:19:42 dwmw2 Exp $
+# $Id: Kconfig,v 1.5 2004/06/04 15:59:32 gleixner Exp $
menu "Memory Technology Devices (MTD)"
Determines the verbosity level of the MTD debugging messages.
config MTD_PARTITIONS
- bool "MTD partitioning support"
+ tristate "MTD partitioning support"
depends on MTD
help
If you have a device which needs to divide its flash chip(s) up
#
# Makefile for the memory technology device drivers.
#
-# $Id: Makefile.common,v 1.5 2004/08/10 20:51:49 dwmw2 Exp $
+# $Id: Makefile.common,v 1.3 2004/07/12 16:07:30 dwmw2 Exp $
# Core functionality.
-mtd-y := mtdcore.o
-mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
-obj-$(CONFIG_MTD) += $(mtd-y)
-
+obj-$(CONFIG_MTD) += mtdcore.o
obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
# drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.9 2004/07/16 15:32:14 dwmw2 Exp $
+# $Id: Kconfig,v 1.8 2004/07/13 22:32:02 dwmw2 Exp $
menu "RAM/ROM/Flash chip drivers"
depends on MTD!=n
with this driver will return -ENODEV upon access.
config MTD_OBSOLETE_CHIPS
- depends on MTD && BROKEN
bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
help
This option does not enable any code directly, but will allow you to
*
* Author: Jonas Holmberg <jonas.holmberg@axis.com>
*
- * $Id: amd_flash.c,v 1.25 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: amd_flash.c,v 1.24 2004/07/12 13:34:30 dwmw2 Exp $
*
* Copyright (c) 2001 Axis Communications AB
*
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ instr->callback(instr);
+ }
return 0;
}
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.153 2004/07/12 21:52:20 dwmw2 Exp $
*
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
return ret;
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
*
* This code is GPL
*
- * $Id: cfi_cmdset_0002.c,v 1.106 2004/08/09 14:02:32 dwmw2 Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.103 2004/07/14 16:24:03 dwmw2 Exp $
*
*/
ofs = instr->addr;
len = instr->len;
- ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
+ ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
if (ret)
return ret;
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
return ret;
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
int ret;
DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
+ "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
int ret;
DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
+ "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0020.c,v 1.15 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: cfi_cmdset_0020.c,v 1.13 2004/07/12 21:52:50 dwmw2 Exp $
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
kfree(cfi);
}
+/* Backward compatibility: kernels older than 2.2.18 (0x20212) have no
+ * module_init()/module_exit(), so when built as a module alias the entry
+ * points to the historic init_module/cleanup_module names. */
+#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
+#define cfi_staa_init init_module
+#define cfi_staa_exit cleanup_module
+#endif
+
static char im_name[]="cfi_cmdset_0020";
int __init cfi_staa_init(void)
* not going to guess how to send commands to them, plus I expect they will
* all speak CFI..
*
- * $Id: jedec.c,v 1.21 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: jedec.c,v 1.20 2004/07/12 14:03:01 dwmw2 Exp $
*/
#include <linux/init.h>
//printk("done\n");
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
#undef flread
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_ram.c,v 1.20 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: map_ram.c,v 1.19 2004/07/12 21:58:44 dwmw2 Exp $
*/
#include <linux/module.h>
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
* Copyright 2000,2001 David A. Schleef <ds@schleef.org>
* 2000,2001 Lineo, Inc.
*
- * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $
+ * $Id: sharp.c,v 1.13 2004/07/12 14:06:34 dwmw2 Exp $
*
* Devices supported:
* LH28F016SCT Symmetrical block flash memory, 2Mx8
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if(instr->callback)
+ instr->callback(instr);
return 0;
}
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.12 2004/08/10 13:12:18 dwmw2 Exp $
+# $Id: Kconfig,v 1.10 2004/07/15 00:34:49 dwmw2 Exp $
menu "Self-contained MTD device drivers"
depends on MTD!=n
comment "Disk-On-Chip Device Drivers"
config MTD_DOC2000
- tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ tristate "M-Systems Disk-On-Chip 2000 and Millennium"
depends on MTD
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
emulate a block device by using a kind of file system on the flash
chips.
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
config MTD_DOC2001
- tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (see help)"
depends on MTD
---help---
This provides an alternative MTD device driver for the M-Systems
emulate a block device by using a kind of file system on the flash
chips.
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
depends on MTD
to emulate a block device by using a kind of file system on the
flash chips.
- NOTE: This driver will soon be replaced by the new DiskOnChip driver
- under "NAND Flash Device Drivers" (currently that driver does not
- support all Millennium Plus devices).
-
config MTD_DOCPROBE
tristate
default m if MTD_DOC2001!=y && MTD_DOC2000!=y && MTD_DOC2001PLUS!=y && (MTD_DOC2001=m || MTD_DOC2000=m || MTD_DOC2001PLUS=m)
default y if MTD_DOC2001=y || MTD_DOC2000=y || MTD_DOC2001PLUS=y
help
- This isn't a real config option; it's derived.
+ This isn't a real config option, it's derived.
config MTD_DOCECC
tristate
default m if MTD_DOCPROBE!=y && MTD_NAND_DISKONCHIP!=y && (MTD_DOCPROBE=m || MTD_NAND_DISKONCHIP=m)
default y if MTD_DOCPROBE=y || MTD_NAND_DISKONCHIP=y
help
- This isn't a real config option; it's derived.
+ This isn't a real config option, it's derived.
config MTD_DOCPROBE_ADVANCED
bool "Advanced detection options for DiskOnChip"
/*
- * $Id: blkmtd.c,v 1.23 2004/08/09 14:03:19 dwmw2 Exp $
+ * $Id: blkmtd-25.c,v 1.6 2004/07/15 15:09:15 dwmw2 Exp $
*
* blkmtd.c - use a block device as a fake MTD
*
/* Default erase size in K, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
-#define VERSION "$Revision: 1.23 $"
+#define VERSION "$Revision: 1.6 $"
/* Info for the block device */
struct blkmtd_dev {
pagenr = to >> PAGE_SHIFT;
offset = to & ~PAGE_MASK;
- DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
+ DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
buf, (long)to, len, pagenr, offset);
/* see if we have to do a partial write at the start */
down(&dev->wrbuf_mutex);
- DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
+ DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
start_len, len, end_len, pagecnt);
if(start_len) {
/* do partial start region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
pagenr, start_len, offset);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(end_len) {
/* do the third region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
pagenr, end_len);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(bio)
blkmtd_write_out(bio);
- DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
+ DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
up(&dev->wrbuf_mutex);
if(retlen)
size_t from;
u_long len;
int err = -EIO;
- size_t retlen;
+ int retlen;
instr->state = MTD_ERASING;
from = instr->addr;
len = instr->len;
/* check erase region has valid start and length */
- DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
mtd->name+9, from, len);
while(numregions) {
DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
if(!numregions) {
/* Not a valid erase block */
- err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
+ err("erase: invalid erase request 0x%lX @ 0x%08X", len, from);
instr->state = MTD_ERASE_FAILED;
err = -EIO;
}
if(instr->state != MTD_ERASE_FAILED) {
/* do the erase */
- DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
+ DEBUG(3, "Doing erase from = %d len = %ld\n", from, len);
err = write_pages(dev, NULL, from, len, &retlen);
if(err || retlen != len) {
err("erase failed err = %d", err);
}
DEBUG(3, "blkmtd: erase: checking callback\n");
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
return err;
}
int pagenr, pages;
size_t thislen = 0;
- DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
- mtd->name+9, from, len, buf);
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
+ mtd->name+9, (long int)from, len, buf);
if(from > mtd->size)
return -EINVAL;
readerr:
if(retlen)
*retlen = thislen;
- DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
+ DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", thislen, err);
return err;
}
if(!len)
return 0;
- DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
- mtd->name+9, to, len, buf);
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
+ mtd->name+9, (long int)to, len, buf);
if(to >= mtd->size) {
return -ENOSPC;
{
struct mtd_erase_region_info *info = NULL;
- DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
+ DEBUG(2, "calc_erase_regions, es = %d size = %d regions = %d\n",
erase_size, total_size, *regions);
/* Make any user specified erasesize be a power of 2
and at least PAGE_SIZE */
break;
}
} while(!(*regions));
- DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
+ DEBUG(2, "calc_erase_regions done, es = %d size = %d regions = %d\n",
erase_size, total_size, *regions);
return info;
}
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2000.c,v 1.62 2004/08/09 14:04:02 dwmw2 Exp $
+ * $Id: doc2000.c,v 1.60 2004/04/07 08:30:04 gleixner Exp $
*/
#include <linux/kernel.h>
size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
-static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
+static int doc_writev_ecc(struct mtd_info *mtd, const struct iovec *vecs,
unsigned long count, loff_t to, size_t *retlen,
u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, u_char * buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, 0);
}
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, const u_char * buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, 0);
}
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
-static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
+static int doc_writev_ecc(struct mtd_info *mtd, const struct iovec *vecs,
unsigned long count, loff_t to, size_t *retlen,
u_char *eccbuf, struct nand_oobinfo *oobsel)
{
instr->state = MTD_ERASE_DONE;
callback:
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
up(&this->lock);
return 0;
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001.c,v 1.44 2004/08/09 14:04:24 dwmw2 Exp $
+ * $Id: doc2001.c,v 1.42 2004/04/04 12:36:45 gleixner Exp $
*/
#include <linux/kernel.h>
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel);
+ size_t *retlen, u_char *buf, u_char *eccbuf, int oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel);
+ size_t *retlen, const u_char *buf, u_char *eccbuf, int oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, 0);
}
static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel)
+ size_t *retlen, u_char *buf, u_char *eccbuf, int oobsel)
{
int i, ret;
volatile char dummy;
size_t *retlen, const u_char *buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, 0);
}
static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf,
- struct nand_oobinfo *oobsel)
+ size_t *retlen, const u_char *buf, u_char *eccbuf, int oobsel)
{
int i,ret = 0;
volatile char dummy;
instr->state = MTD_ERASE_DONE;
dummy = ReadDOC(docptr, LastDataRead);
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001plus.c,v 1.9 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: doc2001plus.c,v 1.8 2004/04/04 12:36:45 gleixner Exp $
*
* Released under GPL
*/
/* Disable flash internally */
WriteDOC(0, docptr, Mplus_FlashSelect);
- mtd_erase_callback(instr);
+ if (instr->callback)
+ instr->callback(instr);
return 0;
}
/*
* MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
*
- * $Id: lart.c,v 1.7 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: lart.c,v 1.6 2004/07/14 17:21:38 dwmw2 Exp $
*
* Author: Abraham vd Merwe <abraham@2d3d.co.za>
*
}
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) instr->callback (instr);
return (0);
}
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * $Id: ms02-nv.c,v 1.7 2004/07/29 14:16:45 macro Exp $
+ * $Id: ms02-nv.c,v 1.6 2003/08/19 09:25:36 dwmw2 Exp $
*/
#include <linux/init.h>
static char version[] __initdata =
"ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
-MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_AUTHOR("Maciej W. Rozycki <macro@ds2.pg.gda.pl>");
MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
MODULE_LICENSE("GPL");
/*
* mtdram - a test mtd device
- * $Id: mtdram.c,v 1.33 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: mtdram.c,v 1.32 2003/05/21 15:15:07 dwmw2 Exp $
* Author: Alexander Larsson <alex@cendio.se>
*
* Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ (*(instr->callback))(instr);
return 0;
}
/**
*
- * $Id: phram.c,v 1.2 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: phram.c,v 1.1 2003/08/21 17:52:30 joern Exp $
*
* Copyright (c) Jochen Schaeuble <psionic@psionic.de>
* 07/2003 rewritten by Joern Engel <joern@wh.fh-wedel.de>
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback)
+ (*(instr->callback))(instr);
+ else
+ kfree(instr);
return 0;
}
/*
- * $Id: pmc551.c,v 1.28 2004/08/09 13:19:44 dwmw2 Exp $
+ * $Id: pmc551.c,v 1.26 2004/07/14 17:25:07 dwmw2 Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>
+#if LINUX_VERSION_CODE > 0x20300
+#define PCI_BASE_ADDRESS(dev) (dev->resource[0].start)
+#else
+#define PCI_BASE_ADDRESS(dev) (dev->base_address[0])
+#endif
+
static struct mtd_info *pmc551list;
static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
printk(KERN_DEBUG "pmc551_erase() done\n");
#endif
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
return 0;
}
(size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
+ PCI_BASE_ADDRESS(dev)&PCI_BASE_ADDRESS_MEM_MASK );
/*
* Check to see the state of the memory
}
printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
- PCI_Device->resource[0].start);
+ PCI_BASE_ADDRESS(PCI_Device));
/*
* The PMC551 device acts VERY weird if you don't init it
printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
priv->asize = asize;
}
- priv->start = ioremap(((PCI_Device->resource[0].start)
+ priv->start = ioremap((PCI_BASE_ADDRESS(PCI_Device)
& PCI_BASE_ADDRESS_MEM_MASK),
priv->asize);
/*======================================================================
- $Id: slram.c,v 1.31 2004/08/09 13:19:44 dwmw2 Exp $
+ $Id: slram.c,v 1.30 2003/05/20 21:03:08 dwmw2 Exp $
This driver provides a method to access memory not used by the kernel
itself (i.e. if the kernel commandline mem=xxx is used). To actually
instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
+ else {
+ kfree(instr);
+ }
return(0);
}
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.53 2004/08/09 13:55:43 dwmw2 Exp $
+ * $Id: ftl.c,v 1.52 2003/08/11 09:00:44 dwmw2 Exp $
*
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
{
erase_unit_header_t header;
loff_t offset, max_offset;
- size_t ret;
- int err;
+ int ret;
part->header.FormattedSize = 0;
max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
/* Search first megabyte for a valid FTL header */
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
(unsigned char *)&header);
- if (err)
- return err;
+ if (ret)
+ return ret;
if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
}
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
- " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
+ " = 0x%x, Offset = 0x%x\n", log_addr, virt_addr,
offset);
return -EIO;
}
int init_ftl(void)
{
- DEBUG(0, "$Id: ftl.c,v 1.53 2004/08/09 13:55:43 dwmw2 Exp $\n");
+ DEBUG(0, "$Id: ftl.c,v 1.52 2003/08/11 09:00:44 dwmw2 Exp $\n");
return register_mtd_blktrans(&ftl_tr);
}
* (c) 1999 Machine Vision Holdings, Inc.
* Author: David Woodhouse <dwmw2@infradead.org>
*
- * $Id: inftlcore.c,v 1.17 2004/08/09 13:56:48 dwmw2 Exp $
+ * $Id: inftlcore.c,v 1.16 2004/07/12 12:34:58 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
- "desperate=%d)\n", inftl, desperate);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=0x%x,"
+ "desperate=%d)\n", (int)inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
struct inftl_oob oob;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
- "pending=%d)\n", inftl, thisVUC, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=0x%x,thisVUC=%d,"
+ "pending=%d)\n", (int)inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
- "pending=%d)\n", inftl, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=0x%x,"
+ "pending=%d)\n", (int)inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
EUN = inftl->VUtable[chain];
size_t retlen;
int silly, silly2 = 3;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
do {
/*
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
- "thisVUC=%d)\n", inftl, thisVUC);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=0x%x,"
+ "thisVUC=%d)\n", (int)inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
size_t retlen;
struct inftl_bci bci;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
struct inftl_oob oob;
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
- "buffer=%p)\n", inftl, block, buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=0x%x,block=%ld,"
+ "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
/* Is block all zero? */
pend = buffer + SECTORSIZE;
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
- "buffer=%p)\n", inftl, block, buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=0x%x,block=%ld,"
+ "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
int __init init_inftl(void)
{
- printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.17 $, "
+ printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.16 $, "
"inftlmount.c %s\n", inftlmountrev);
return register_mtd_blktrans(&inftl_tr);
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: inftlmount.c,v 1.14 2004/08/09 13:57:42 dwmw2 Exp $
+ * $Id: inftlmount.c,v 1.13 2004/06/28 16:06:36 dbrown Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/mtd/inftl.h>
#include <linux/mtd/compatmac.h>
-char inftlmountrev[]="$Revision: 1.14 $";
+char inftlmountrev[]="$Revision: 1.13 $";
/*
* find_boot_record: Find the INFTL Media Header and its Spare copy which
u8 buf[SECTORSIZE];
struct INFTLMediaHeader *mh = &inftl->MediaHdr;
struct INFTLPartition *ip;
- size_t retlen;
+ int retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=0x%x)\n",
+ (int)inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
if (!inftl->PUtable) {
printk(KERN_WARNING "INFTL: allocation of PUtable "
- "failed (%zd bytes)\n",
+ "failed (%d bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
if (!inftl->VUtable) {
kfree(inftl->PUtable);
printk(KERN_WARNING "INFTL: allocation of VUtable "
- "failed (%zd bytes)\n",
+ "failed (%d bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int len, int check_oob)
{
+ int i, retlen;
u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
- size_t retlen;
- int i;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
- "address=0x%x,len=%d,check_oob=%d)\n", inftl,
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=0x%x,"
+ "address=0x%x,len=%d,check_oob=%d)\n", (int)inftl,
address, len, check_oob);
for (i = 0; i < len; i += SECTORSIZE) {
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
- size_t retlen;
+ int retlen;
struct inftl_unittail uci;
struct erase_info *instr = &inftl->instr;
int physblock;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
- "block=%d)\n", inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=0x%x,"
+ "block=%d)\n", (int)inftl, block);
memset(instr, 0, sizeof(struct erase_info));
int chain_length, do_format_chain;
struct inftl_unithead1 h0;
struct inftl_unittail h1;
- size_t retlen;
- int i;
+ int i, retlen;
u8 *ANACtable, ANAC;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=0x%x)\n", (int)s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
# drivers/mtd/maps/Kconfig
-# $Id: Kconfig,v 1.30 2004/07/21 00:16:14 jwboyer Exp $
+# $Id: Kconfig,v 1.29 2004/07/15 15:29:17 dwmw2 Exp $
menu "Mapping drivers for chip access"
depends on MTD!=n
command set driver code to communicate with flash chips which
are mapped physically into the CPU's memory. You will need to
configure the physical address and size of the flash chips on
- your particular board as well as the bus width, either statically
- with config options or at run-time.
+ your particular board as well as the bus width.
config MTD_PHYSMAP_START
hex "Physical start address of flash mapping"
are mapped on your particular target board. Refer to the
memory map which should hopefully be in the documentation for
your board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_LEN
hex "Physical length of flash mapping"
than the total amount of flash present. Refer to the memory
map which should hopefully be in the documentation for your
board.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
-config MTD_PHYSMAP_BANKWIDTH
- int "Bank width in octets"
+config MTD_PHYSMAP_BUSWIDTH
+ int "Bus width in octets"
depends on MTD_PHYSMAP
default "2"
help
in octets. For example, if you have a data bus width of 32
bits, you would set the bus width octect value to 4. This is
used internally by the CFI drivers.
- Ignore this option if you use run-time physmap configuration
- (i.e., run-time calling physmap_configure()).
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
* ichxrom.c
*
* Normal mappings of chips in physical memory
- * $Id: ichxrom.c,v 1.8 2004/07/16 17:43:11 dwmw2 Exp $
+ * $Id: ichxrom.c,v 1.7 2004/07/14 18:14:09 eric Exp $
*/
#include <linux/module.h>
info->mtd->unlock = ichxrom_unlock;
}
if (info->mtd->size > info->map.size) {
- printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%lu). fixing...\n",
+ printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%u). fixing...\n",
info->mtd->size, info->map.size);
info->mtd->size = info->map.size;
}
/*
- * $Id: physmap.c,v 1.34 2004/07/21 00:16:14 jwboyer Exp $
+ * $Id: physmap.c,v 1.33 2004/07/12 14:37:24 dwmw2 Exp $
*
* Normal mappings of chips in physical memory
*
static struct mtd_info *mymtd;
-struct map_info physmap_map = {
- .name = "phys_mapped_flash",
- .phys = CONFIG_MTD_PHYSMAP_START,
- .size = CONFIG_MTD_PHYSMAP_LEN,
- .bankwidth = CONFIG_MTD_PHYSMAP_BANKWIDTH,
-};
+struct map_info physmap_map = {.name = "phys_mapped_flash"};
#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *mtd_parts;
/*
- * $Id: mtdchar.c,v 1.64 2004/08/09 13:59:46 dwmw2 Exp $
+ * $Id: mtdchar.c,v 1.62 2004/07/14 13:20:42 dwmw2 Exp $
*
* Character-device access to raw MTD devices.
*
IOCTL calls for getting device parameters.
======================================================================*/
-static void mtdchar_erase_callback (struct erase_info *instr)
+static void mtd_erase_callback (struct erase_info *instr)
{
wake_up((wait_queue_head_t *)instr->priv);
}
return -EFAULT;
}
erase->mtd = mtd;
- erase->callback = mtdchar_erase_callback;
+ erase->callback = mtd_erase_callback;
erase->priv = (unsigned long)&waitq;
/*
}
default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %x)\n", cmd, MEMGETINFO);
ret = -ENOTTY;
}
/*
- * $Id: mtdcore.c,v 1.43 2004/07/23 15:20:46 dwmw2 Exp $
+ * $Id: mtdcore.c,v 1.42 2004/07/13 10:21:13 dwmw2 Exp $
*
* Core registration and callback routines for MTD
* drivers and users.
*
*/
+#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
*
* This code is GPL
*
- * $Id: mtdpart.c,v 1.50 2004/08/10 16:18:34 dwmw2 Exp $
+ * $Id: mtdpart.c,v 1.46 2004/07/12 13:28:07 dwmw2 Exp $
*
* 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
* added support for read_oob, write_oob
return -EINVAL;
instr->addr += part->offset;
ret = part->master->erase(part->master, instr);
+ if (instr->fail_addr != 0xffffffff)
+ instr->fail_addr -= part->offset;
return ret;
}
-void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->mtd->erase == part_erase) {
- struct mtd_part *part = PART(instr->mtd);
-
- if (instr->fail_addr != 0xffffffff)
- instr->fail_addr -= part->offset;
- instr->addr -= part->offset;
- }
- if (instr->callback)
- instr->callback(instr);
-}
-EXPORT_SYMBOL_GPL(mtd_erase_callback);
-
static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = PART(mtd);
# drivers/mtd/nand/Kconfig
-# $Id: Kconfig,v 1.17 2004/08/10 14:24:07 dwmw2 Exp $
+# $Id: Kconfig,v 1.14 2004/07/13 00:14:35 dbrown Exp $
menu "NAND Flash Device Drivers"
depends on MTD!=n
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
+ tristate "DiskOnChip 2000 and Millennium (NAND reimplementation) (EXPERIMENTAL)"
depends on MTD_NAND && EXPERIMENTAL
help
- This is a reimplementation of M-Systems DiskOnChip 2000,
- Millennium and Millennium Plus as a standard NAND device driver,
- as opposed to the earlier self-contained MTD device drivers.
+ This is a reimplementation of M-Systems DiskOnChip 2000 and
+ Millennium as a standard NAND device driver, as opposed to the
+ earlier self-contained MTD device drivers.
This should enable, among other things, proper JFFS2 operation on
these devices.
-config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- bool "Advanced detection options for DiskOnChip"
- depends on MTD_NAND_DISKONCHIP
- help
- This option allows you to specify nonstandard address at which to
- probe for a DiskOnChip, or to change the detection options. You
- are unlikely to need any of this unless you are using LinuxBIOS.
- Say 'N'.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
- hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- depends on MTD_NAND_DISKONCHIP
- default "0"
- ---help---
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option allows you to specify a single address at which to probe
- for the device, which is useful if you have other devices in that
- range which get upset when they are probed.
-
- (Note that on PowerPC, the normal probe will only check at
- 0xE4000000.)
-
- Normally, you should leave this set to zero, to allow the probe at
- the normal addresses.
-
-config MTD_NAND_DISKONCHIP_PROBE_HIGH
- bool "Probe high addresses"
- depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
- help
- By default, the probe for DiskOnChip devices will look for a
- DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
- This option changes to make it probe between 0xFFFC8000 and
- 0xFFFEE000. Unless you are using LinuxBIOS, this is unlikely to be
- useful to you. Say 'N'.
-
config MTD_NAND_DISKONCHIP_BBTWRITE
bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
depends on MTD_NAND_DISKONCHIP
* Derived from drivers/mtd/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: autcpu12.c,v 1.20 2004/07/20 02:44:26 dwmw2 Exp $
+ * $Id: autcpu12.c,v 1.19 2004/07/12 15:02:15 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
*/
static struct mtd_info *autcpu12_mtd = NULL;
+/*
+ * Module stuff
+ */
+#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
+#define autcpu12_init init_module
+#define autcpu12_cleanup cleanup_module
+#endif
+
static int autcpu12_io_base = CS89712_VIRT_BASE;
static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
* drivers/mtd/nand/diskonchip.c
*
* (C) 2003 Red Hat, Inc.
- * (C) 2004 Dan Brown <dan_brown@ieee.org>
- * (C) 2004 Kalev Lember <kalev@smartlink.ee>
*
* Author: David Woodhouse <dwmw2@infradead.org>
- * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
- * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
*
- * $Id: diskonchip.c,v 1.34 2004/08/09 19:41:12 dbrown Exp $
+ * $Id: diskonchip.c,v 1.23 2004/07/13 00:14:35 dbrown Exp $
*/
#include <linux/kernel.h>
#include <linux/mtd/inftl.h>
/* Where to look for the devices? */
-#ifndef CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS
-#define CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS 0
+#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
+#define CONFIG_MTD_DOCPROBE_ADDRESS 0
#endif
static unsigned long __initdata doc_locations[] = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DISKONCHIP_PROBE_HIGH
+#ifdef CONFIG_MTD_DOCPROBE_HIGH
0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
#define INFTL_BBT_RESERVED_BLOCKS 4
-#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
#endif
MODULE_PARM(inftl_bbt_write, "i");
-static unsigned long doc_config_location = CONFIG_MTD_DISKONCHIP_PROBE_ADDRESS;
+static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
MODULE_PARM(doc_config_location, "l");
MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
for (i = 0; i < cycles; i++) {
if (DoC_is_Millennium(doc))
dummy = ReadDOC(doc->virtadr, NOP);
- else if (DoC_is_MillenniumPlus(doc))
- dummy = ReadDOC(doc->virtadr, Mplus_NOP);
else
dummy = ReadDOC(doc->virtadr, DOCStatus);
}
}
-
-#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
static int _DoC_WaitReady(struct doc_priv *doc)
{
if(debug) printk("_DoC_WaitReady...\n");
/* Out-of-line routine to wait for chip response */
- if (DoC_is_MillenniumPlus(doc)) {
- while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
- if (time_after(jiffies, timeo)) {
- printk("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
- } else {
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- if (time_after(jiffies, timeo)) {
- printk("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
}
+ udelay(1);
+ cond_resched();
}
return 0;
unsigned long docptr = doc->virtadr;
int ret = 0;
- if (DoC_is_MillenniumPlus(doc)) {
- DoC_Delay(doc, 4);
-
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
- } else {
- DoC_Delay(doc, 4);
+ DoC_Delay(doc, 4);
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
- DoC_Delay(doc, 2);
- }
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
if(debug) printk("DoC_WaitReady OK\n");
return ret;
}
ReadDOC(docptr, ReadPipeInit);
for (i=0; i < len-1; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
/* Terminate read pipeline */
buf[i] = ReadDOC(docptr, LastDataRead);
return 0;
}
-static u_char doc2001plus_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- u_char ret;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ret = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug) printk("read_byte returns %02x\n", ret);
- return ret;
-}
-
-static void doc2001plus_writebuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("writebuf of %d bytes: ", len);
- for (i=0; i < len; i++) {
- WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
- if (debug && i < 16)
- printk("%02x ", buf[i]);
- }
- if (debug) printk("\n");
-}
-
-static void doc2001plus_readbuf(struct mtd_info *mtd,
- u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("readbuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i=0; i < len-2; i++) {
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
- if (debug && i < 16)
- printk("%02x ", buf[i]);
- }
-
- /* Terminate read pipeline */
- buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug && i < 16)
- printk("%02x ", buf[len-2]);
- buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug && i < 16)
- printk("%02x ", buf[len-1]);
- if (debug) printk("\n");
-}
-
-static int doc2001plus_verifybuf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int i;
-
- if (debug)printk("verifybuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i=0; i < len-2; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, Mplus_LastDataRead);
- ReadDOC(docptr, Mplus_LastDataRead);
- return i;
- }
- if (buf[len-2] != ReadDOC(docptr, Mplus_LastDataRead))
- return len-2;
- if (buf[len-1] != ReadDOC(docptr, Mplus_LastDataRead))
- return len-1;
- return 0;
-}
-
-static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
- int floor = 0;
-
- if(debug)printk("select chip (%d)\n", chip);
-
- if (chip == -1) {
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
- return;
- }
-
- floor = chip / doc->chips_per_floor;
- chip -= (floor * doc->chips_per_floor);
-
- /* Assert ChipEnable and deassert WriteProtect */
- WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- doc->curchip = chip;
- doc->curfloor = floor;
-}
-
static void doc200x_select_chip(struct mtd_info *mtd, int chip)
{
struct nand_chip *this = mtd->priv;
unsigned long docptr = doc->virtadr;
int floor = 0;
+ /* 11.4.4 -- deassert CE before changing chip */
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE);
+
if(debug)printk("select chip (%d)\n", chip);
if (chip == -1)
floor = chip / doc->chips_per_floor;
chip -= (floor * doc->chips_per_floor);
- /* 11.4.4 -- deassert CE before changing chip */
- doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE);
-
WriteDOC(floor, docptr, FloorSelect);
WriteDOC(chip, docptr, CDSNDeviceSelect);
DoC_Delay(doc, 4);
}
-static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- /*
- * Must terminate write pipeline before sending any commands
- * to the device.
- */
- if (command == NAND_CMD_PAGEPROG) {
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- }
-
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->oobblock) {
- /* OOB area */
- column -= mtd->oobblock;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- WriteDOC(readcmd, docptr, Mplus_FlashCmd);
- }
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
- if (column != -1 || page_addr != -1) {
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16)
- column >>= 1;
- WriteDOC(column, docptr, Mplus_FlashAddress);
- }
- if (page_addr != -1) {
- WriteDOC((unsigned char) (page_addr & 0xff), docptr, Mplus_FlashAddress);
- WriteDOC((unsigned char) ((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
- WriteDOC((unsigned char) ((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
- printk("high density\n");
- }
- }
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- /* deassert ALE */
- if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID)
- WriteDOC(0, docptr, Mplus_FlashControl);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- if (this->dev_ready)
- break;
- udelay(this->chip_delay);
- WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- while ( !(this->read_byte(mtd) & 0x40));
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->dev_ready) {
- udelay (this->chip_delay);
- return;
- }
- }
-
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay (100);
- /* wait until command is processed */
- while (!this->dev_ready(mtd));
-}
-
static int doc200x_dev_ready(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
struct doc_priv *doc = (void *)this->priv;
unsigned long docptr = doc->virtadr;
- if (DoC_is_MillenniumPlus(doc)) {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
- if(debug)
- printk("not ready\n");
- return 0;
- }
- if (debug)printk("was ready\n");
- return 1;
- } else {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- if(debug)
- printk("not ready\n");
- return 0;
- }
- /* 11.4.2 -- Must NOP twice if it's ready */
- DoC_Delay(doc, 2);
- if (debug)printk("was ready\n");
- return 1;
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if(debug)
+ printk("not ready\n");
+ return 0;
}
-}
+ /* 11.4.2 -- Must NOP twice if it's ready */
+ DoC_Delay(doc, 2);
+ if (debug)printk("was ready\n");
+ return 1;
+}
static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
break;
- }
-}
-
-static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
- unsigned long docptr = doc->virtadr;
-
- /* Prime the ECC engine */
- switch(mode) {
- case NAND_ECC_READ:
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
- break;
- case NAND_ECC_WRITE:
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
- break;
- }
+ }
}
/* This code is only called on write */
WriteDOC(0, docptr, 2k_CDSN_IO);
WriteDOC(0, docptr, 2k_CDSN_IO);
WriteDOC(doc->CDSNControl, docptr, CDSNControl);
- } else if (DoC_is_MillenniumPlus(doc)) {
- WriteDOC(0, docptr, Mplus_NOP);
- WriteDOC(0, docptr, Mplus_NOP);
- WriteDOC(0, docptr, Mplus_NOP);
} else {
WriteDOC(0, docptr, NOP);
WriteDOC(0, docptr, NOP);
}
for (i = 0; i < 6; i++) {
- if (DoC_is_MillenniumPlus(doc))
- ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
- else
- ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
if (ecc_code[i] != empty_write_ecc[i])
emptymatch = 0;
}
- if (DoC_is_MillenniumPlus(doc))
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
- else
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
#if 0
/* If emptymatch=1, we might have an all-0xff data buffer. Check. */
if (emptymatch) {
dummy = ReadDOC(docptr, 2k_ECCStatus);
dummy = ReadDOC(docptr, 2k_ECCStatus);
dummy = ReadDOC(docptr, 2k_ECCStatus);
- } else if (DoC_is_MillenniumPlus(doc)) {
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
} else {
dummy = ReadDOC(docptr, ECCConf);
dummy = ReadDOC(docptr, ECCConf);
/* Error occured ? */
if (dummy & 0x80) {
for (i = 0; i < 6; i++) {
- if (DoC_is_MillenniumPlus(doc))
- calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
- else
- calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
if (calc_ecc[i] != empty_read_syndrome[i])
emptymatch = 0;
}
if (ret > 0)
printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
}
- if (DoC_is_MillenniumPlus(doc))
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
- else
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
if (no_ecc_failures && (ret == -1)) {
printk(KERN_ERR "suppressing ECC failure\n");
ret = 0;
{
struct nand_chip *this = mtd->priv;
struct doc_priv *doc = (void *)this->priv;
- unsigned offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
- int ret;
- size_t retlen;
+ int offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
+ int ret, retlen;
end = min(end, mtd->size); // paranoia
for (offs = 0; offs < end; offs += mtd->erasesize) {
{
struct nand_chip *this = mtd->priv;
struct doc_priv *doc = (void *)this->priv;
- int ret = 0;
- u_char *buf;
- struct NFTLMediaHeader *mh;
- const unsigned psize = 1 << this->page_shift;
- unsigned blocks, maxblocks;
+ u_char *buf = this->data_buf;
+ struct NFTLMediaHeader *mh = (struct NFTLMediaHeader *) buf;
+ const int psize = 1 << this->page_shift;
+ int blocks, maxblocks;
int offs, numheaders;
- buf = kmalloc(mtd->oobblock, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
- return 0;
- }
- if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) goto out;
- mh = (struct NFTLMediaHeader *) buf;
+ if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) return 0;
//#ifdef CONFIG_MTD_DEBUG_VERBOSE
// if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
//#endif
blocks = mtd->size >> this->phys_erase_shift;
- maxblocks = min(32768U, mtd->erasesize - psize);
+ maxblocks = min(32768, mtd->erasesize - psize);
if (mh->UnitSizeFactor == 0x00) {
/* Auto-determine UnitSizeFactor. The constraints are:
mh->UnitSizeFactor = 0xff;
while (blocks > maxblocks) {
blocks >>= 1;
- maxblocks = min(32768U, (maxblocks << 1) + psize);
+ maxblocks = min(32768, (maxblocks << 1) + psize);
mh->UnitSizeFactor--;
}
printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
blocks = mtd->size >> this->bbt_erase_shift;
- maxblocks = min(32768U, mtd->erasesize - psize);
+ maxblocks = min(32768, mtd->erasesize - psize);
}
if (blocks > maxblocks) {
printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
- goto out;
+ return 0;
}
/* Skip past the media headers. */
parts[1].name = " DiskOnChip Remainder partition";
parts[1].offset = offs;
parts[1].size = mtd->size - offs;
- ret = 2;
- goto out;
+ return 2;
}
- ret = 1;
-out:
- kfree(buf);
- return ret;
+ return 1;
}
/* This is a stripped-down copy of the code in inftlmount.c */
{
struct nand_chip *this = mtd->priv;
struct doc_priv *doc = (void *)this->priv;
- int ret = 0;
- u_char *buf;
- struct INFTLMediaHeader *mh;
+ u_char *buf = this->data_buf;
+ struct INFTLMediaHeader *mh = (struct INFTLMediaHeader *) buf;
struct INFTLPartition *ip;
int numparts = 0;
int blocks;
if (inftl_bbt_write)
end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
- buf = kmalloc(mtd->oobblock, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
- return 0;
- }
-
- if (!find_media_headers(mtd, buf, "BNAND", 0)) goto out;
+ if (!find_media_headers(mtd, buf, "BNAND", 0)) return 0;
doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
- mh = (struct INFTLMediaHeader *) buf;
mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
" NoOfBDTLPartitions = %d\n"
" BlockMultiplerBits = %d\n"
" FormatFlgs = %d\n"
- " OsakVersion = %d.%d.%d.%d\n"
+ " OsakVersion = 0x%x\n"
" PercentUsed = %d\n",
mh->bootRecordID, mh->NoOfBootImageBlocks,
mh->NoOfBinaryPartitions,
mh->NoOfBDTLPartitions,
mh->BlockMultiplierBits, mh->FormatFlags,
- ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
- ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
- mh->PercentUsed);
+ mh->OsakVersion, mh->PercentUsed);
//#endif
vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
blocks = mtd->size >> vshift;
if (blocks > 32768) {
printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
- goto out;
+ return 0;
}
blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
if (inftl_bbt_write && (blocks > mtd->erasesize)) {
printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
- goto out;
+ return 0;
}
/* Scan the partitions */
parts[numparts].size = end - parts[numparts].offset;
numparts++;
}
- ret = numparts;
-out:
- kfree(buf);
- return ret;
+ return numparts;
}
static int __init nftl_scan_bbt(struct mtd_info *mtd)
if ((ret = nand_scan_bbt(mtd, NULL)))
return ret;
add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
+#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
+ if (!no_autopart) add_mtd_partitions(mtd, parts, numparts);
#endif
return 0;
}
return -EIO;
}
- if (DoC_is_MillenniumPlus(doc)) {
- this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
- if (inftl_bbt_write)
- this->bbt_td->options |= NAND_BBT_WRITE;
- this->bbt_td->pages[0] = 2;
- this->bbt_md = NULL;
- } else {
- this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
- NAND_BBT_VERSION;
- if (inftl_bbt_write)
- this->bbt_td->options |= NAND_BBT_WRITE;
- this->bbt_td->offs = 8;
- this->bbt_td->len = 8;
- this->bbt_td->veroffs = 7;
- this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
- this->bbt_td->reserved_block_code = 0x01;
- this->bbt_td->pattern = "MSYS_BBT";
-
- this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
- NAND_BBT_VERSION;
- if (inftl_bbt_write)
- this->bbt_md->options |= NAND_BBT_WRITE;
- this->bbt_md->offs = 8;
- this->bbt_md->len = 8;
- this->bbt_md->veroffs = 7;
- this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
- this->bbt_md->reserved_block_code = 0x01;
- this->bbt_md->pattern = "TBB_SYSM";
+ if (mtd->size == (8<<20)) {
+#if 0
+/* This doesn't seem to work for me. I get ECC errors on every page. */
+ /* The Millennium 8MiB is actually an NFTL device! */
+ mtd->name = "DiskOnChip Millennium 8MiB (NFTL)";
+ return nftl_scan_bbt(mtd);
+#endif
+ printk(KERN_ERR "DiskOnChip Millennium 8MiB is not supported.\n");
+ return -EIO;
}
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
+ NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
+ NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+
/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
At least as nand_bbt.c is currently written. */
if ((ret = nand_scan_bbt(mtd, NULL)))
autopartitioning, but I want to give it more thought. */
if (!numparts) return -EIO;
add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
+#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
+ if (!no_autopart) add_mtd_partitions(mtd, parts, numparts);
#endif
return 0;
}
this->write_buf = doc2001_writebuf;
this->read_buf = doc2001_readbuf;
this->verify_buf = doc2001_verifybuf;
+ this->scan_bbt = inftl_scan_bbt;
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
can have multiple chips. */
doc2000_count_chips(mtd);
mtd->name = "DiskOnChip 2000 (INFTL Model)";
- this->scan_bbt = inftl_scan_bbt;
return (4 * doc->chips_per_floor);
} else {
/* Bog-standard Millennium */
doc->chips_per_floor = 1;
mtd->name = "DiskOnChip Millennium";
- this->scan_bbt = nftl_scan_bbt;
return 1;
}
}
-static inline int __init doc2001plus_init(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = (void *)this->priv;
-
- this->write_byte = NULL;
- this->read_byte = doc2001plus_read_byte;
- this->write_buf = doc2001plus_writebuf;
- this->read_buf = doc2001plus_readbuf;
- this->verify_buf = doc2001plus_verifybuf;
- this->scan_bbt = inftl_scan_bbt;
- this->hwcontrol = NULL;
- this->select_chip = doc2001plus_select_chip;
- this->cmdfunc = doc2001plus_command;
- this->enable_hwecc = doc2001plus_enable_hwecc;
-
- doc->chips_per_floor = 1;
- mtd->name = "DiskOnChip Millennium Plus";
-
- return 1;
-}
-
static inline int __init doc_probe(unsigned long physadr)
{
unsigned char ChipID;
case DOC_ChipID_DocMil:
reg = DoC_ECCConf;
break;
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- case 0:
- /* Possible Millennium Plus, need to do more checks */
- /* Possibly release from power down mode */
- for (tmp = 0; (tmp < 4); tmp++)
- ReadDOC(virtadr, Mplus_Power);
-
- /* Reset the Millennium Plus ASIC */
- tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, virtadr, Mplus_DOCControl);
- WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
-
- mdelay(1);
- /* Enable the Millennium Plus ASIC */
- tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, virtadr, Mplus_DOCControl);
- WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
- mdelay(1);
-
- ChipID = ReadDOC(virtadr, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_DocMilPlus16:
- reg = DoC_Mplus_Toggle;
- break;
- case DOC_ChipID_DocMilPlus32:
- printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
- default:
- ret = -ENODEV;
- goto notfound;
- }
- break;
-
default:
ret = -ENODEV;
goto notfound;
}
for (mtd = doclist; mtd; mtd = doc->nextdoc) {
- unsigned char oldval;
- unsigned char newval;
nand = mtd->priv;
doc = (void *)nand->priv;
/* Use the alias resolution register to determine if this is
in fact the same DOC aliased to a new address. If writes
to one chip's alias resolution register change the value on
the other chip, they're the same chip. */
- if (ChipID == DOC_ChipID_DocMilPlus16) {
- oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
- newval = ReadDOC(virtadr, Mplus_AliasResolution);
- } else {
- oldval = ReadDOC(doc->virtadr, AliasResolution);
- newval = ReadDOC(virtadr, AliasResolution);
- }
+ unsigned char oldval = ReadDOC(doc->virtadr, AliasResolution);
+ unsigned char newval = ReadDOC(virtadr, AliasResolution);
if (oldval != newval)
continue;
- if (ChipID == DOC_ChipID_DocMilPlus16) {
- WriteDOC(~newval, virtadr, Mplus_AliasResolution);
- oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
- WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
- } else {
- WriteDOC(~newval, virtadr, AliasResolution);
- oldval = ReadDOC(doc->virtadr, AliasResolution);
- WriteDOC(newval, virtadr, AliasResolution); // restore it
- }
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
newval = ~newval;
if (oldval == newval) {
- printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+ //printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
goto notfound;
}
}
if (ChipID == DOC_ChipID_Doc2k)
numchips = doc2000_init(mtd);
- else if (ChipID == DOC_ChipID_DocMilPlus16)
- numchips = doc2001plus_init(mtd);
else
numchips = doc2001_init(mtd);
kfree(mtd);
}
}
-
+
module_init(init_nanddoc);
module_exit(cleanup_nanddoc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver\n");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000 and Millennium device driver\n");
* The AG-AND chips have nice features for speed improvement,
* which are not supported yet. Read / program 4 pages in one go.
*
- * $Id: nand_base.c,v 1.115 2004/08/09 13:19:45 dwmw2 Exp $
+ * $Id: nand_base.c,v 1.113 2004/07/14 16:31:31 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/bitops.h>
#include <asm/io.h>
-#ifdef CONFIG_MTD_PARTITIONS
+#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
#include <linux/mtd/partitions.h>
#endif
nand_release_chip(mtd);
/*
- * Return success, if no ECC failures, else -EBADMSG
+ * Return success, if no ECC failures, else -EIO
* fs driver will take care of that, because
- * retlen == desired len and result == -EBADMSG
+ * retlen == desired len and result == -EIO
*/
*retlen = read;
- return ecc_failed ? -EBADMSG : 0;
+ return ecc_failed ? -EIO : 0;
}
/**
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Do call back function */
- if (!ret)
- mtd_erase_callback(instr);
+ if (!ret && instr->callback)
+ instr->callback (instr);
/* Deselect and wake up anyone waiting on the device */
nand_release_chip(mtd);
{
struct nand_chip *this = mtd->priv;
-#ifdef CONFIG_MTD_PARTITIONS
- /* Deregister partitions */
+#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
+ /* Unregister partitions */
del_mtd_partitions (mtd);
#endif
- /* Deregister the device */
+ /* Unregister the device */
del_mtd_device (mtd);
/* Free bad block table memory, if allocated */
* Derived from drivers/mtd/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: tx4925ndfmc.c,v 1.3 2004/07/20 02:44:26 dwmw2 Exp $
+ * $Id: tx4925ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $
*
* Copyright (C) 2001 Toshiba Corporation
*
*/
static struct mtd_info *tx4925ndfmc_mtd = NULL;
+/*
+ * Module stuff
+ */
+#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
+#define tx4925ndfmc_init init_module
+#define tx4925ndfmc_cleanup cleanup_module
+#endif
+
/*
* Define partitions for flash devices
*/
/*
- * $Id: redboot.c,v 1.15 2004/08/10 07:55:16 dwmw2 Exp $
+ * $Id: redboot.c,v 1.13 2004/04/01 10:17:40 gthomas Exp $
*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
static char nullstring[] = "unallocated";
#endif
- buf = vmalloc(master->erasesize);
+ buf = kmalloc(master->erasesize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
fl = fl->next;
kfree(old);
}
- vfree(buf);
+ kfree(buf);
return ret;
}
outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */
/* Clear the Tx ring. */
for (i = 0; i < TX_RING_SIZE; i++)
- vp->tx_skbuff[i] = NULL;
+ vp->tx_skbuff[i] = 0;
outl(0, ioaddr + DownListPtr);
}
/* Set receiver mode: presumably accept b-case and phys addr only. */
break; /* It still hasn't been processed. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
+ lp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
dev_kfree_skb(vp->rx_skbuff[i]);
- vp->rx_skbuff[i] = NULL;
+ vp->rx_skbuff[i] = 0;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++)
if (vp->tx_skbuff[i]) {
dev_kfree_skb(vp->tx_skbuff[i]);
- vp->tx_skbuff[i] = NULL;
+ vp->tx_skbuff[i] = 0;
}
}
#endif
unsigned long flags;
- MPU_PORT(dev, PORT_RESET, NULL);
+ MPU_PORT(dev, PORT_RESET, 0);
udelay(100); /* Wait 100us - seems to help */
failed:
printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
- MPU_PORT(dev, PORT_RESET, NULL);
+ MPU_PORT(dev, PORT_RESET, 0);
return -1;
}
config OAKNET
tristate "National DP83902AV (Oak ethernet) support"
- depends on NET_ETHERNET && PPC && BROKEN
+ depends on NET_ETHERNET && PPC
select CRC32
help
Say Y if your machine has this type of Ethernet network card.
config NE2000
tristate "NE2000/NE1000 support"
- depends on ISA || (Q40 && m)
select CRC32
---help---
If you have a network (Ethernet) card of this type, say Y and read
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on 85xx
+ help
+ This driver supports the Gigabit TSEC on the MPC85xx
+ family of chips, and the FEC on the 8540
+
+config GFAR_NAPI
+ bool "NAPI Support"
+ depends on GIANFAR
+
endmenu
#
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_GIANFAR) += gianfar.o gianfar_ethtool.o gianfar_phy.o
#
# link order important here
*/
#define ACE_MINI_SIZE 100
-#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
-#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
-#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
+#define ACE_MINI_BUFSIZE (ACE_MINI_SIZE + 2 + 16)
+#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 2+4+16)
+#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
/*
* There seems to be a magic difference in the effect between 995 and 996
ringp = &ap->skb->rx_std_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_STD_BUFSIZE,
+ ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = pci_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_MINI_BUFSIZE,
+ ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_JUMBO_BUFSIZE,
+ ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
- info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
+ info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure IP header starts on a fresh cache line.
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_STD_BUFSIZE,
+ ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_STD_BUFSIZE;
+ rd->size = ACE_STD_MTU + ETH_HLEN + 4;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure the IP header ends up on a fresh cache line
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_MINI_BUFSIZE,
+ ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_MINI_BUFSIZE;
+ rd->size = ACE_MINI_SIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
+ /*
+ * Make sure the IP header ends up on a fresh cache line
+ */
+ skb_reserve(skb, 2 + 16);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_JUMBO_BUFSIZE,
+ ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_JUMBO_BUFSIZE;
+ rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
- mapsize = ACE_STD_BUFSIZE;
+ mapsize = ACE_STD_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
- mapsize = ACE_JUMBO_BUFSIZE;
+ mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
- mapsize = ACE_MINI_BUFSIZE;
+ mapsize = ACE_MINI_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
#define tigonFwBssAddr 0x00015dd0
#define tigonFwBssLen 0x2080
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
-#define tigonFwText NULL
-#define tigonFwData NULL
-#define tigonFwRodata NULL
+#define tigonFwText 0
+#define tigonFwData 0
+#define tigonFwRodata 0
#else
/* Generated by genfw.c */
static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
lp->tx_skbuff[tx_index]->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
- lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_skbuff[tx_index] = 0;
lp->tx_dma_addr[tx_index] = 0;
}
lp->tx_complete_idx++;
if( dev->mc_count == 0 ){
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
- lp->mc_list = NULL;
+ lp->mc_list = 0;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscous mode */
return 0;
}
+#ifdef CONFIG_NET_FASTROUTE
+static int bond_accept_fastpath(struct net_device *bond_dev, struct dst_entry *dst)
+{
+ return -1;
+}
+#endif
+
/*------------------------- Device initialization ---------------------------*/
/*
bond_set_mode_ops(bond_dev, bond->params.mode);
bond_dev->destructor = free_netdev;
+#ifdef CONFIG_NET_FASTROUTE
+ bond_dev->accept_fastpath = bond_accept_fastpath;
+#endif
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
{
if (lp->dma_buff) {
free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
- lp->dma_buff = NULL;
+ lp->dma_buff = 0;
}
}
#endif
* $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $
*/
-#ifdef ASSEMBLER
+#if ASSEMBLER
# define MO(t,a) (a)
# define VMO(t,a) (a)
/************************************************************************/
typedef volatile struct _I596_RBD
{
-#ifdef INTEL_RETENTIVE
+#if INTEL_RETENTIVE
ushort count; /* Length of data in buf */
ushort offset;
#else
#endif
vol struct _I596_RBD *next; /* Next buffer descriptor in list */
uchar *buf; /* Data buffer */
-#ifdef INTEL_RETENTIVE
+#if INTEL_RETENTIVE
ushort size; /* Size of buf (constant) */
ushort zero;
#else
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
/* Allocate the rx buffers */
else
dev_kfree_skb (skb);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
}
}
{
}
+#ifdef CONFIG_NET_FASTROUTE
+static int dummy_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+ return -1;
+}
+#endif
+
static void __init dummy_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->hard_start_xmit = dummy_xmit;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = dummy_set_address;
+#ifdef CONFIG_NET_FASTROUTE
+ dev->accept_fastpath = dummy_accept_fastpath;
+#endif
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
#include "e1000_hw.h"
-#ifdef DBG
+#if DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
#else
#define E1000_DBG(args...)
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#ifdef DBG
+#if DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Set up the Tx queue early.. */
sp->cur_tx = 0;
sp->dirty_tx = 0;
- sp->last_cmd = NULL;
+ sp->last_cmd = 0;
sp->tx_full = 0;
sp->in_interrupt = 0;
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
}
sp->dirty_tx++;
}
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = sp->rx_skbuff[i];
- sp->rx_skbuff[i] = NULL;
+ sp->rx_skbuff[i] = 0;
/* Clear the Rx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = sp->tx_skbuff[i];
- sp->tx_skbuff[i] = NULL;
+ sp->tx_skbuff[i] = 0;
/* Clear the Tx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = NULL; /* Redundant. */
+ sp->tx_skbuff[entry] = 0; /* Redundant. */
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
sp->last_cmd = mc_setup_frm;
/* Change the command to a NoOp, pointing to the CmdMulti command. */
- sp->tx_skbuff[entry] = NULL;
+ sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
- ep->rx_skbuff[i] = NULL;
+ ep->rx_skbuff[i] = 0;
}
/* Mark the last entry as wrapping the ring. */
ep->rx_ring[i-1].next = ep->rx_ring_dma;
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- ep->tx_skbuff[i] = NULL;
+ ep->tx_skbuff[i] = 0;
ep->tx_ring[i].txstatus = 0x0000;
ep->tx_ring[i].next = ep->tx_ring_dma +
(i+1)*sizeof(struct epic_tx_desc);
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- ep->tx_skbuff[entry] = NULL;
+ ep->tx_skbuff[entry] = 0;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = ep->rx_skbuff[i];
- ep->rx_skbuff[i] = NULL;
+ ep->rx_skbuff[i] = 0;
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = ep->tx_skbuff[i];
- ep->tx_skbuff[i] = NULL;
+ ep->tx_skbuff[i] = 0;
if (!skb)
continue;
pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
if (!eql_is_full(queue)) {
- slave_t *duplicate_slave = NULL;
+ slave_t *duplicate_slave = 0;
duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
if (duplicate_slave != 0)
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
+
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
static int ethertap_debug;
static int max_taps = 1;
-module_param(max_taps, int, 0);
+MODULE_PARM(max_taps, "i");
MODULE_PARM_DESC(max_taps,"Max number of ethernet tap devices");
static struct net_device **tap_map; /* Returns the tap device for a given netlink */
--- /dev/null
+/*
+ * drivers/net/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ * This driver is designed for the Triple-speed Ethernet
+ * controllers on the Freescale 8540/8560 integrated processors,
+ * as well as the Fast Ethernet Controller on the 8540.
+ *
+ * The driver is initialized through OCP. Structures which
+ * define the configuration needed by the board are defined in a
+ * board structure in arch/ppc/platforms (though I do not
+ * discount the possibility that other architectures could one
+ * day be supported. One assumption the driver currently makes
+ * is that the PHY is configured in such a way to advertise all
+ * capabilities. This is a sensible default, and on certain
+ * PHYs, changing this default encounters substantial errata
+ * issues. Future versions may remove this requirement, but for
+ * now, it is best for the firmware to ensure this is the case.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time have passed.). In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. Without NAPI, the packet(s) will be handled
+ * immediately. Both methods will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+#ifdef CONFIG_NET_FASTROUTE
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
+#define irqreturn_t void
+#define IRQ_HANDLED
+#endif
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 1000000
+#undef BRIEF_GFAR_ERRORS
+#define VERBOSE_GFAR_ERRORS
+
+#ifdef CONFIG_GFAR_NAPI
+#define RECEIVE(x) netif_receive_skb(x)
+#else
+#define RECEIVE(x) netif_rx(x)
+#endif
+
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, "
+char gfar_driver_name[] = "Gianfar Ethernet";
+char gfar_driver_version[] = "1.0";
+
+int startup_gfar(struct net_device *dev);
+static int gfar_enet_open(struct net_device *dev);
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_timeout(struct net_device *dev);
+static int gfar_close(struct net_device *dev);
+struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
+static struct net_device_stats *gfar_get_stats(struct net_device *dev);
+static int gfar_set_mac_address(struct net_device *dev);
+static int gfar_change_mtu(struct net_device *dev, int new_mtu);
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void gfar_phy_change(void *data);
+static void gfar_phy_timer(unsigned long data);
+static void adjust_link(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static int init_phy(struct net_device *dev);
+static int gfar_probe(struct ocp_device *ocpdev);
+static void gfar_remove(struct ocp_device *ocpdev);
+void free_skb_resources(struct gfar_private *priv);
+static void gfar_set_multi(struct net_device *dev);
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_poll(struct net_device *dev, int *budget);
+#endif
+#ifdef CONFIG_NET_FASTROUTE
+static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst);
+#endif
+static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length);
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+#else
+static int gfar_clean_rx_ring(struct net_device *dev);
+#endif
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
+
+extern struct ethtool_ops gfar_ethtool_ops;
+extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset,
+ u8 * buf);
+extern void gfar_fill_stats_normon(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 * buf);
+extern int gfar_stats_count_normon(struct net_device *dev);
+
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+/* Called by the ocp code to initialize device data structures
+ * required for bringing up the device.
+ * Returns 0 on success, a negative errno on failure. */
+static int gfar_probe(struct ocp_device *ocpdev)
+{
+	u32 tempval;
+	struct ocp_device *mdiodev;
+	struct net_device *dev = NULL;
+	struct gfar_private *priv = NULL;
+	struct ocp_gfar_data *einfo;
+	int idx;
+	int err = 0;
+	struct ethtool_ops *dev_ethtool_ops;
+
+	einfo = (struct ocp_gfar_data *) ocpdev->def->additions;
+
+	if (einfo == NULL) {
+		printk(KERN_ERR "gfar %d: Missing additional data!\n",
+		       ocpdev->def->index);
+
+		return -ENODEV;
+	}
+
+	/* get a pointer to the register memory which can
+	 * configure the PHYs.  If it's different from this set,
+	 * get the device which has those regs */
+	if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) {
+		mdiodev = ocp_find_device(OCP_ANY_ID,
+					  OCP_FUNC_GFAR, einfo->phyregidx);
+
+		/* If the device which holds the MDIO regs isn't
+		 * up, wait for it to come up */
+		if (mdiodev == NULL)
+			return -EAGAIN;
+	} else {
+		mdiodev = ocpdev;
+	}
+
+	/* Create an ethernet device instance */
+	dev = alloc_etherdev(sizeof (*priv));
+
+	if (dev == NULL)
+		return -ENOMEM;
+
+	priv = netdev_priv(dev);
+
+	/* Set the info in the priv to the current info */
+	priv->einfo = einfo;
+
+	/* get a pointer to the register memory */
+	priv->regs = (struct gfar *)
+	    ioremap(ocpdev->def->paddr, sizeof (struct gfar));
+
+	if (priv->regs == NULL) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+
+	/* Set the PHY base address */
+	priv->phyregs = (struct gfar *)
+	    ioremap(mdiodev->def->paddr, sizeof (struct gfar));
+
+	if (priv->phyregs == NULL) {
+		err = -ENOMEM;
+		goto phy_regs_fail;
+	}
+
+	ocp_set_drvdata(ocpdev, dev);
+
+	/* Stop the DMA engine now, in case it was running before */
+	/* (The firmware could have used it, and left it running). */
+	/* To do this, we write Graceful Receive Stop and Graceful */
+	/* Transmit Stop, and then wait until the corresponding bits */
+	/* in IEVENT indicate the stops have completed. */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+		cpu_relax();
+
+	/* Reset MAC layer */
+	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+
+	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+	gfar_write(&priv->regs->maccfg1, tempval);
+
+	/* Initialize MACCFG2. */
+	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+
+	/* Initialize ECNTRL */
+	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+	/* Copy the station address into the dev structure, */
+	/* and into the address registers MAC_STNADDR1,2. */
+	/* Backwards, because little endian MACs are dumb. */
+	/* Don't set the regs if the firmware already did */
+	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
+
+	/* Set the dev->base_addr to the gfar reg region */
+	dev->base_addr = (unsigned long) (priv->regs);
+
+	SET_MODULE_OWNER(dev);
+
+	/* Fill in the dev structure */
+	dev->open = gfar_enet_open;
+	dev->hard_start_xmit = gfar_start_xmit;
+	dev->tx_timeout = gfar_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_GFAR_NAPI
+	dev->poll = gfar_poll;
+	dev->weight = GFAR_DEV_WEIGHT;
+#endif
+	dev->stop = gfar_close;
+	dev->get_stats = gfar_get_stats;
+	dev->change_mtu = gfar_change_mtu;
+	dev->mtu = 1500;
+	dev->set_multicast_list = gfar_set_multi;
+	dev->flags |= IFF_MULTICAST;
+
+	/* Each device gets a private copy of the ethtool ops so the
+	 * RMON/coalescing hooks below can be patched per device. */
+	dev_ethtool_ops = kmalloc(sizeof(struct ethtool_ops), GFP_KERNEL);
+
+	if(dev_ethtool_ops == NULL) {
+		err = -ENOMEM;
+		goto ethtool_fail;
+	}
+
+	memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops));
+
+	/* If there is no RMON support in this device, we don't
+	 * want to expose non-existant statistics */
+	if((priv->einfo->flags & GFAR_HAS_RMON) == 0) {
+		dev_ethtool_ops->get_strings = gfar_gstrings_normon;
+		dev_ethtool_ops->get_stats_count = gfar_stats_count_normon;
+		dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon;
+	}
+
+	if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) {
+		dev_ethtool_ops->set_coalesce = NULL;
+		dev_ethtool_ops->get_coalesce = NULL;
+	}
+
+	dev->ethtool_ops = dev_ethtool_ops;
+
+#ifdef CONFIG_NET_FASTROUTE
+	dev->accept_fastpath = gfar_accept_fastpath;
+#endif
+
+	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+#ifdef CONFIG_GFAR_BUFSTASH
+	priv->rx_stash_size = STASH_LENGTH;
+#endif
+	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
+	/* Initially, coalescing is disabled */
+	priv->txcoalescing = 0;
+	priv->txcount = 0;
+	priv->txtime = 0;
+	priv->rxcoalescing = 0;
+	priv->rxcount = 0;
+	priv->rxtime = 0;
+
+	err = register_netdev(dev);
+
+	if (err) {
+		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+		       dev->name);
+		goto register_fail;
+	}
+
+	/* Print out the device info */
+	printk(DEVICE_NAME, dev->name);
+	for (idx = 0; idx < 6; idx++)
+		printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
+	printk("\n");
+
+	/* Even more device info helps when determining which kernel */
+	/* provided which set of benchmarks.  Since this is global for all */
+	/* devices, we only print it once */
+#ifdef CONFIG_GFAR_NAPI
+	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
+#else
+	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
+#endif
+	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
+	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+
+	return 0;
+
+
+register_fail:
+	kfree(dev_ethtool_ops);
+ethtool_fail:
+	iounmap((void *) priv->phyregs);
+phy_regs_fail:
+	iounmap((void *) priv->regs);
+regs_fail:
+	free_netdev(dev);
+	/* Return the error recorded by the failing step (previously this
+	 * always returned -ENOMEM, losing e.g. register_netdev()'s code). */
+	return err;
+}
+
+/* OCP removal hook: undo everything gfar_probe() set up for this
+ * device — drop the drvdata link, release the private ethtool ops,
+ * unmap both register regions and free the net_device. */
+static void gfar_remove(struct ocp_device *ocpdev)
+{
+	struct net_device *netdev = ocp_get_drvdata(ocpdev);
+	struct gfar_private *private = netdev_priv(netdev);
+
+	ocp_set_drvdata(ocpdev, NULL);
+
+	kfree(netdev->ethtool_ops);
+	iounmap((void *) private->regs);
+	iounmap((void *) private->phyregs);
+	free_netdev(netdev);
+}
+
+/* Configure the PHY for dev.
+ * Looks up the phy_info for this device, records it in priv, and runs
+ * the PHY's configuration command list.
+ * Returns 0 on success, -1 if no PHY was found. */
+static int init_phy(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_info *phy;
+
+	/* Reset the cached link state so adjust_link() starts fresh */
+	priv->link = 1;
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->olddplx = -1;
+
+	/* get info for this PHY */
+	phy = get_phy_info(dev);
+	if (phy == NULL) {
+		printk(KERN_ERR "%s: No PHY found\n", dev->name);
+		return -1;
+	}
+
+	priv->phyinfo = phy;
+
+	/* Run the commands which configure the PHY */
+	phy_run_commands(dev, phy->config);
+
+	return 0;
+}
+
+/* Program the controller's registers to a known baseline state:
+ * clear pending interrupts, mask everything, zero the hash filters
+ * and rctrl, reset RMON counters (if present), and set buffer-length,
+ * frame-length and attribute registers to their defaults. */
+static void init_registers(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+
+	/* Initialize IMASK */
+	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+
+	/* Init hash registers to zero */
+	gfar_write(&priv->regs->iaddr0, 0);
+	gfar_write(&priv->regs->iaddr1, 0);
+	gfar_write(&priv->regs->iaddr2, 0);
+	gfar_write(&priv->regs->iaddr3, 0);
+	gfar_write(&priv->regs->iaddr4, 0);
+	gfar_write(&priv->regs->iaddr5, 0);
+	gfar_write(&priv->regs->iaddr6, 0);
+	gfar_write(&priv->regs->iaddr7, 0);
+
+	gfar_write(&priv->regs->gaddr0, 0);
+	gfar_write(&priv->regs->gaddr1, 0);
+	gfar_write(&priv->regs->gaddr2, 0);
+	gfar_write(&priv->regs->gaddr3, 0);
+	gfar_write(&priv->regs->gaddr4, 0);
+	gfar_write(&priv->regs->gaddr5, 0);
+	gfar_write(&priv->regs->gaddr6, 0);
+	gfar_write(&priv->regs->gaddr7, 0);
+
+	/* Zero out rctrl */
+	gfar_write(&priv->regs->rctrl, 0x00000000);
+
+	/* Zero out the rmon mib registers if it has them */
+	if (priv->einfo->flags & GFAR_HAS_RMON) {
+		/* NOTE(review): memset on memory-mapped registers — assumes
+		 * the RMON block tolerates plain stores; confirm per TSEC
+		 * reference manual. */
+		memset((void *) &(priv->regs->rmon), 0,
+		       sizeof (struct rmon_mib));
+
+		/* Mask off the CAM interrupts */
+		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
+		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+	}
+
+	/* Initialize the max receive buffer length */
+	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+
+#ifdef CONFIG_GFAR_BUFSTASH
+	/* If we are stashing buffers, we need to set the
+	 * extraction length to the size of the buffer */
+	gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
+#endif
+
+	/* Initialize the Minimum Frame Length Register */
+	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+
+	/* Setup Attributes so that snooping is on for rx */
+	gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
+	gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
+
+	/* Assign the TBI an address which won't conflict with the PHYs */
+	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
+}
+
+/* Halt the controller: take the link down, mask/ack interrupts,
+ * gracefully stop RX/TX DMA, disable the MAC, release all IRQs and
+ * free ring/skb resources.  Counterpart of startup_gfar(). */
+void stop_gfar(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	unsigned long flags;
+	u32 tempval;
+
+	/* Lock it down */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Tell the kernel the link is down */
+	priv->link = 0;
+	adjust_link(dev);
+
+	/* Mask all interrupts */
+	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+	/* Clear all interrupts */
+	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+	/* Stop the DMA, and wait for it to stop */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
+	    != (DMACTRL_GRS | DMACTRL_GTS)) {
+		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+		gfar_write(&priv->regs->dmactrl, tempval);
+
+		while (!(gfar_read(&priv->regs->ievent) &
+			 (IEVENT_GRSC | IEVENT_GTSC)))
+			cpu_relax();
+	}
+
+	/* Disable Rx and Tx */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+
+	if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+		phy_run_commands(dev, priv->phyinfo->shutdown);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Free the IRQs */
+	if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
+		free_irq(priv->einfo->interruptError, dev);
+		free_irq(priv->einfo->interruptTransmit, dev);
+		free_irq(priv->einfo->interruptReceive, dev);
+	} else {
+		free_irq(priv->einfo->interruptTransmit, dev);
+	}
+
+	if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+		free_irq(priv->einfo->interruptPHY, dev);
+	} else {
+		del_timer_sync(&priv->phy_info_timer);
+	}
+
+	free_skb_resources(priv);
+
+	/* Unmap the descriptor rings with the same element sizes used when
+	 * they were mapped in startup_gfar() (was sizeof(struct txbd)/
+	 * sizeof(struct rxbd), which does not match the txbd8/rxbd8
+	 * descriptors actually mapped). */
+	dma_unmap_single(NULL, gfar_read(&regs->tbase),
+			 sizeof(struct txbd8)*priv->tx_ring_size,
+			 DMA_BIDIRECTIONAL);
+	dma_unmap_single(NULL, gfar_read(&regs->rbase),
+			 sizeof(struct rxbd8)*priv->rx_ring_size,
+			 DMA_BIDIRECTIONAL);
+
+	/* Free the buffer descriptors */
+	kfree(priv->tx_bd_base);
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff.
+ * Each in-flight buffer is DMA-unmapped before its skb is released;
+ * rx descriptor fields are zeroed so the hardware view is clean. */
+void free_skb_resources(struct gfar_private *priv)
+{
+	struct rxbd8 *rxbdp;
+	struct txbd8 *txbdp;
+	int i;
+
+	/* Go through all the buffer descriptors and free their data buffers */
+	txbdp = priv->tx_bd_base;
+
+	for (i = 0; i < priv->tx_ring_size; i++) {
+
+		if (priv->tx_skbuff[i]) {
+			dma_unmap_single(NULL, txbdp->bufPtr,
+					 txbdp->length,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
+			priv->tx_skbuff[i] = NULL;
+		}
+	}
+
+	kfree(priv->tx_skbuff);
+
+	rxbdp = priv->rx_bd_base;
+
+	/* rx_skbuff is not guaranteed to be allocated, so only
+	 * free it and its contents if it is allocated */
+	if(priv->rx_skbuff != NULL) {
+		for (i = 0; i < priv->rx_ring_size; i++) {
+			if (priv->rx_skbuff[i]) {
+				/* rx buffers were mapped at buffer size plus
+				 * the stash alignment pad — unmap the same */
+				dma_unmap_single(NULL, rxbdp->bufPtr,
+						 priv->rx_buffer_size
+						 + RXBUF_ALIGNMENT,
+						 DMA_FROM_DEVICE);
+
+				dev_kfree_skb_any(priv->rx_skbuff[i]);
+				priv->rx_skbuff[i] = NULL;
+			}
+
+			rxbdp->status = 0;
+			rxbdp->length = 0;
+			rxbdp->bufPtr = 0;
+
+			rxbdp++;
+		}
+
+		kfree(priv->rx_skbuff);
+	}
+}
+
+/* Bring the controller up and running: allocate and map the BD rings,
+ * set up the skb bookkeeping, pre-fill the rx ring, request IRQs,
+ * hook up the PHY, program coalescing, and finally enable RX/TX.
+ * Returns 0 on success, negative on failure. */
+int startup_gfar(struct net_device *dev)
+{
+	struct txbd8 *txbdp;
+	struct rxbd8 *rxbdp;
+	unsigned long addr;
+	int i;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	u32 tempval;
+	int err = 0;
+
+	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+	/* Allocate memory for the buffer descriptors.  Cast to the
+	 * full pointer width (was (unsigned int), which would truncate
+	 * the pointer on 64-bit builds). */
+	addr =
+	    (unsigned long) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size +
+				    sizeof (struct rxbd8) * priv->rx_ring_size,
+				    GFP_KERNEL);
+
+	if (addr == 0) {
+		printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
+		       dev->name);
+		return -ENOMEM;
+	}
+
+	priv->tx_bd_base = (struct txbd8 *) addr;
+
+	/* enet DMA only understands physical addresses */
+	gfar_write(&regs->tbase,
+		   dma_map_single(NULL, (void *)addr,
+				  sizeof(struct txbd8) * priv->tx_ring_size,
+				  DMA_BIDIRECTIONAL));
+
+	/* Start the rx descriptor ring where the tx ring leaves off */
+	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
+	priv->rx_bd_base = (struct rxbd8 *) addr;
+	gfar_write(&regs->rbase,
+		   dma_map_single(NULL, (void *)addr,
+				  sizeof(struct rxbd8) * priv->rx_ring_size,
+				  DMA_BIDIRECTIONAL));
+
+	/* Setup the skbuff rings */
+	priv->tx_skbuff =
+	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+					priv->tx_ring_size, GFP_KERNEL);
+
+	if (priv->tx_skbuff == NULL) {
+		printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
+		       dev->name);
+		err = -ENOMEM;
+		goto tx_skb_fail;
+	}
+
+	for (i = 0; i < priv->tx_ring_size; i++)
+		priv->tx_skbuff[i] = NULL;
+
+	priv->rx_skbuff =
+	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
+					priv->rx_ring_size, GFP_KERNEL);
+
+	if (priv->rx_skbuff == NULL) {
+		printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
+		       dev->name);
+		err = -ENOMEM;
+		goto rx_skb_fail;
+	}
+
+	for (i = 0; i < priv->rx_ring_size; i++)
+		priv->rx_skbuff[i] = NULL;
+
+	/* Initialize some variables in our dev structure */
+	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
+	priv->cur_rx = priv->rx_bd_base;
+	priv->skb_curtx = priv->skb_dirtytx = 0;
+	priv->skb_currx = 0;
+
+	/* Initialize Transmit Descriptor Ring */
+	txbdp = priv->tx_bd_base;
+	for (i = 0; i < priv->tx_ring_size; i++) {
+		txbdp->status = 0;
+		txbdp->length = 0;
+		txbdp->bufPtr = 0;
+		txbdp++;
+	}
+
+	/* Set the last descriptor in the ring to indicate wrap */
+	txbdp--;
+	txbdp->status |= TXBD_WRAP;
+
+	rxbdp = priv->rx_bd_base;
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct sk_buff *skb = NULL;
+
+		rxbdp->status = 0;
+
+		skb = gfar_new_skb(dev, rxbdp);
+
+		priv->rx_skbuff[i] = skb;
+
+		rxbdp++;
+	}
+
+	/* Set the last descriptor in the ring to wrap */
+	rxbdp--;
+	rxbdp->status |= RXBD_WRAP;
+
+	/* If the device has multiple interrupts, register for
+	 * them.  Otherwise, only register for the one */
+	if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
+		/* Install our interrupt handlers for Error,
+		 * Transmit, and Receive */
+		if (request_irq(priv->einfo->interruptError, gfar_error,
+				SA_SHIRQ, "enet_error", dev) < 0) {
+			printk(KERN_ERR "%s: Can't get IRQ %d\n",
+			       dev->name, priv->einfo->interruptError);
+
+			err = -1;
+			goto err_irq_fail;
+		}
+
+		if (request_irq(priv->einfo->interruptTransmit, gfar_transmit,
+				SA_SHIRQ, "enet_tx", dev) < 0) {
+			printk(KERN_ERR "%s: Can't get IRQ %d\n",
+			       dev->name, priv->einfo->interruptTransmit);
+
+			err = -1;
+
+			goto tx_irq_fail;
+		}
+
+		if (request_irq(priv->einfo->interruptReceive, gfar_receive,
+				SA_SHIRQ, "enet_rx", dev) < 0) {
+			printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
+			       dev->name, priv->einfo->interruptReceive);
+
+			err = -1;
+			goto rx_irq_fail;
+		}
+	} else {
+		if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt,
+				SA_SHIRQ, "gfar_interrupt", dev) < 0) {
+			/* Report the IRQ we actually asked for (was
+			 * interruptError, the wrong line). */
+			printk(KERN_ERR "%s: Can't get IRQ %d\n",
+			       dev->name, priv->einfo->interruptTransmit);
+
+			err = -1;
+			goto err_irq_fail;
+		}
+	}
+
+	/* Grab the PHY interrupt */
+	if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
+		if (request_irq(priv->einfo->interruptPHY, phy_interrupt,
+				SA_SHIRQ, "phy_interrupt", dev) < 0) {
+			printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
+			       dev->name, priv->einfo->interruptPHY);
+
+			err = -1;
+
+			if (priv->einfo->flags & GFAR_HAS_MULTI_INTR)
+				goto phy_irq_fail;
+			else
+				goto tx_irq_fail;
+		}
+	} else {
+		/* No PHY interrupt line: poll link state from a timer */
+		init_timer(&priv->phy_info_timer);
+		priv->phy_info_timer.function = &gfar_phy_timer;
+		priv->phy_info_timer.data = (unsigned long) dev;
+		mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
+	}
+
+	/* Set up the bottom half queue */
+	INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev);
+
+	/* Configure the PHY interrupt */
+	phy_run_commands(dev, priv->phyinfo->startup);
+
+	/* Tell the kernel the link is up, and determine the
+	 * negotiated features (speed, duplex) */
+	adjust_link(dev);
+
+	if (priv->link == 0)
+		printk(KERN_INFO "%s: No link detected\n", dev->name);
+
+	/* Configure the coalescing support */
+	if (priv->txcoalescing)
+		gfar_write(&regs->txic,
+			   mk_ic_value(priv->txcount, priv->txtime));
+	else
+		gfar_write(&regs->txic, 0);
+
+	if (priv->rxcoalescing)
+		gfar_write(&regs->rxic,
+			   mk_ic_value(priv->rxcount, priv->rxtime));
+	else
+		gfar_write(&regs->rxic, 0);
+
+	init_waitqueue_head(&priv->rxcleanupq);
+
+	/* Enable Rx and Tx in MACCFG1 */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+	gfar_write(&regs->maccfg1, tempval);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* Clear THLT, so that the DMA starts polling now */
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* Unmask the interrupts we look for */
+	gfar_write(&regs->imask, IMASK_DEFAULT);
+
+	return 0;
+
+phy_irq_fail:
+	free_irq(priv->einfo->interruptReceive, dev);
+rx_irq_fail:
+	free_irq(priv->einfo->interruptTransmit, dev);
+tx_irq_fail:
+	free_irq(priv->einfo->interruptError, dev);
+err_irq_fail:
+rx_skb_fail:
+	free_skb_resources(priv);
+tx_skb_fail:
+	kfree(priv->tx_bd_base);
+	return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* (dev->open hook).  Initializes registers, programs the MAC address,
+ * brings up the PHY and the controller, then starts the TX queue. */
+/* Returns 0 for success, negative on failure. */
+static int gfar_enet_open(struct net_device *dev)
+{
+	int err;
+
+	/* Initialize a bunch of registers */
+	init_registers(dev);
+
+	gfar_set_mac_address(dev);
+
+	err = init_phy(dev);
+
+	if (err)
+		return err;
+
+	err = startup_gfar(dev);
+
+	/* Only start the queue if the controller actually came up
+	 * (previously the queue was started even on failure). */
+	if (err)
+		return err;
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* Fills the next free tx descriptor with the skb's DMA mapping, flags
+ * it READY|LAST|CRC with an interrupt request, advances cur_tx, and
+ * kicks the DMA engine.  Stops the queue when the ring becomes full.
+ * Always returns 0 (frame accepted). */
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct txbd8 *txbdp;
+
+	/* Update transmit stats */
+	priv->stats.tx_bytes += skb->len;
+
+	/* Lock priv now */
+	spin_lock_irq(&priv->lock);
+
+	/* Point at the first free tx descriptor */
+	txbdp = priv->cur_tx;
+
+	/* Clear all but the WRAP status flags */
+	txbdp->status &= TXBD_WRAP;
+
+	/* Set buffer length and pointer */
+	txbdp->length = skb->len;
+	txbdp->bufPtr = dma_map_single(NULL, skb->data,
+				       skb->len, DMA_TO_DEVICE);
+
+	/* Save the skb pointer so we can free it later */
+	priv->tx_skbuff[priv->skb_curtx] = skb;
+
+	/* Update the current skb pointer (wrapping if this was the last) */
+	priv->skb_curtx =
+	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+	/* Flag the BD as interrupt-causing */
+	txbdp->status |= TXBD_INTERRUPT;
+
+	/* Flag the BD as ready to go, last in frame, and  */
+	/* in need of CRC */
+	txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
+
+	dev->trans_start = jiffies;
+
+	/* If this was the last BD in the ring, the next one */
+	/* is at the beginning of the ring */
+	if (txbdp->status & TXBD_WRAP)
+		txbdp = priv->tx_bd_base;
+	else
+		txbdp++;
+
+	/* If the next BD still needs to be cleaned up, then the bds
+	   are full.  We need to tell the kernel to stop sending us stuff. */
+	/* NOTE(review): tx_fifo_errors is bumped here although this is a
+	 * ring-full condition, not a FIFO error — presumably intentional
+	 * as a visible "queue stalled" counter; confirm. */
+	if (txbdp == priv->dirty_tx) {
+		netif_stop_queue(dev);
+
+		priv->stats.tx_fifo_errors++;
+	}
+
+	/* Update the current txbd to the next one */
+	priv->cur_tx = txbdp;
+
+	/* Tell the DMA to go go go */
+	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+
+	/* Unlock priv */
+	spin_unlock_irq(&priv->lock);
+
+	return 0;
+}
+
+/* Stops the kernel queue, and halts the controller */
+/* (dev->stop hook; always returns 0). */
+static int gfar_close(struct net_device *dev)
+{
+	stop_gfar(dev);
+
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+/* dev->get_stats hook: hand back the statistics block kept in the
+ * device's private data. */
+static struct net_device_stats * gfar_get_stats(struct net_device *dev)
+{
+	struct gfar_private *gfar = netdev_priv(dev);
+	struct net_device_stats *stats = &gfar->stats;
+
+	return stats;
+}
+
+/* Changes the mac address if the controller is not running. */
+/* Writes dev->dev_addr into MACSTNADDR1/2, byte-reversed because the
+ * MAC stores the station address in little-endian order.
+ * Always returns 0. */
+int gfar_set_mac_address(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int i;
+	char tmpbuf[MAC_ADDR_LEN];
+	u32 tempval;
+
+	/* Now copy it into the mac registers backwards, cuz */
+	/* little endian is silly */
+	for (i = 0; i < MAC_ADDR_LEN; i++)
+		tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
+
+	/* NOTE(review): reads tmpbuf (a char array) through u32 pointers —
+	 * assumes the stack buffer is 4-byte aligned; confirm this holds
+	 * on all targets this driver supports. */
+	gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
+
+	tempval = *((u32 *) (tmpbuf + 4));
+
+	gfar_write(&priv->regs->macstnaddr2, tempval);
+
+	return 0;
+}
+
+/**********************************************************************
+ * gfar_accept_fastpath
+ *
+ * Used to authenticate to the kernel that a fast path entry can be
+ * added to device's routing table cache
+ *
+ * Input : pointer to ethernet interface network device structure and
+ *         a pointer to the designated entry to be added to the cache.
+ * Output : zero upon success, negative upon failure
+ **********************************************************************/
+#ifdef CONFIG_NET_FASTROUTE
+static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
+{
+	struct net_device *out_dev = dst->dev;
+
+	/* Only IPv4 routes qualify for the fast path */
+	if (dst->ops->protocol != __constant_htons(ETH_P_IP))
+		return -1;
+
+	/* The output device must be ethernet and must itself
+	 * participate in fast-path forwarding */
+	if (out_dev->type != ARPHRD_ETHER)
+		return -1;
+
+	if (out_dev->accept_fastpath == NULL)
+		return -1;
+
+	return 0;
+}
+#endif
+
+/* try_fastroute() -- Checks the fastroute cache to see if a given packet
+ * can be routed immediately to another device. If it can, we send it.
+ * If we used a fastroute, we return 1. Otherwise, we return 0.
+ * Returns 0 if CONFIG_NET_FASTROUTE is not on
+ */
+static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length)
+{
+#ifdef CONFIG_NET_FASTROUTE
+	struct ethhdr *eth;
+	struct iphdr *iph;
+	unsigned int hash;
+	struct rtable *rt;
+	struct net_device *odev;
+	struct gfar_private *priv = netdev_priv(dev);
+	unsigned int CPU_ID = smp_processor_id();
+
+	eth = (struct ethhdr *) (skb->data);
+
+	/* Only route ethernet IP packets */
+	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+		iph = (struct iphdr *) (skb->data + ETH_HLEN);
+
+		/* Generate the hash value from the low bytes of the
+		 * source and destination addresses */
+		hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) & iph->saddr)) & NETDEV_FASTROUTE_HMASK;
+
+		rt = (struct rtable *) (dev->fastpath[hash]);
+		if (rt != NULL
+		    && ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst))
+		    && ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src))
+		    && !(rt->u.dst.obsolete)) {
+			odev = rt->u.dst.dev;
+			netdev_rx_stat[CPU_ID].fastroute_hit++;
+
+			/* Make sure the packet is:
+			 * 1) IPv4
+			 * 2) without any options (header length of 5)
+			 * 3) Not a multicast packet
+			 * 4) going to a valid destination
+			 * 5) Not out of time-to-live
+			 */
+			if (iph->version == 4
+			    && iph->ihl == 5
+			    && (!(eth->h_dest[0] & 0x01))
+			    && neigh_is_valid(rt->u.dst.neighbour)
+			    && iph->ttl > 1) {
+
+				/* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */
+				/* BUGFIX: spin_is_locked() takes a pointer to the
+				 * spinlock; the original passed odev->xmit_lock by
+				 * value, which does not compile. */
+				if ((!netif_queue_stopped(odev))
+				    && (!spin_is_locked(&odev->xmit_lock))
+				    && (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) {
+
+					skb->pkt_type = PACKET_FASTROUTE;
+					skb->protocol = __constant_htons(ETH_P_IP);
+					ip_decrease_ttl(iph);
+					memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN);
+					memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN);
+					skb->dev = odev;
+
+					/* Prep the skb for the packet */
+					skb_put(skb, length);
+
+					if (odev->hard_start_xmit(skb, odev) != 0) {
+						panic("%s: FastRoute path corrupted", dev->name);
+					}
+					netdev_rx_stat[CPU_ID].fastroute_success++;
+				}
+
+				/* Semi Fast Route Path: Mark the packet as needing fast routing, but let the
+				 * stack handle getting it to the device */
+				else {
+					skb->pkt_type = PACKET_FASTROUTE;
+					skb->nh.raw = skb->data + ETH_HLEN;
+					skb->protocol = __constant_htons(ETH_P_IP);
+					netdev_rx_stat[CPU_ID].fastroute_defer++;
+
+					/* Prep the skb for the packet */
+					skb_put(skb, length);
+
+					if(RECEIVE(skb) == NET_RX_DROP) {
+						priv->extra_stats.kernel_dropped++;
+					}
+				}
+
+				return 1;
+			}
+		}
+	}
+#endif /* CONFIG_NET_FASTROUTE */
+	return 0;
+}
+
+/* Change the MTU, resizing the rx buffers to the next multiple of
+ * INCREMENTAL_BUFFER_SIZE and restarting the controller if it was up.
+ * Returns 0 on success, -EINVAL for out-of-range MTUs. */
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int tempsize, tempval;
+	struct gfar_private *priv = netdev_priv(dev);
+	int oldsize = priv->rx_buffer_size;
+	/* 18 = 14-byte ethernet header + 4-byte FCS (presumably; no
+	 * VLAN allowance here -- TODO confirm) */
+	int frame_size = new_mtu + 18;
+
+	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+		printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
+		return -EINVAL;
+	}
+
+	/* Round up to the next INCREMENTAL_BUFFER_SIZE boundary */
+	tempsize =
+	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+	    INCREMENTAL_BUFFER_SIZE;
+
+	/* Only stop and start the controller if it isn't already
+	 * stopped */
+	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+		stop_gfar(dev);
+
+	priv->rx_buffer_size = tempsize;
+
+	dev->mtu = new_mtu;
+
+	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+
+	/* If the mtu is larger than the max size for standard
+	 * ethernet frames (ie, a jumbo frame), then set maccfg2
+	 * to allow huge frames, and to check the length */
+	tempval = gfar_read(&priv->regs->maccfg2);
+
+	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+	else
+		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+
+	gfar_write(&priv->regs->maccfg2, tempval);
+
+	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+		startup_gfar(dev);
+
+	return 0;
+}
+
+/* gfar_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem. */
+static void gfar_timeout(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	priv->stats.tx_errors++;
+
+	/* Full restart: tear down and rebuild the rings/controller */
+	if (dev->flags & IFF_UP) {
+		stop_gfar(dev);
+		startup_gfar(dev);
+	}
+
+	/* Kick the stack so it retries the stalled transmit */
+	if (!netif_queue_stopped(dev))
+		netif_schedule(dev);
+}
+
+/* Interrupt Handler for Transmit complete: reclaims finished TxBDs,
+ * frees their skbs, and re-wakes the queue if it was stopped. */
+static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct txbd8 *bdp;
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+	/* Lock priv */
+	spin_lock(&priv->lock);
+	bdp = priv->dirty_tx;
+	while ((bdp->status & TXBD_READY) == 0) {
+		/* If dirty_tx and cur_tx are the same, then either the */
+		/* ring is empty or full now (it could only be full in the beginning, */
+		/* obviously). If it is empty, we are done. */
+		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
+			break;
+
+		priv->stats.tx_packets++;
+
+		/* Deferred means some collisions occurred during transmit, */
+		/* but we eventually sent the packet. */
+		if (bdp->status & TXBD_DEF)
+			priv->stats.collisions++;
+
+		/* Free the sk buffer associated with this TxBD */
+		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+		/* Ring size is a power of two, so masking wraps the index */
+		priv->skb_dirtytx =
+		    (priv->skb_dirtytx +
+		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+
+		/* update bdp to point at next bd in the ring (wrapping if necessary) */
+		if (bdp->status & TXBD_WRAP)
+			bdp = priv->tx_bd_base;
+		else
+			bdp++;
+
+		/* Move dirty_tx to be the next bd */
+		priv->dirty_tx = bdp;
+
+		/* We freed a buffer, so now we can restart transmission */
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+	} /* while ((bdp->status & TXBD_READY) == 0) */
+
+	/* If we are coalescing the interrupts, reset the timer */
+	/* Otherwise, clear it */
+	if (priv->txcoalescing)
+		gfar_write(&priv->regs->txic,
+			   mk_ic_value(priv->txcount, priv->txtime));
+	else
+		gfar_write(&priv->regs->txic, 0);
+
+	spin_unlock(&priv->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* Allocate a fresh, alignment-adjusted skb and attach it to the given
+ * RxBD, marking the descriptor empty again.  Returns NULL if
+ * allocation keeps failing after SKB_ALLOC_TIMEOUT attempts. */
+struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct sk_buff *skb = NULL;
+	unsigned int timeout = SKB_ALLOC_TIMEOUT;
+
+	/* We have to allocate the skb, so keep trying till we succeed */
+	while ((!skb) && timeout--)
+		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
+
+	if (skb == NULL)
+		return NULL;
+
+	/* We need the data buffer to be aligned properly. We will reserve
+	 * as many bytes as needed to align the data properly
+	 */
+	skb_reserve(skb,
+		    RXBUF_ALIGNMENT -
+		    (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
+
+	skb->dev = dev;
+
+	/* Hand the buffer to the controller for DMA */
+	bdp->bufPtr = dma_map_single(NULL, skb->data,
+				     priv->rx_buffer_size + RXBUF_ALIGNMENT,
+				     DMA_FROM_DEVICE);
+
+	bdp->length = 0;
+
+	/* Mark the buffer empty */
+	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
+
+	return skb;
+}
+
+static inline void count_errors(unsigned short status, struct gfar_private *priv)
+{
+ struct net_device_stats *stats = &priv->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors
+ * matter */
+ if (status & RXBD_TRUNCATED) {
+ stats->rx_length_errors++;
+
+ estats->rx_trunc++;
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (status & RXBD_LARGE)
+ estats->rx_large++;
+ else
+ estats->rx_short++;
+ }
+ if (status & RXBD_NONOCTET) {
+ stats->rx_frame_errors++;
+ estats->rx_nonoctet++;
+ }
+ if (status & RXBD_CRCERR) {
+ estats->rx_crcerr++;
+ stats->rx_crc_errors++;
+ }
+ if (status & RXBD_OVERRUN) {
+ estats->rx_overrun++;
+ stats->rx_crc_errors++;
+ }
+}
+
+/* Receive interrupt handler.  Under NAPI, mask rx interrupts and
+ * schedule the poll routine; otherwise drain the ring directly under
+ * the private lock. */
+irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+#ifdef CONFIG_GFAR_NAPI
+	u32 tempval;
+#endif
+
+	/* Clear IEVENT, so rx interrupt isn't called again
+	 * because of this interrupt */
+	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+	/* support NAPI */
+#ifdef CONFIG_GFAR_NAPI
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask further rx interrupts until gfar_poll re-enables
+		 * them */
+		tempval = gfar_read(&priv->regs->imask);
+		tempval &= IMASK_RX_DISABLED;
+		gfar_write(&priv->regs->imask, tempval);
+
+		__netif_rx_schedule(dev);
+	} else {
+#ifdef VERBOSE_GFAR_ERRORS
+		/* BUGFIX: gfar_read() takes the register's address; the
+		 * original passed the register by value and failed to
+		 * compile with VERBOSE_GFAR_ERRORS enabled. */
+		printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
+		       dev->name, gfar_read(&priv->regs->ievent),
+		       gfar_read(&priv->regs->imask));
+#endif
+	}
+#else
+
+	spin_lock(&priv->lock);
+	gfar_clean_rx_ring(dev);
+
+	/* If we are coalescing interrupts, update the timer */
+	/* Otherwise, clear it */
+	if (priv->rxcoalescing)
+		gfar_write(&priv->regs->rxic,
+			   mk_ic_value(priv->rxcount, priv->rxtime));
+	else
+		gfar_write(&priv->regs->rxic, 0);
+
+	/* Just in case we need to wake the ring param changer */
+	priv->rxclean = 1;
+
+	spin_unlock(&priv->lock);
+#endif
+
+	return IRQ_HANDLED;
+}
+
+
+/* gfar_process_frame() -- handle one incoming packet if skb
+ * isn't NULL. Try the fastroute before using the stack */
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+			      int length)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* A missing skb means the ring entry was never refilled;
+	 * count the drop and move on. */
+	if (skb == NULL) {
+#ifdef BRIEF_GFAR_ERRORS
+		printk(KERN_WARNING "%s: Missing skb!!.\n",
+		       dev->name);
+#endif
+		priv->stats.rx_dropped++;
+		priv->extra_stats.rx_skbmissing++;
+	} else {
+		if(try_fastroute(skb, dev, length) == 0) {
+			/* Prep the skb for the packet */
+			skb_put(skb, length);
+
+			/* Tell the skb what kind of packet this is */
+			skb->protocol = eth_type_trans(skb, dev);
+
+			/* Send the packet up the stack */
+			if (RECEIVE(skb) == NET_RX_DROP) {
+				priv->extra_stats.kernel_dropped++;
+			}
+		}
+	}
+
+	/* Always reports success */
+	return 0;
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until all are gone (or, in the case of NAPI, the budget/quota
+ * has been reached). Returns the number of frames handled
+ */
+#ifdef CONFIG_GFAR_NAPI
+static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+#else
+static int gfar_clean_rx_ring(struct net_device *dev)
+#endif
+{
+	struct rxbd8 *bdp;
+	struct sk_buff *skb;
+	u16 pkt_len;
+	int howmany = 0;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Get the first full descriptor */
+	bdp = priv->cur_rx;
+
+	/* Loop-termination test: under NAPI we also stop once the work
+	 * limit is exhausted (note --rx_work_limit decrements on every
+	 * test, so the limit is consumed even by error frames). */
+#ifdef CONFIG_GFAR_NAPI
+#define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))
+#else
+#define GFAR_RXDONE() (bdp->status & RXBD_EMPTY)
+#endif
+	while (!GFAR_RXDONE()) {
+		skb = priv->rx_skbuff[priv->skb_currx];
+
+		/* Only pass the frame up if no error bits are set */
+		if (!(bdp->status &
+		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
+		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
+			/* Increment the number of packets */
+			priv->stats.rx_packets++;
+			howmany++;
+
+			/* Remove the FCS from the packet length */
+			pkt_len = bdp->length - 4;
+
+			gfar_process_frame(dev, skb, pkt_len);
+
+			priv->stats.rx_bytes += pkt_len;
+
+		} else {
+			count_errors(bdp->status, priv);
+
+			if (skb)
+				dev_kfree_skb_any(skb);
+
+			priv->rx_skbuff[priv->skb_currx] = NULL;
+		}
+
+		dev->last_rx = jiffies;
+
+		/* Clear the status flags for this buffer */
+		bdp->status &= ~RXBD_STATS;
+
+		/* Add another skb for the future */
+		skb = gfar_new_skb(dev, bdp);
+		priv->rx_skbuff[priv->skb_currx] = skb;
+
+		/* Update to the next pointer */
+		if (bdp->status & RXBD_WRAP)
+			bdp = priv->rx_bd_base;
+		else
+			bdp++;
+
+		/* update to point at the next skb */
+		priv->skb_currx =
+		    (priv->skb_currx +
+		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);
+
+	}
+
+	/* Update the current rxbd pointer to be the next one */
+	priv->cur_rx = bdp;
+
+	/* If no packets have arrived since the
+	 * last one we processed, clear the IEVENT RX and
+	 * BSY bits so that another interrupt won't be
+	 * generated when we set IMASK */
+	if (bdp->status & RXBD_EMPTY)
+		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
+
+	return howmany;
+}
+
+#ifdef CONFIG_GFAR_NAPI
+/* NAPI poll routine: drain up to min(*budget, dev->quota) frames from
+ * the rx ring; when the ring empties within budget, leave polling mode
+ * and re-enable rx interrupts.  Returns 1 if more work remains. */
+static int gfar_poll(struct net_device *dev, int *budget)
+{
+	int howmany;
+	struct gfar_private *priv = netdev_priv(dev);
+	int rx_work_limit = *budget;
+
+	if (rx_work_limit > dev->quota)
+		rx_work_limit = dev->quota;
+
+	spin_lock(&priv->lock);
+	howmany = gfar_clean_rx_ring(dev, rx_work_limit);
+
+	dev->quota -= howmany;
+	rx_work_limit -= howmany;
+	*budget -= howmany;
+
+	if (rx_work_limit >= 0) {
+		netif_rx_complete(dev);
+
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+
+		gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+
+		/* If we are coalescing interrupts, update the timer */
+		/* Otherwise, clear it */
+		if (priv->rxcoalescing)
+			gfar_write(&priv->regs->rxic,
+				   mk_ic_value(priv->rxcount, priv->rxtime));
+		else
+			gfar_write(&priv->regs->rxic, 0);
+
+		/* Signal to the ring size changer that it's safe to go */
+		priv->rxclean = 1;
+	}
+
+	/* BUGFIX: spin_unlock() takes a pointer to the lock; the
+	 * original passed priv->lock by value (the lock was acquired
+	 * above with &priv->lock) and did not compile. */
+	spin_unlock(&priv->lock);
+
+	return (rx_work_limit < 0) ? 1 : 0;
+}
+#endif
+
+/* The interrupt handler for devices with one interrupt: demultiplexes
+ * the IEVENT bits to the rx/tx handlers and updates error counters. */
+static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&priv->regs->ievent);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, events);
+
+	/* Check for reception */
+	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
+		gfar_receive(irq, dev_id, regs);
+
+	/* Check for transmit completion */
+	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
+		gfar_transmit(irq, dev_id, regs);
+
+	/* Update error statistics */
+	if (events & IEVENT_TXE) {
+		priv->stats.tx_errors++;
+
+		if (events & IEVENT_LC)
+			priv->stats.tx_window_errors++;
+		if (events & IEVENT_CRL)
+			priv->stats.tx_aborted_errors++;
+		if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+			printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
+			       dev->name);
+#endif
+			priv->stats.tx_dropped++;
+			priv->extra_stats.tx_underrun++;
+
+			/* Reactivate the Tx Queues */
+			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+		}
+	}
+	if (events & IEVENT_BSY) {
+		priv->stats.rx_errors++;
+		priv->extra_stats.rx_bsy++;
+
+		gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+		/* BUGFIX: gfar_read() takes the register's address; the
+		 * original passed priv->regs->rstat by value. */
+		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+		       gfar_read(&priv->regs->rstat));
+#endif
+	}
+	if (events & IEVENT_BABR) {
+		priv->stats.rx_errors++;
+		priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+	}
+	if (events & IEVENT_EBERR) {
+		priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+	}
+	if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+	}
+
+	if (events & IEVENT_BABT) {
+		priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* PHY interrupt handler: ack the PHY's interrupt source, then defer
+ * the (sleep-capable) link-state handling to process context. */
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Run the commands which acknowledge the interrupt */
+	phy_run_commands(dev, priv->phyinfo->ack_int);
+
+	/* Schedule the bottom half */
+	schedule_work(&priv->tq);
+
+	return IRQ_HANDLED;
+}
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void gfar_phy_change(void *data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct gfar_private *priv = netdev_priv(dev);
+	/* Roughly 1ms (one jiffy minimum) settle time */
+	int timeout = HZ / 1000 + 1;
+
+	/* Delay to give the PHY a chance to change the
+	 * register state */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(timeout);
+
+	/* Run the commands which check the link state */
+	phy_run_commands(dev, priv->phyinfo->handle_int);
+
+	/* React to the change in state */
+	adjust_link(dev);
+}
+
+/* Called every so often on systems that don't interrupt
+ * the core for PHY changes */
+static void gfar_phy_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Defer the actual PHY poll to process context */
+	schedule_work(&priv->tq);
+
+	/* Re-arm ourselves: poll again in 2 seconds */
+	mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the priv structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ *
+ * NOTE: every "&regs->..." below had been corrupted to the mojibake
+ * "(R)s->..." ("&reg" swallowed as an HTML entity); restored here.
+ */
+static void adjust_link(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	u32 tempval;
+
+	if (priv->link) {
+		/* Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode. */
+		if (priv->duplexity != priv->olddplx) {
+			if (!(priv->duplexity)) {
+				tempval = gfar_read(&regs->maccfg2);
+				tempval &= ~(MACCFG2_FULL_DUPLEX);
+				gfar_write(&regs->maccfg2, tempval);
+
+				printk(KERN_INFO "%s: Half Duplex\n",
+				       dev->name);
+			} else {
+				tempval = gfar_read(&regs->maccfg2);
+				tempval |= MACCFG2_FULL_DUPLEX;
+				gfar_write(&regs->maccfg2, tempval);
+
+				printk(KERN_INFO "%s: Full Duplex\n",
+				       dev->name);
+			}
+
+			priv->olddplx = priv->duplexity;
+		}
+
+		if (priv->speed != priv->oldspeed) {
+			switch (priv->speed) {
+			case 1000:
+				tempval = gfar_read(&regs->maccfg2);
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+				gfar_write(&regs->maccfg2, tempval);
+				break;
+			case 100:
+			case 10:
+				tempval = gfar_read(&regs->maccfg2);
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+				gfar_write(&regs->maccfg2, tempval);
+				break;
+			default:
+				printk(KERN_WARNING
+				       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
+				       dev->name, priv->speed);
+				break;
+			}
+
+			printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
+			       priv->speed);
+
+			priv->oldspeed = priv->speed;
+		}
+
+		if (!priv->oldlink) {
+			printk(KERN_INFO "%s: Link is up\n", dev->name);
+			priv->oldlink = 1;
+			netif_carrier_on(dev);
+			netif_schedule(dev);
+		}
+	} else {
+		if (priv->oldlink) {
+			printk(KERN_INFO "%s: Link is down\n", dev->name);
+			priv->oldlink = 0;
+			priv->oldspeed = 0;
+			priv->olddplx = -1;
+			netif_carrier_off(dev);
+		}
+	}
+
+#ifdef VERBOSE_GFAR_ERRORS
+	printk(KERN_INFO "%s: Link now %s; %dBT %s-duplex\n",
+	       dev->name, priv->link ? "up" : "down", priv->speed, priv->duplexity ? "full" : "half");
+#endif
+}
+
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed
+ *
+ * NOTE: every "&regs->..." below had been corrupted to the mojibake
+ * "(R)s->..." ("&reg" swallowed as an HTML entity); restored here. */
+static void gfar_set_multi(struct net_device *dev)
+{
+	struct dev_mc_list *mc_ptr;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	u32 tempval;
+
+	if(dev->flags & IFF_PROMISC) {
+		printk(KERN_INFO "%s: Entering promiscuous mode.\n",
+		       dev->name);
+		/* Set RCTRL to PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval |= RCTRL_PROM;
+		gfar_write(&regs->rctrl, tempval);
+	} else {
+		/* Set RCTRL to not PROM */
+		tempval = gfar_read(&regs->rctrl);
+		tempval &= ~(RCTRL_PROM);
+		gfar_write(&regs->rctrl, tempval);
+	}
+
+	if(dev->flags & IFF_ALLMULTI) {
+		/* Set the hash to rx all multicast frames */
+		gfar_write(&regs->gaddr0, 0xffffffff);
+		gfar_write(&regs->gaddr1, 0xffffffff);
+		gfar_write(&regs->gaddr2, 0xffffffff);
+		gfar_write(&regs->gaddr3, 0xffffffff);
+		gfar_write(&regs->gaddr4, 0xffffffff);
+		gfar_write(&regs->gaddr5, 0xffffffff);
+		gfar_write(&regs->gaddr6, 0xffffffff);
+		gfar_write(&regs->gaddr7, 0xffffffff);
+	} else {
+		/* zero out the hash */
+		gfar_write(&regs->gaddr0, 0x0);
+		gfar_write(&regs->gaddr1, 0x0);
+		gfar_write(&regs->gaddr2, 0x0);
+		gfar_write(&regs->gaddr3, 0x0);
+		gfar_write(&regs->gaddr4, 0x0);
+		gfar_write(&regs->gaddr5, 0x0);
+		gfar_write(&regs->gaddr6, 0x0);
+		gfar_write(&regs->gaddr7, 0x0);
+
+		if(dev->mc_count == 0)
+			return;
+
+		/* Parse the list, and set the appropriate bits */
+		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+			gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+		}
+	}
+
+	return;
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * gaddr7. This means that the 3 most significant bits in the
+ * hash index which gaddr register to use, and the 5 other bits
+ * indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry. */
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+	u32 tempval;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar *regs = priv->regs;
+	/* BUGFIX: "&regs->gaddr0" had been corrupted to the mojibake
+	 * "(R)s->gaddr0" ("&reg" swallowed as an HTML entity). */
+	u32 *hash = &regs->gaddr0;
+	u32 result = ether_crc(MAC_ADDR_LEN, addr);
+	u8 whichreg = ((result >> 29) & 0x7);
+	u8 whichbit = ((result >> 24) & 0x1f);
+	/* IBM bit numbering: bit 0 is the MSB */
+	u32 value = (1 << (31-whichbit));
+
+	tempval = gfar_read(&hash[whichreg]);
+	tempval |= value;
+	gfar_write(&hash[whichreg], tempval);
+
+	return;
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Save ievent for future reference */
+	u32 events = gfar_read(&priv->regs->ievent);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
+
+	/* Hmm... */
+#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
+	/* BUGFIX: gfar_read() takes the register's address; the original
+	 * passed priv->regs->imask by value. */
+	printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
+	       dev->name, events, gfar_read(&priv->regs->imask));
+#endif
+
+	/* Update the error counters */
+	if (events & IEVENT_TXE) {
+		priv->stats.tx_errors++;
+
+		if (events & IEVENT_LC)
+			priv->stats.tx_window_errors++;
+		if (events & IEVENT_CRL)
+			priv->stats.tx_aborted_errors++;
+		if (events & IEVENT_XFUN) {
+#ifdef VERBOSE_GFAR_ERRORS
+			printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
+			       dev->name);
+#endif
+			priv->stats.tx_dropped++;
+			priv->extra_stats.tx_underrun++;
+
+			/* Reactivate the Tx Queues */
+			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+		}
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
+#endif
+	}
+	if (events & IEVENT_BSY) {
+		priv->stats.rx_errors++;
+		priv->extra_stats.rx_bsy++;
+
+		gfar_receive(irq, dev_id, regs);
+
+#ifndef CONFIG_GFAR_NAPI
+		/* Clear the halt bit in RSTAT */
+		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+#endif
+
+#ifdef VERBOSE_GFAR_ERRORS
+		/* BUGFIX: pass the register's address to gfar_read() */
+		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
+		       gfar_read(&priv->regs->rstat));
+#endif
+	}
+	if (events & IEVENT_BABR) {
+		priv->stats.rx_errors++;
+		priv->extra_stats.rx_babr++;
+
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
+#endif
+	}
+	if (events & IEVENT_EBERR) {
+		priv->extra_stats.eberr++;
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
+#endif
+	}
+	/* BUGFIX: braces are required here.  The body of this if was a
+	 * single statement inside #ifdef VERBOSE_GFAR_ERRORS; with that
+	 * macro undefined, the following IEVENT_BABT check became the
+	 * dangling body and BABT accounting was silently gated on RXC. */
+	if (events & IEVENT_RXC) {
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: control frame\n", dev->name);
+#endif
+	}
+
+	if (events & IEVENT_BABT) {
+		priv->extra_stats.tx_babt++;
+#ifdef VERBOSE_GFAR_ERRORS
+		printk(KERN_DEBUG "%s: babt error\n", dev->name);
+#endif
+	}
+	return IRQ_HANDLED;
+}
+
+/* Structure for a device driver */
+static struct ocp_device_id gfar_ids[] = {
+	/* Match any vendor's GFAR function; terminated by a sentinel
+	 * entry with an invalid vendor id. */
+	{.vendor = OCP_ANY_ID,.function = OCP_FUNC_GFAR},
+	{.vendor = OCP_VENDOR_INVALID}
+};
+
+/* OCP bus driver glue: binds gfar_probe/gfar_remove to any device
+ * matching gfar_ids. */
+static struct ocp_driver gfar_driver = {
+	.name = "gianfar",
+	.id_table = gfar_ids,
+
+	.probe = gfar_probe,
+	.remove = gfar_remove,
+};
+
+/* Module entry point: register the driver with the OCP bus.
+ * Returns 0 on success, -ENODEV otherwise. */
+static int __init gfar_init(void)
+{
+	int rc;
+
+	rc = ocp_register_driver(&gfar_driver);
+	/* NOTE(review): on 2.5.42+ a nonzero return is treated as an
+	 * error, while on older kernels a zero return is -- presumably
+	 * the older ocp_register_driver() returned a device count;
+	 * verify against the matching OCP implementation. */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+	if (rc != 0) {
+#else
+	if (rc == 0) {
+#endif
+		ocp_unregister_driver(&gfar_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* Module exit point: detach from the OCP bus. */
+static void __exit gfar_exit(void)
+{
+	ocp_unregister_driver(&gfar_driver);
+}
+
+module_init(gfar_init);
+module_exit(gfar_exit);
--- /dev/null
+/*
+ * drivers/net/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#define schedule_work schedule_task
+#endif
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/ocp.h>
+#include "gianfar_phy.h"
+
+/* The maximum number of packets to be handled in one call of gfar_poll */
+#define GFAR_DEV_WEIGHT 64
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+/* The number of bytes which composes a unit for the purpose of
+ * allocating data buffers. ie-for any given MTU, the data buffer
+ * will be the next highest multiple of 512 bytes. */
+#define INCREMENTAL_BUFFER_SIZE 512
+
+
+#define MAC_ADDR_LEN 6
+
+extern char gfar_driver_name[];
+extern char gfar_driver_version[];
+
+/* These need to be powers of 2 for this driver */
+#ifdef CONFIG_GFAR_NAPI
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+#else
+#define DEFAULT_TX_RING_SIZE 64
+#define DEFAULT_RX_RING_SIZE 64
+#endif
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define DEFAULT_RX_BUFFER_SIZE 1536
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
+#define JUMBO_BUFFER_SIZE 9728
+#define JUMBO_FRAME_SIZE 9600
+
+/* Latency of interface clock in nanoseconds */
+/* Interface clock latency , in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
+
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 32768
+
+#define DEFAULT_RXCOUNT 16
+#define DEFAULT_RXTIME 32768
+
+#define TBIPA_VALUE 0x1f
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+#define MIIMIND_BUSY 0x00000001
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+
+#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT 0x80000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
+
+#define RCTRL_PROM 0x00000008
+#define RSTAT_CLEAR_RHALT       0x00800000
+
+/* IEVENT: interrupt event register bit definitions */
+#define IEVENT_INIT_CLEAR	0xffffffff
+#define IEVENT_BABR		0x80000000
+#define IEVENT_RXC		0x40000000
+#define IEVENT_BSY		0x20000000
+#define IEVENT_EBERR		0x10000000
+#define IEVENT_MSRO		0x04000000
+#define IEVENT_GTSC		0x02000000
+#define IEVENT_BABT		0x01000000
+#define IEVENT_TXC		0x00800000
+#define IEVENT_TXE		0x00400000
+#define IEVENT_TXB		0x00200000
+#define IEVENT_TXF		0x00100000
+#define IEVENT_LC		0x00040000
+#define IEVENT_CRL		0x00020000
+#define IEVENT_XFUN		0x00010000
+#define IEVENT_RXB0		0x00008000
+#define IEVENT_GRSC		0x00000100
+#define IEVENT_RXF0		0x00000080
+#define IEVENT_RX_MASK          (IEVENT_RXB0 | IEVENT_RXF0)
+#define IEVENT_TX_MASK          (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_ERR_MASK         \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN)
+
+/* IMASK: interrupt mask register bits (enables for the IEVENT bits above) */
+#define IMASK_INIT_CLEAR	0x00000000
+#define IMASK_BABR              0x80000000
+#define IMASK_RXC               0x40000000
+#define IMASK_BSY               0x20000000
+#define IMASK_EBERR             0x10000000
+#define IMASK_MSRO		0x04000000
+#define IMASK_GRSC              0x02000000
+#define IMASK_BABT		0x01000000
+#define IMASK_TXC               0x00800000
+#define IMASK_TXEEN		0x00400000
+#define IMASK_TXBEN		0x00200000
+#define IMASK_TXFEN             0x00100000
+#define IMASK_LC		0x00040000
+#define IMASK_CRL		0x00020000
+#define IMASK_XFUN		0x00010000
+#define IMASK_RXB0              0x00008000
+#define IMASK_GTSC              0x00000100
+#define IMASK_RXFEN0		0x00000080
+#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_DEFAULT  (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+		IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+		IMASK_XFUN | IMASK_RXC | IMASK_BABT)
+
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#ifdef CONFIG_GFAR_BDSTASH
+#define ATTR_BDSTASH		0x00000800
+#else
+#define ATTR_BDSTASH		0x00000000
+#endif
+
+#ifdef CONFIG_GFAR_BUFSTASH
+#define ATTR_BUFSTASH		0x00004000
+#define STASH_LENGTH		64
+#else
+#define ATTR_BUFSTASH		0x00000000
+#endif
+
+#define ATTR_SNOOPING		0x000000c0
+#define ATTR_INIT_SETTINGS      (ATTR_SNOOPING \
+		| ATTR_BDSTASH | ATTR_BUFSTASH)
+
+#define ATTRELI_INIT_SETTINGS   0x0
+
+
+/* TxBD status field bits */
+#define TXBD_READY		0x8000
+#define TXBD_PADCRC		0x4000
+#define TXBD_WRAP		0x2000
+#define TXBD_INTERRUPT		0x1000
+#define TXBD_LAST		0x0800
+#define TXBD_CRC		0x0400
+#define TXBD_DEF		0x0200
+#define TXBD_HUGEFRAME		0x0080
+/* NOTE(review): TXBD_HUGEFRAME and TXBD_LATECOLLISION share bit 0x0080 --
+ * presumably the bit's meaning differs before vs. after transmission;
+ * confirm against the TSEC reference manual. */
+#define TXBD_LATECOLLISION	0x0080
+#define TXBD_RETRYLIMIT		0x0040
+#define	TXBD_RETRYCOUNTMASK	0x003c
+#define TXBD_UNDERRUN		0x0002
+
+/* RxBD status field bits */
+#define RXBD_EMPTY		0x8000
+#define RXBD_RO1		0x4000
+#define RXBD_WRAP		0x2000
+#define RXBD_INTERRUPT		0x1000
+#define RXBD_LAST		0x0800
+#define RXBD_FIRST		0x0400
+#define RXBD_MISS		0x0100
+#define RXBD_BROADCAST		0x0080
+#define RXBD_MULTICAST		0x0040
+#define RXBD_LARGE		0x0020
+#define RXBD_NONOCTET		0x0010
+#define RXBD_SHORT		0x0008
+#define RXBD_CRCERR		0x0004
+#define RXBD_OVERRUN		0x0002
+#define RXBD_TRUNCATED		0x0001
+#define RXBD_STATS		0x01ff
+
+/* Transmit buffer descriptor: status/length/buffer-address triple
+ * laid out as the hardware expects (8 bytes). */
+struct txbd8
+{
+	u16	status;	/* Status Fields */
+	u16	length;	/* Buffer length */
+	u32	bufPtr;	/* Buffer Pointer */
+};
+
+/* Receive buffer descriptor: same 8-byte hardware layout as txbd8. */
+struct rxbd8
+{
+	u16	status;	/* Status Fields */
+	u16	length;	/* Buffer Length */
+	u32	bufPtr;	/* Buffer Pointer */
+};
+
+/* Hardware RMON MIB counter block, mirroring the register layout at
+ * offsets 0x.680-0x.73c inside struct gfar (see the per-field offsets). */
+struct rmon_mib
+{
+	u32	tr64;	/* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+	u32	tr127;	/* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+	u32	tr255;	/* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+	u32	tr511;	/* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+	u32	tr1k;	/* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+	u32	trmax;	/* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+	u32	trmgv;	/* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+	u32	rbyt;	/* 0x.69c - Receive Byte Counter */
+	u32	rpkt;	/* 0x.6a0 - Receive Packet Counter */
+	u32	rfcs;	/* 0x.6a4 - Receive FCS Error Counter */
+	u32	rmca;	/* 0x.6a8 - Receive Multicast Packet Counter */
+	u32	rbca;	/* 0x.6ac - Receive Broadcast Packet Counter */
+	u32	rxcf;	/* 0x.6b0 - Receive Control Frame Packet Counter */
+	u32	rxpf;	/* 0x.6b4 - Receive Pause Frame Packet Counter */
+	u32	rxuo;	/* 0x.6b8 - Receive Unknown OP Code Counter */
+	u32	raln;	/* 0x.6bc - Receive Alignment Error Counter */
+	u32	rflr;	/* 0x.6c0 - Receive Frame Length Error Counter */
+	u32	rcde;	/* 0x.6c4 - Receive Code Error Counter */
+	u32	rcse;	/* 0x.6c8 - Receive Carrier Sense Error Counter */
+	u32	rund;	/* 0x.6cc - Receive Undersize Packet Counter */
+	u32	rovr;	/* 0x.6d0 - Receive Oversize Packet Counter */
+	u32	rfrg;	/* 0x.6d4 - Receive Fragments Counter */
+	u32	rjbr;	/* 0x.6d8 - Receive Jabber Counter */
+	u32	rdrp;	/* 0x.6dc - Receive Drop Counter */
+	u32	tbyt;	/* 0x.6e0 - Transmit Byte Counter Counter */
+	u32	tpkt;	/* 0x.6e4 - Transmit Packet Counter */
+	u32	tmca;	/* 0x.6e8 - Transmit Multicast Packet Counter */
+	u32	tbca;	/* 0x.6ec - Transmit Broadcast Packet Counter */
+	u32	txpf;	/* 0x.6f0 - Transmit Pause Control Frame Counter */
+	u32	tdfr;	/* 0x.6f4 - Transmit Deferral Packet Counter */
+	u32	tedf;	/* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+	u32	tscl;	/* 0x.6fc - Transmit Single Collision Packet Counter */
+	u32	tmcl;	/* 0x.700 - Transmit Multiple Collision Packet Counter */
+	u32	tlcl;	/* 0x.704 - Transmit Late Collision Packet Counter */
+	u32	txcl;	/* 0x.708 - Transmit Excessive Collision Packet Counter */
+	u32	tncl;	/* 0x.70c - Transmit Total Collision Counter */
+	u8	res1[4];	/* 0x.710 - reserved gap (reported as "RESERVED" in ethtool strings) */
+	u32	tdrp;	/* 0x.714 - Transmit Drop Frame Counter */
+	u32	tjbr;	/* 0x.718 - Transmit Jabber Frame Counter */
+	u32	tfcs;	/* 0x.71c - Transmit FCS Error Counter */
+	u32	txcf;	/* 0x.720 - Transmit Control Frame Counter */
+	u32	tovr;	/* 0x.724 - Transmit Oversize Frame Counter */
+	u32	tund;	/* 0x.728 - Transmit Undersize Frame Counter */
+	u32	tfrg;	/* 0x.72c - Transmit Fragments Frame Counter */
+	u32	car1;	/* 0x.730 - Carry Register One */
+	u32	car2;	/* 0x.734 - Carry Register Two */
+	u32	cam1;	/* 0x.738 - Carry Mask Register One */
+	u32	cam2;	/* 0x.73c - Carry Mask Register Two */
+};
+
+/* Driver-maintained (software) statistics, exported via ethtool
+ * ahead of the hardware RMON counters (see struct gfar_stats). */
+struct gfar_extra_stats {
+	u64 kernel_dropped;
+	u64 rx_large;
+	u64 rx_short;
+	u64 rx_nonoctet;
+	u64 rx_crcerr;
+	u64 rx_overrun;
+	u64 rx_bsy;
+	u64 rx_babr;
+	u64 rx_trunc;
+	u64 eberr;
+	u64 tx_babt;
+	u64 tx_underrun;
+	u64 rx_skbmissing;
+	u64 tx_timeout;
+};
+
+/* RMON length excludes the trailing car1/car2/cam1/cam2 regs (16 bytes) */
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+
+/* Number of stats in the stats structure (ignore car and cam regs)*/
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
+
+#define GFAR_INFOSTR_LEN 32
+
+/* Layout of the u64 buffer handed back to ethtool: software stats
+ * first, then the widened RMON counters. Must match stat_gstrings. */
+struct gfar_stats {
+	u64 extra[GFAR_EXTRA_STATS_LEN];
+	u64 rmon[GFAR_RMON_LEN];
+};
+
+
+/* Memory map of the gianfar register block. The res* pad arrays keep
+ * each field at the hardware offset given in its comment (0x.xxx). */
+struct gfar {
+	u8	res1[16];
+	u32	ievent;			/* 0x.010 - Interrupt Event Register */
+	u32	imask;			/* 0x.014 - Interrupt Mask Register */
+	u32	edis;			/* 0x.018 - Error Disabled Register */
+	u8	res2[4];
+	u32	ecntrl;			/* 0x.020 - Ethernet Control Register */
+	u32	minflr;			/* 0x.024 - Minimum Frame Length Register */
+	u32	ptv;			/* 0x.028 - Pause Time Value Register */
+	u32	dmactrl;		/* 0x.02c - DMA Control Register */
+	u32	tbipa;			/* 0x.030 - TBI PHY Address Register */
+	u8	res3[88];
+	u32	fifo_tx_thr;		/* 0x.08c - FIFO transmit threshold register */
+	u8	res4[8];
+	u32	fifo_tx_starve;		/* 0x.098 - FIFO transmit starve register */
+	u32	fifo_tx_starve_shutoff;	/* 0x.09c - FIFO transmit starve shutoff register */
+	u8	res5[96];
+	u32	tctrl;			/* 0x.100 - Transmit Control Register */
+	u32	tstat;			/* 0x.104 - Transmit Status Register */
+	u8	res6[4];
+	u32	tbdlen;			/* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+	u32	txic;			/* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+	u8	res7[16];
+	u32	ctbptr;			/* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */
+	u8	res8[92];
+	u32	tbptr;			/* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */
+	u8	res9[124];
+	u32	tbase;			/* 0x.204 - Transmit Descriptor Base Address Register */
+	u8	res10[168];
+	u32	ostbd;			/* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */
+	u32	ostbdp;			/* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */
+	u8	res11[72];
+	u32	rctrl;			/* 0x.300 - Receive Control Register */
+	u32	rstat;			/* 0x.304 - Receive Status Register */
+	u8	res12[4];
+	u32	rbdlen;			/* 0x.30c - RxBD Data Length Register */
+	u32	rxic;			/* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+	u8	res13[16];
+	u32	crbptr;			/* 0x.324 - Current Receive Buffer Descriptor Pointer */
+	u8	res14[24];
+	u32	mrblr;			/* 0x.340 - Maximum Receive Buffer Length Register */
+	u8	res15[64];
+	u32	rbptr;			/* 0x.384 - Receive Buffer Descriptor Pointer */
+	u8	res16[124];
+	u32	rbase;			/* 0x.404 - Receive Descriptor Base Address */
+	u8	res17[248];
+	u32	maccfg1;		/* 0x.500 - MAC Configuration 1 Register */
+	u32	maccfg2;		/* 0x.504 - MAC Configuration 2 Register */
+	u32	ipgifg;			/* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+	u32	hafdup;			/* 0x.50c - Half Duplex Register */
+	u32	maxfrm;			/* 0x.510 - Maximum Frame Length Register */
+	u8	res18[12];
+	u32	miimcfg;		/* 0x.520 - MII Management Configuration Register */
+	u32	miimcom;		/* 0x.524 - MII Management Command Register */
+	u32	miimadd;		/* 0x.528 - MII Management Address Register */
+	u32	miimcon;		/* 0x.52c - MII Management Control Register */
+	u32	miimstat;		/* 0x.530 - MII Management Status Register */
+	u32	miimind;		/* 0x.534 - MII Management Indicator Register */
+	u8	res19[4];
+	u32	ifstat;			/* 0x.53c - Interface Status Register */
+	u32	macstnaddr1;		/* 0x.540 - Station Address Part 1 Register */
+	u32	macstnaddr2;		/* 0x.544 - Station Address Part 2 Register */
+	u8	res20[312];
+	struct rmon_mib	rmon;		/* 0x.680-0x.73c - hardware RMON MIB counters */
+	u8	res21[192];
+	u32	iaddr0;			/* 0x.800 - Indivdual address register 0 */
+	u32	iaddr1;			/* 0x.804 - Indivdual address register 1 */
+	u32	iaddr2;			/* 0x.808 - Indivdual address register 2 */
+	u32	iaddr3;			/* 0x.80c - Indivdual address register 3 */
+	u32	iaddr4;			/* 0x.810 - Indivdual address register 4 */
+	u32	iaddr5;			/* 0x.814 - Indivdual address register 5 */
+	u32	iaddr6;			/* 0x.818 - Indivdual address register 6 */
+	u32	iaddr7;			/* 0x.81c - Indivdual address register 7 */
+	u8	res22[96];
+	u32	gaddr0;			/* 0x.880 - Global address register 0 */
+	u32	gaddr1;			/* 0x.884 - Global address register 1 */
+	u32	gaddr2;			/* 0x.888 - Global address register 2 */
+	u32	gaddr3;			/* 0x.88c - Global address register 3 */
+	u32	gaddr4;			/* 0x.890 - Global address register 4 */
+	u32	gaddr5;			/* 0x.894 - Global address register 5 */
+	u32	gaddr6;			/* 0x.898 - Global address register 6 */
+	u32	gaddr7;			/* 0x.89c - Global address register 7 */
+	u8	res23[856];
+	u32	attr;			/* 0x.bf8 - Attributes Register */
+	u32	attreli;		/* 0x.bfc - Attributes Extract Length and Extract Index Register */
+	u8	res24[1024];
+
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblence)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+ * and tx_bd_base always point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct gfar_private
+{
+	/* pointers to arrays of skbuffs for tx and rx */
+	struct sk_buff ** tx_skbuff;
+	struct sk_buff ** rx_skbuff;
+
+	/* indices pointing to the next free sbk in skb arrays */
+	u16 skb_curtx;
+	u16 skb_currx;
+
+	/* index of the first skb which hasn't been transmitted
+	 * yet. */
+	u16 skb_dirtytx;
+
+	/* Configuration info for the coalescing features
+	 * (counts/times are programmed into txic/rxic by ethtool code) */
+	unsigned char txcoalescing;
+	unsigned short txcount;
+	unsigned short txtime;
+	unsigned char rxcoalescing;
+	unsigned short rxcount;
+	unsigned short rxtime;
+
+	/* GFAR addresses */
+	struct rxbd8 *rx_bd_base;	/* Base addresses of Rx and Tx Buffers */
+	struct txbd8 *tx_bd_base;
+	struct rxbd8 *cur_rx;           /* Next free rx ring entry */
+	struct txbd8 *cur_tx;           /* Next free ring entry */
+	struct txbd8 *dirty_tx;         /* The Ring entry to be freed. */
+	struct gfar *regs;	/* Pointer to the GFAR memory mapped Registers */
+	struct phy_info *phyinfo;
+	struct gfar *phyregs;	/* register block carrying the MIIM regs used for PHY access */
+	struct work_struct tq;
+	struct timer_list phy_info_timer;
+	struct net_device_stats stats; /* linux network statistics */
+	struct gfar_extra_stats extra_stats;	/* driver-private counters exported via ethtool */
+	spinlock_t lock;
+	unsigned int rx_buffer_size;
+	unsigned int rx_stash_size;
+	unsigned int tx_ring_size;
+	unsigned int rx_ring_size;
+	wait_queue_head_t rxcleanupq;	/* woken when rxclean becomes nonzero (ring resize path) */
+	unsigned int rxclean;
+	int link;		/* current link state */
+	int oldlink;
+	int duplexity;		/* Indicates negotiated duplex state */
+	int olddplx;
+	int speed;		/* Indicates negotiated speed */
+	int oldspeed;
+
+	/* Info structure initialized by board setup code */
+	struct ocp_gfar_data *einfo;
+};
+
+/* Read a 32-bit device register (big-endian I/O accessor). */
+extern inline u32 gfar_read(volatile unsigned *addr)
+{
+	u32 val;
+	val = in_be32(addr);
+	return val;
+}
+
+/* Write a 32-bit device register (big-endian I/O accessor). */
+extern inline void gfar_write(volatile unsigned *addr, u32 val)
+{
+	out_be32(addr, val);
+}
+
+
+
+#endif /* __GIANFAR_H */
--- /dev/null
+/*
+ * drivers/net/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <linux/ethtool.h>
+
+#include "gianfar.h"
+
+/* True for x > 0 with exactly one bit set (ring sizes must be powers of 2) */
+#define is_power_of_2(x)        ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+/* Defined in gianfar.c */
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern void gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+		     u64 * buf);
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+
+/* ethtool stat names: the first GFAR_EXTRA_STATS_LEN entries match
+ * struct gfar_extra_stats, the rest match struct rmon_mib in order.
+ * "RESERVED" corresponds to the res1 gap in rmon_mib. */
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+	"RX Dropped by Kernel",
+	"RX Large Frame Errors",
+	"RX Short Frame Errors",
+	"RX Non-Octet Errors",
+	"RX CRC Errors",
+	"RX Overrun Errors",
+	"RX Busy Errors",
+	"RX Babbling Errors",
+	"RX Truncated Frames",
+	"Ethernet Bus Error",
+	"TX Babbling Errors",
+	"TX Underrun Errors",
+	"RX SKB Missing Errors",
+	"TX Timeout Errors",
+	"tx&rx 64B frames",
+	"tx&rx 65-127B frames",
+	"tx&rx 128-255B frames",
+	"tx&rx 256-511B frames",
+	"tx&rx 512-1023B frames",
+	"tx&rx 1024-1518B frames",
+	"tx&rx 1519-1522B Good VLAN",
+	"RX bytes",
+	"RX Packets",
+	"RX FCS Errors",
+	"Receive Multicast Packet",
+	"Receive Broadcast Packet",
+	"RX Control Frame Packets",
+	"RX Pause Frame Packets",
+	"RX Unknown OP Code",
+	"RX Alignment Error",
+	"RX Frame Length Error",
+	"RX Code Error",
+	"RX Carrier Sense Error",
+	"RX Undersize Packets",
+	"RX Oversize Packets",
+	"RX Fragmented Frames",
+	"RX Jabber Frames",
+	"RX Dropped Frames",
+	"TX Byte Counter",
+	"TX Packets",
+	"TX Multicast Packets",
+	"TX Broadcast Packets",
+	"TX Pause Control Frames",
+	"TX Deferral Packets",
+	"TX Excessive Deferral Packets",
+	"TX Single Collision Packets",
+	"TX Multiple Collision Packets",
+	"TX Late Collision Packets",
+	"TX Excessive Collision Packets",
+	"TX Total Collision",
+	"RESERVED",
+	"TX Dropped Frames",
+	"TX Jabber Frames",
+	"TX FCS Errors",
+	"TX Control Frames",
+	"TX Oversize Frames",
+	"TX Undersize Frames",
+	"TX Fragmented Frames",
+};
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+{
+	int i;
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	/* rmon points directly at the memory-mapped RMON counter block */
+	u32 *rmon = (u32 *) & priv->regs->rmon;
+	u64 *extra = (u64 *) & priv->extra_stats;
+	struct gfar_stats *stats = (struct gfar_stats *) buf;
+
+	/* Widen each 32-bit hardware counter into the u64 output array */
+	for (i = 0; i < GFAR_RMON_LEN; i++) {
+		stats->rmon[i] = (u64) (rmon[i]);
+	}
+
+	/* Software-maintained counters are already u64; copy through */
+	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+		stats->extra[i] = extra[i];
+	}
+}
+
+/* Returns the number of stats (and their corresponding strings) */
+int gfar_stats_count(struct net_device *dev)
+{
+	return GFAR_STATS_LEN;
+}
+
+/* Strings variant for hardware without RMON counters: exports only the
+ * software (extra_stats) names. */
+void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf)
+{
+	memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Stats variant for hardware without RMON counters: copies only the
+ * software-maintained extra_stats into the ethtool buffer. */
+void gfar_fill_stats_normon(struct net_device *dev,
+			    struct ethtool_stats *dummy, u64 * buf)
+{
+	int i;
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	u64 *extra = (u64 *) & priv->extra_stats;
+
+	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) {
+		buf[i] = extra[i];
+	}
+}
+
+
+/* Stat count for the no-RMON variant (software stats only) */
+int gfar_stats_count_normon(struct net_device *dev)
+{
+	return GFAR_EXTRA_STATS_LEN;
+}
+/* Fills in the drvinfo structure with some basic info */
+/* NOTE(review): strncpy does not guarantee NUL-termination if the source
+ * is >= GFAR_INFOSTR_LEN; the drvinfo fields are ETHTOOL_BUSINFO_LEN-sized,
+ * so confirm GFAR_INFOSTR_LEN (32) leaves room for the terminator. */
+void gfar_gdrvinfo(struct net_device *dev, struct
+		   ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, gfar_driver_name, GFAR_INFOSTR_LEN);
+	strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
+	strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
+	strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+	drvinfo->n_stats = GFAR_STATS_LEN;
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+/* Return the current settings in the ethtool_cmd structure */
+int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	/* Advertise/support gigabit only if board info says the MAC has it */
+	uint gigabit_support =
+	    priv->einfo->flags & GFAR_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0;
+	uint gigabit_advert =
+	    priv->einfo->flags & GFAR_HAS_GIGABIT ? ADVERTISED_1000baseT_Full: 0;
+
+	cmd->supported = (SUPPORTED_10baseT_Half
+			  | SUPPORTED_100baseT_Half
+			  | SUPPORTED_100baseT_Full
+			  | gigabit_support | SUPPORTED_Autoneg);
+
+	/* For now, we always advertise everything */
+	cmd->advertising = (ADVERTISED_10baseT_Half
+			    | ADVERTISED_100baseT_Half
+			    | ADVERTISED_100baseT_Full
+			    | gigabit_advert | ADVERTISED_Autoneg);
+
+	cmd->speed = priv->speed;
+	cmd->duplex = priv->duplexity;
+	cmd->port = PORT_MII;
+	cmd->phy_address = priv->einfo->phyid;
+	cmd->transceiver = XCVR_EXTERNAL;
+	/* Autoneg is reported unconditionally enabled */
+	cmd->autoneg = AUTONEG_ENABLE;
+	cmd->maxtxpkt = priv->txcount;
+	cmd->maxrxpkt = priv->rxcount;
+
+	return 0;
+}
+
+/* Return the length of the register structure */
+int gfar_reglen(struct net_device *dev)
+{
+	return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+	int i;
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	u32 *theregs = (u32 *) priv->regs;
+	u32 *buf = (u32 *) regbuf;
+
+	/* Copy the whole mapped register block word-by-word into regbuf */
+	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+		buf[i] = theregs[i];
+}
+
+/* Return the link state 1 is up, 0 is down */
+u32 gfar_get_link(struct net_device *dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	return (u32) priv->link;
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats (full set: software stats followed by RMON names) */
+void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+	memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Convert microseconds to ethernet clock ticks, which changes
+ * depending on what speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+{
+	unsigned int count;
+
+	/* The timer is different, depending on the interface speed */
+	switch (priv->speed) {
+	case 1000:
+		count = GFAR_GBIT_TIME;
+		break;
+	case 100:
+		count = GFAR_100_TIME;
+		break;
+	case 10:
+	default:
+		count = GFAR_10_TIME;
+		break;
+	}
+
+	/* Make sure we return a number greater than 0
+	 * if usecs > 0 (rounds up via the "+ count - 1" term) */
+	return ((usecs * 1000 + count - 1) / count);
+}
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+{
+	unsigned int count;
+
+	/* The timer is different, depending on the interface speed */
+	switch (priv->speed) {
+	case 1000:
+		count = GFAR_GBIT_TIME;
+		break;
+	case 100:
+		count = GFAR_100_TIME;
+		break;
+	case 10:
+	default:
+		count = GFAR_10_TIME;
+		break;
+	}
+
+	/* NOTE(review): integer division truncates, so small non-zero tick
+	 * counts can map to 0 usecs (unlike the round-up in usecs2ticks) */
+	return ((ticks * count) / 1000);
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure.  Adaptive/high/low coalescing is not implemented,
+ * so all of those fields are reported as 0. */
+int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
+	cvals->rx_max_coalesced_frames = priv->rxcount;
+
+	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime);
+	cvals->tx_max_coalesced_frames = priv->txcount;
+
+	cvals->use_adaptive_rx_coalesce = 0;
+	cvals->use_adaptive_tx_coalesce = 0;
+
+	cvals->pkt_rate_low = 0;
+	cvals->rx_coalesce_usecs_low = 0;
+	cvals->rx_max_coalesced_frames_low = 0;
+	cvals->tx_coalesce_usecs_low = 0;
+	cvals->tx_max_coalesced_frames_low = 0;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate is (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	cvals->pkt_rate_high = 0;
+	cvals->rx_coalesce_usecs_high = 0;
+	cvals->rx_max_coalesced_frames_high = 0;
+	cvals->tx_coalesce_usecs_high = 0;
+	cvals->tx_max_coalesced_frames_high = 0;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds. Must not be zero.
+	 */
+	cvals->rate_sample_interval = 0;
+
+	return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	/* Set up rx coalescing */
+	if ((cvals->rx_coalesce_usecs == 0) ||
+	    (cvals->rx_max_coalesced_frames == 0))
+		priv->rxcoalescing = 0;
+	else
+		priv->rxcoalescing = 1;
+
+	priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
+	priv->rxcount = cvals->rx_max_coalesced_frames;
+
+	/* Set up tx coalescing */
+	if ((cvals->tx_coalesce_usecs == 0) ||
+	    (cvals->tx_max_coalesced_frames == 0))
+		priv->txcoalescing = 0;
+	else
+		priv->txcoalescing = 1;
+
+	priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
+	priv->txcount = cvals->tx_max_coalesced_frames;
+
+	/* Program the hardware coalescing registers; writing 0 when
+	 * coalescing is off */
+	if (priv->rxcoalescing)
+		gfar_write(&priv->regs->rxic,
+			   mk_ic_value(priv->rxcount, priv->rxtime));
+	else
+		gfar_write(&priv->regs->rxic, 0);
+
+	if (priv->txcoalescing)
+		gfar_write(&priv->regs->txic,
+			   mk_ic_value(priv->txcount, priv->txtime));
+	else
+		gfar_write(&priv->regs->txic, 0);
+
+	return 0;
+}
+
+/* Fills in rvals with the current ring parameters.  Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	rvals->rx_pending = priv->rx_ring_size;
+	rvals->rx_mini_pending = priv->rx_ring_size;
+	rvals->rx_jumbo_pending = priv->rx_ring_size;
+	rvals->tx_pending = priv->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in
+ * motion.  We wait for the ring to be clean before reallocating
+ * the rings. */
+int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+	u32 tempval;
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	int err = 0;
+
+	/* Validate requested sizes: bounded and power of 2 */
+	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+		return -EINVAL;
+
+	if (!is_power_of_2(rvals->rx_pending)) {
+		printk("%s: Ring sizes must be a power of 2\n",
+				dev->name);
+		return -EINVAL;
+	}
+
+	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+		return -EINVAL;
+
+	if (!is_power_of_2(rvals->tx_pending)) {
+		printk("%s: Ring sizes must be a power of 2\n", 
+				dev->name);
+		return -EINVAL;
+	}
+
+	/* Stop the controller so we don't rx any more frames */
+	/* But first, make sure we clear the bits */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* Request graceful rx/tx stop ... */
+	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&priv->regs->dmactrl, tempval);
+
+	/* ... and busy-wait until the hardware signals either stop complete.
+	 * NOTE(review): unbounded spin if the hardware never asserts
+	 * GRSC/GTSC -- presumably acceptable here; confirm. */
+	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
+		cpu_relax();
+
+	/* Note that rx is not clean right now */
+	priv->rxclean = 0;
+
+	if (dev->flags & IFF_UP) {
+		/* Tell the driver to process the rest of the frames */
+		gfar_receive(0, (void *) dev, NULL);
+
+		/* Now wait for it to be done */
+		wait_event_interruptible(priv->rxcleanupq, priv->rxclean);
+
+		/* Ok, all packets have been handled.  Now we bring it down,
+		 * change the ring size, and bring it up */
+
+		stop_gfar(dev);
+	}
+
+	priv->rx_ring_size = rvals->rx_pending;
+	priv->tx_ring_size = rvals->tx_pending;
+
+	if (dev->flags & IFF_UP)
+		err = startup_gfar(dev);
+
+	return err;
+}
+
+/* ethtool operations table for the gianfar driver (RMON-capable variant;
+ * the *_normon helpers above are for hardware without RMON counters) */
+struct ethtool_ops gfar_ethtool_ops = {
+	.get_settings = gfar_gsettings,
+	.get_drvinfo = gfar_gdrvinfo,
+	.get_regs_len = gfar_reglen,
+	.get_regs = gfar_get_regs,
+	.get_link = gfar_get_link,
+	.get_coalesce = gfar_gcoalesce,
+	.set_coalesce = gfar_scoalesce,
+	.get_ringparam = gfar_gringparam,
+	.set_ringparam = gfar_sringparam,
+	.get_strings = gfar_gstrings,
+	.get_stats_count = gfar_stats_count,
+	.get_ethtool_stats = gfar_fill_stats,
+};
--- /dev/null
+/*
+ * drivers/net/gianfar_phy.c
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/mii.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+
+#include "gianfar.h"
+#include "gianfar_phy.h"
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+/* FIX: the address-of operator in "&regbase" had been corrupted into a
+ * registered-sign character by HTML-entity mangling ("&reg" -> (R)),
+ * which does not compile; restored "&regbase->..." at all call sites. */
+void write_phy_reg(struct net_device *dev, u16 regnum, u16 value)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	struct gfar *regbase = priv->phyregs;
+	struct ocp_gfar_data *einfo = priv->einfo;
+
+	/* Set the PHY address and the register address we want to write */
+	gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
+
+	/* Write out the value we want */
+	gfar_write(&regbase->miimcon, value);
+
+	/* Wait for the transaction to finish */
+	while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
+		cpu_relax();
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value.  Clears miimcom first.  All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+/* FIX: restored "&regbase->..." where the address-of operator had been
+ * corrupted into a registered-sign character ("&reg" HTML-entity
+ * mangling); the corrupted form does not compile. */
+u16 read_phy_reg(struct net_device *dev, u16 regnum)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	struct gfar *regbase = priv->phyregs;
+	struct ocp_gfar_data *einfo = priv->einfo;
+	u16 value;
+
+	/* Set the PHY address and the register address we want to read */
+	gfar_write(&regbase->miimadd, ((einfo->phyid) << 8) | regnum);
+
+	/* Clear miimcom, and then initiate a read */
+	gfar_write(&regbase->miimcom, 0);
+	gfar_write(&regbase->miimcom, MIIM_READ_COMMAND);
+
+	/* Wait for the transaction to finish */
+	while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+		cpu_relax();
+
+	/* Grab the value of the register from miimstat */
+	value = gfar_read(&regbase->miimstat);
+
+	return value;
+}
+
+/* returns which value to write to the control register. */
+/* For 10/100 the value is slightly different. */
+u16 mii_cr_init(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	struct ocp_gfar_data *einfo = priv->einfo;
+
+	/* Gigabit-capable MACs get the full init value; 10/100 a reduced one */
+	if (einfo->flags & GFAR_HAS_GIGABIT)
+		return MIIM_CONTROL_INIT;
+	else
+		return MIIM_CR_INIT;
+}
+
+#define BRIEF_GFAR_ERRORS
+/* Wait for auto-negotiation to complete.  Updates priv->link from the
+ * status register; when the link has just come up, polls the PHY status
+ * register until AN completes or GFAR_AN_TIMEOUT iterations elapse. */
+u16 mii_parse_sr(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	unsigned int timeout = GFAR_AN_TIMEOUT;
+
+	if (mii_reg & MIIM_STATUS_LINK)
+		priv->link = 1;
+	else
+		priv->link = 0;
+
+	/* Only auto-negotiate if the link has just gone up */
+	if (priv->link && !priv->oldlink) {
+		while ((!(mii_reg & MIIM_STATUS_AN_DONE)) && timeout--)
+			mii_reg = read_phy_reg(dev, MIIM_STATUS);
+
+#if defined(BRIEF_GFAR_ERRORS)
+		if (mii_reg & MIIM_STATUS_AN_DONE)
+			printk(KERN_INFO "%s: Auto-negotiation done\n",
+			       dev->name);
+		else
+			printk(KERN_INFO "%s: Auto-negotiation timed out\n",
+			       dev->name);
+#endif
+	}
+
+	return 0;
+}
+
+/* Determine the speed and duplex which was negotiated
+ * (Marvell 88E1011 PHY status register format).  When the link is
+ * down, speed and duplexity are zeroed. */
+u16 mii_parse_88E1011_psr(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	unsigned int speed;
+
+	if (priv->link) {
+		if (mii_reg & MIIM_88E1011_PHYSTAT_DUPLEX)
+			priv->duplexity = 1;
+		else
+			priv->duplexity = 0;
+
+		speed = (mii_reg & MIIM_88E1011_PHYSTAT_SPEED);
+
+		switch (speed) {
+		case MIIM_88E1011_PHYSTAT_GBIT:
+			priv->speed = 1000;
+			break;
+		case MIIM_88E1011_PHYSTAT_100:
+			priv->speed = 100;
+			break;
+		default:
+			priv->speed = 10;
+			break;
+		}
+	} else {
+		priv->speed = 0;
+		priv->duplexity = 0;
+	}
+
+	return 0;
+}
+
+/* Determine negotiated speed/duplex from the Cicada CIS8201 auxiliary
+ * constat register; zeroes speed/duplexity when the link is down. */
+u16 mii_parse_cis8201(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	unsigned int speed;
+
+	if (priv->link) {
+		if (mii_reg & MIIM_CIS8201_AUXCONSTAT_DUPLEX)
+			priv->duplexity = 1;
+		else
+			priv->duplexity = 0;
+
+		speed = mii_reg & MIIM_CIS8201_AUXCONSTAT_SPEED;
+
+		switch (speed) {
+		case MIIM_CIS8201_AUXCONSTAT_GBIT:
+			priv->speed = 1000;
+			break;
+		case MIIM_CIS8201_AUXCONSTAT_100:
+			priv->speed = 100;
+			break;
+		default:
+			priv->speed = 10;
+			break;
+		}
+	} else {
+		priv->speed = 0;
+		priv->duplexity = 0;
+	}
+
+	return 0;
+}
+
+/* Determine negotiated speed/duplex from the Davicom DM9161 specified
+ * configuration/status register (one bit per speed/duplex combination). */
+u16 mii_parse_dm9161_scsr(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_100H))
+		priv->speed = 100;
+	else
+		priv->speed = 10;
+
+	if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_10F))
+		priv->duplexity = 1;
+	else
+		priv->duplexity = 0;
+
+	return 0;
+}
+
+/* Sleep-poll (up to 10 times, HZ jiffies each) for the DM9161 PHY to
+ * finish auto-negotiation after a reset.  Always returns 0; callers
+ * re-read the status afterwards. */
+u16 dm9161_wait(u16 mii_reg, struct net_device *dev)
+{
+	int timeout = HZ;
+	int secondary = 10;
+	u16 temp;
+
+	do {
+
+		/* Davicom takes a bit to come up after a reset,
+		 * so wait here for a bit */
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(timeout);
+
+		temp = read_phy_reg(dev, MIIM_STATUS);
+
+		secondary--;
+	} while ((!(temp & MIIM_STATUS_AN_DONE)) && secondary);
+
+	return 0;
+}
+
+/*
+ * consult the BCM54xx auxilliary status register to find the link settings
+ */
+u16 mii_parse_bcm54xx_sr(u16 mii_reg, struct net_device * dev)
+{
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+
+	/* Link modes of the BCM5400 PHY, indexed by the 3-bit link-mode
+	 * field: [0] = full duplex flag, [1] = speed in Mbit/s.
+	 * (Rows are 2 entries; the declared [3] third column stays 0.) */
+	static const uint16_t link_table[8][3] = {
+		{ 0, 0    },	/* No link */
+		{ 0, 10   },	/* 10BT Half Duplex */
+		{ 1, 10   },	/* 10BT Full Duplex */
+		{ 0, 100  },	/* 100BT Half Duplex */
+		{ 0, 100  },	/* 100BT Half Duplex */
+		{ 1, 100  },	/* 100BT Full Duplex*/
+		{ 1, 1000 },	/* 1000BT */
+		{ 1, 1000 },	/* 1000BT */
+	};
+
+	uint16_t link_mode;
+
+	link_mode = mii_reg & MIIM_BCM54xx_AUXSTATUS_LINKMODE_MASK;
+	link_mode >>= MIIM_BCM54xx_AUXSTATUS_LINKMODE_SHIFT;
+
+	priv->duplexity = link_table[link_mode][0];
+	priv->speed = link_table[link_mode][1];
+
+	return 0;
+}
+
+/* Marvell 88E1011S.  Positional struct phy_info initializer:
+ * id, name, shift, then the config/startup/ack_int/handle_int/shutdown
+ * command lists (each terminated by miim_end). */
+static struct phy_info phy_info_M88E1011S = {
+	0x01410c6,
+	"Marvell 88E1011S",
+	4,
+	(const struct phy_cmd[]) {	/* config */
+		/* Reset and configure the PHY */
+		{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* startup */
+		/* Status is read once to clear old link state */
+		{MIIM_STATUS, miim_read, NULL},
+		/* Auto-negotiate */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		/* Read the status */
+		{MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
+		/* Clear the IEVENT register */
+		{MIIM_88E1011_IEVENT, miim_read, NULL},
+		/* Set up the mask */
+		{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* ack_int */
+		/* Clear the interrupt */
+		{MIIM_88E1011_IEVENT, miim_read, NULL},
+		/* Disable interrupts */
+		{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* handle_int */
+		/* Read the Status (2x to make sure link is right) */
+		{MIIM_STATUS, miim_read, NULL},
+		/* Check the status */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		{MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr},
+		/* Enable Interrupts */
+		{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* shutdown */
+		{MIIM_88E1011_IEVENT, miim_read, NULL},
+		{MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL},
+		{miim_end,}
+	},
+};
+
+/* Cicada 8204 */
+/* Cicada 8204.  Positional initializer: id, name, shift, then the
+ * five miim_end-terminated command lists. */
+static struct phy_info phy_info_cis8204 = {
+	0x3f11,
+	"Cicada Cis8204",
+	6,
+	(const struct phy_cmd[]) {	/* config */
+		/* Override PHY config settings */
+		{MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
+		/* Set up the interface mode */
+		{MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
+		/* Configure some basic stuff */
+		{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* startup */
+		/* Read the Status (2x to make sure link is right) */
+		{MIIM_STATUS, miim_read, NULL},
+		/* Auto-negotiate */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		/* Read the status */
+		{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+		/* Clear the status register */
+		{MIIM_CIS8204_ISTAT, miim_read, NULL},
+		/* Enable interrupts */
+		{MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* ack_int */
+		/* Clear the status register */
+		{MIIM_CIS8204_ISTAT, miim_read, NULL},
+		/* Disable interrupts */
+		{MIIM_CIS8204_IMASK, 0x0, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* handle_int */
+		/* Read the Status (2x to make sure link is right) */
+		{MIIM_STATUS, miim_read, NULL},
+		/* Auto-negotiate */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		/* Read the status */
+		{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+		/* Enable interrupts */
+		{MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* shutdown */
+		/* Clear the status register */
+		{MIIM_CIS8204_ISTAT, miim_read, NULL},
+		/* Disable interrupts */
+		{MIIM_CIS8204_IMASK, 0x0, NULL},
+		{miim_end,}
+	},
+};
+
+/* Cicada 8201 */
+/* Cicada 8201.  No interrupt support: ack_int/handle_int/shutdown
+ * lists are empty (miim_end only). */
+static struct phy_info phy_info_cis8201 = {
+	0xfc41,
+	"CIS8201",
+	4,
+	(const struct phy_cmd[]) {	/* config */
+		/* Override PHY config settings */
+		{MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL},
+		/* Set up the interface mode */
+		{MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL},
+		/* Configure some basic stuff */
+		{MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* startup */
+		/* Read the Status (2x to make sure link is right) */
+		{MIIM_STATUS, miim_read, NULL},
+		/* Auto-negotiate */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		/* Read the status */
+		{MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* ack_int */
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* handle_int */
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* shutdown */
+		{miim_end,}
+	},
+};
+
+/* Davicom DM9161E.  The startup list attaches dm9161_wait to the
+ * first status read to give the PHY time to finish auto-negotiation
+ * after the restart issued just before it. */
+static struct phy_info phy_info_dm9161 = {
+	0x0181b88,
+	"Davicom DM9161E",
+	4,
+	(const struct phy_cmd[]) {	/* config */
+		{MIIM_CONTROL, MIIM_DM9161_CR_STOP, NULL},
+		/* Do not bypass the scrambler/descrambler */
+		{MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT, NULL},
+		/* Clear 10BTCSR to default */
+		{MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT, NULL},
+		/* Configure some basic stuff */
+		{MIIM_CONTROL, MIIM_CR_INIT, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* startup */
+		/* Restart Auto Negotiation */
+		{MIIM_CONTROL, MIIM_DM9161_CR_RSTAN, NULL},
+		/* Status is read once to clear old link state */
+		{MIIM_STATUS, miim_read, dm9161_wait},
+		/* Auto-negotiate */
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		/* Read the status */
+		{MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
+		/* Clear any pending interrupts */
+		{MIIM_DM9161_INTR, miim_read, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* ack_int */
+		{MIIM_DM9161_INTR, miim_read, NULL},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* handle_int */
+		{MIIM_STATUS, miim_read, NULL},
+		{MIIM_STATUS, miim_read, mii_parse_sr},
+		{MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr},
+		{miim_end,}
+	},
+	(const struct phy_cmd[]) {	/* shutdown */
+		{MIIM_DM9161_INTR, miim_read, NULL},
+		{miim_end,}
+	},
+};
+
+/* Broadcom BCM5421S PHY */
+static struct phy_info phy_info_bcm5421s = {
+ .id = 0x2060E1,
+ .name = "Broadcom BCM5421S",
+ .shift = 0,
+ .config = (const struct phy_cmd[]) {
+ /* Configure some basic stuff */
+ {MIIM_CONTROL, MIIM_CR_INIT, NULL},
+#if 0 /* 5421 only */
+ miim_write(MII_BCM5400_AUXCONTROL, 0x1007),
+ miim_set_bits(MII_BCM5400_AUXCONTROL, 0x0400),
+ miim_write(MII_BCM5400_AUXCONTROL, 0x0007),
+ miim_set_bits(MII_BCM5400_AUXCONTROL, 0x0800),
+ miim_write(0x17, 0x000a),
+ miim_set_bits(MII_RERRCOUNTER, 0x0200),
+#endif
+#if 0 /* enable automatic low power */
+ miim_write(MII_NCONFIG, 0x9002),
+ miim_write(MII_NCONFIG, 0xa821),
+ miim_write(MII_NCONFIG, 0x941d),
+#endif
+ {miim_end,}
+ },
+ .startup = (const struct phy_cmd[]) {
+ /* Restart Auto Negotiation */
+ miim_set_bits(MIIM_CONTROL, BMCR_ANENABLE | BMCR_ANRESTART),
+#if 0
+ /* Status is read once to clear old link state */
+ {MIIM_STATUS, miim_read, dm9161_wait},
+#endif
+ /* Auto-negotiate */
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+
+ /* Read the link status */
+ {MIIM_BCM54xx_AUXSTATUS, miim_read, mii_parse_bcm54xx_sr},
+
+ {miim_end,}
+ },
+ .ack_int = (const struct phy_cmd[]) {
+ {miim_end,}
+ },
+ .handle_int = (const struct phy_cmd[]) {
+ {MIIM_STATUS, miim_read, NULL},
+ {MIIM_STATUS, miim_read, mii_parse_sr},
+ {miim_end,}
+ },
+ .shutdown = (const struct phy_cmd[]) {
+ {miim_end,}
+ },
+};
+
+/* NULL-terminated table of every supported PHY, scanned in order by
+ * get_phy_info() when matching the ID read from the hardware. */
+static struct phy_info *phy_info[] = {
+	&phy_info_cis8201,
+	&phy_info_cis8204,
+	&phy_info_M88E1011S,
+	&phy_info_dm9161,
+	&phy_info_bcm5421s,
+	NULL
+};
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev. return a struct phy_info structure describing that PHY
+ */
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev.  Assembles the 32-bit UID from PHYIR1 (upper half)
+ * and PHYIR2 (lower half), then matches it against the phy_info
+ * table, shifting off revision bits per entry.  Returns the matching
+ * struct phy_info, or NULL if the ID is unknown.  The last matching
+ * table entry wins if several match.
+ */
+struct phy_info * get_phy_info(struct net_device *dev)
+{
+	u16 phy_reg;
+	u32 phy_ID;
+	int i;
+	struct phy_info *theInfo = NULL;
+
+	/* Grab the bits from PHYIR1, and put them in the upper half */
+	phy_reg = read_phy_reg(dev, MIIM_PHYIR1);
+	phy_ID = (phy_reg & 0xffff) << 16;
+
+	/* Grab the bits from PHYIR2, and put them in the lower half */
+	phy_reg = read_phy_reg(dev, MIIM_PHYIR2);
+	phy_ID |= (phy_reg & 0xffff);
+
+	/* loop through all the known PHY types, and find one that */
+	/* matches the ID we read from the PHY. */
+	for (i = 0; phy_info[i]; i++)
+		if (phy_info[i]->id == (phy_ID >> phy_info[i]->shift))
+			theInfo = phy_info[i];
+
+	if (theInfo == NULL) {
+		/* NOTE(review): printk without a KERN_ level here */
+		printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
+		return NULL;
+	} else {
+		printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
+		       phy_ID);
+	}
+
+	return theInfo;
+}
+
+/* Take a list of struct phy_cmd, and, depending on the values, either */
+/* read or write, using a helper function if provided */
+/* It is assumed that all lists of struct phy_cmd will be terminated by */
+/* mii_end. */
+/* Execute a miim_end-terminated list of struct phy_cmd against the
+ * PHY attached to dev.  The top 16 bits of cmd->mii_data select the
+ * operation (see the miim_* macros in gianfar_phy.h):
+ *   0x0000 write (value from funct if supplied, else mii_data itself)
+ *   0x0001 read  (result handed to funct if supplied)
+ *   0x0002 clear bits (low half of mii_data holds the complemented mask)
+ *   0x0003 set bits
+ *   0x0004 flip bits
+ * The MII management block is reset and re-clocked first, then we
+ * spin until the management bus is idle.
+ */
+void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd)
+{
+	u16 result;
+	struct gfar_private *priv = (struct gfar_private *) dev->priv;
+	struct gfar *phyregs = priv->phyregs;
+
+	/* Reset the management interface */
+	gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
+
+	/* Setup the MII Mgmt clock speed */
+	gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
+
+	/* Wait until the bus is free */
+	while (gfar_read(&phyregs->miimind) & MIIMIND_BUSY)
+		cpu_relax();
+
+	while (cmd->mii_reg != miim_end) {
+		switch (cmd->mii_data >> 16) {
+		case 0x0000:
+			/* Write.  If a function was supplied, it provides
+			 * the value; otherwise the value comes from
+			 * cmd->mii_data itself. */
+			if (cmd->funct != NULL)
+				result = (*(cmd->funct)) (0, dev);
+			else
+				result = cmd->mii_data;
+
+			write_phy_reg(dev, cmd->mii_reg, result);
+			break;
+
+		case 0x0001:
+			/* Read the value of the PHY reg, and let the
+			 * supplied function (if any) process the result. */
+			result = read_phy_reg(dev, cmd->mii_reg);
+
+			if (cmd->funct != NULL)
+				(*(cmd->funct)) (result, dev);
+			break;
+
+		case 0x0002:
+			/* Clear bits: miim_clear_bits already stored the
+			 * complement of the mask, so AND it in. */
+			BUG_ON(cmd->funct);
+
+			result = read_phy_reg(dev, cmd->mii_reg);
+			result &= cmd->mii_data;
+			write_phy_reg(dev, cmd->mii_reg, result);
+			break;
+
+		case 0x0003:
+			/* Set bits: OR in the requested bits.  The
+			 * previous code AND-ed here (copy-paste from the
+			 * clear case), which cleared every bit outside
+			 * the mask instead of setting the masked ones. */
+			BUG_ON(cmd->funct);
+
+			result = read_phy_reg(dev, cmd->mii_reg);
+			result |= cmd->mii_data;
+			write_phy_reg(dev, cmd->mii_reg, result);
+			break;
+
+		case 0x0004:
+			/* Flip bits: XOR with the requested bits.  The
+			 * previous code AND-ed here as well. */
+			BUG_ON(cmd->funct);
+
+			result = read_phy_reg(dev, cmd->mii_reg);
+			result ^= cmd->mii_data;
+			write_phy_reg(dev, cmd->mii_reg, result);
+			break;
+
+		default:
+			printk("GIANFAR: Unknown MII command %08x\n",
+				cmd->mii_data);
+			BUG();
+		}
+		cmd++;
+	}
+}
--- /dev/null
+/*
+ * drivers/net/gianfar_phy.h
+ *
+ * Gianfar Ethernet Driver -- PHY handling
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __GIANFAR_PHY_H
+#define __GIANFAR_PHY_H
+
+/* simple datum processing commands */
+#define miim_end (0xffff0000U)
+#define miim_read (0x00010000U)
+#define miim_clear_bits(reg,x) { reg, (0x00020000U | ~(u32)(x)), NULL }
+#define miim_set_bits(reg,x) { reg, (0x00030000U | (u32)(x)), NULL }
+#define miim_flip_bits(reg,x) { reg, (0x00040000U | (u32)(x)), NULL }
+#define miim_write(reg, x) { reg, (0x0000ffffU & (u32)(x)), NULL }
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define MIIM_CONTROL 0x00
+#define MIIM_CONTROL_RESET 0x00008000
+#define MIIM_CONTROL_INIT 0x00001140
+#define MIIM_ANEN 0x00001000
+
+#define MIIM_CR 0x00
+#define MIIM_CR_RST 0x00008000
+#define MIIM_CR_INIT 0x00001000
+
+#define MIIM_STATUS 0x1
+#define MIIM_STATUS_AN_DONE 0x00000020
+#define MIIM_STATUS_LINK 0x0004
+
+#define MIIM_PHYIR1 0x2
+#define MIIM_PHYIR2 0x3
+
+#define GFAR_AN_TIMEOUT 0x000fffff
+
+#define MIIM_ANLPBPA 0x5
+#define MIIM_ANLPBPA_HALF 0x00000040
+#define MIIM_ANLPBPA_FULL 0x00000020
+
+#define MIIM_ANEX 0x6
+#define MIIM_ANEX_NP 0x00000004
+#define MIIM_ANEX_PRX 0x00000002
+
+
+/* Cicada Extended Control Register 1 */
+#define MIIM_CIS8201_EXT_CON1 0x17
+#define MIIM_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MIIM_CIS8204_IMASK 0x19
+#define MIIM_CIS8204_IMASK_IEN 0x8000
+#define MIIM_CIS8204_IMASK_SPEED 0x4000
+#define MIIM_CIS8204_IMASK_LINK 0x2000
+#define MIIM_CIS8204_IMASK_DUPLEX 0x1000
+#define MIIM_CIS8204_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MIIM_CIS8204_ISTAT 0x1a
+#define MIIM_CIS8204_ISTAT_STATUS 0x8000
+#define MIIM_CIS8204_ISTAT_SPEED 0x4000
+#define MIIM_CIS8204_ISTAT_LINK 0x2000
+#define MIIM_CIS8204_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MIIM_CIS8201_AUX_CONSTAT 0x1c
+#define MIIM_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MIIM_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MIIM_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MIIM_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MIIM_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MIIM_88E1011_PHY_STATUS 0x11
+#define MIIM_88E1011_PHYSTAT_SPEED 0xc000
+#define MIIM_88E1011_PHYSTAT_GBIT 0x8000
+#define MIIM_88E1011_PHYSTAT_100 0x4000
+#define MIIM_88E1011_PHYSTAT_DUPLEX 0x2000
+#define MIIM_88E1011_PHYSTAT_LINK 0x0400
+
+#define MIIM_88E1011_IEVENT 0x13
+#define MIIM_88E1011_IEVENT_CLEAR 0x0000
+
+#define MIIM_88E1011_IMASK 0x12
+#define MIIM_88E1011_IMASK_INIT 0x6400
+#define MIIM_88E1011_IMASK_CLEAR 0x0000
+
+/* DM9161 Control register values */
+#define MIIM_DM9161_CR_STOP 0x0400
+#define MIIM_DM9161_CR_RSTAN 0x1200
+
+#define MIIM_DM9161_SCR 0x10
+#define MIIM_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Specified Configuration and Status Register */
+#define MIIM_DM9161_SCSR 0x11
+#define MIIM_DM9161_SCSR_100F 0x8000
+#define MIIM_DM9161_SCSR_100H 0x4000
+#define MIIM_DM9161_SCSR_10F 0x2000
+#define MIIM_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MIIM_DM9161_INTR 0x15
+#define MIIM_DM9161_INTR_PEND 0x8000
+#define MIIM_DM9161_INTR_DPLX_MASK 0x0800
+#define MIIM_DM9161_INTR_SPD_MASK 0x0400
+#define MIIM_DM9161_INTR_LINK_MASK 0x0200
+#define MIIM_DM9161_INTR_MASK 0x0100
+#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008
+#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004
+#define MIIM_DM9161_INTR_INIT 0x0000
+#define MIIM_DM9161_INTR_STOP \
+(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \
+ | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MIIM_DM9161_10BTCSR 0x12
+#define MIIM_DM9161_10BTCSR_INIT 0x7800
+
+/* BCM54xx regs */
+#define MIIM_BCM54xx_AUXCONTROL 0x18
+#define MIIM_BCM54xx_AUXSTATUS 0x19
+#define MIIM_BCM54xx_AUXSTATUS_LINKMODE_MASK 0x0700
+#define MIIM_BCM54xx_AUXSTATUS_LINKMODE_SHIFT 8
+
+#define MIIM_READ_COMMAND 0x00000001
+
+/*
+ * struct phy_cmd: A command for reading or writing a PHY register
+ *
+ * mii_reg: The register to read or write
+ *
+ * mii_data: For writes, the value to put in the register.
+ * A value of -1 indicates this is a read.
+ *
+ * funct: A function pointer which is invoked for each command.
+ * For reads, this function will be passed the value read
+ * from the PHY, and process it.
+ * For writes, the result of this function will be written
+ * to the PHY register
+ */
+struct phy_cmd {
+	u32 mii_reg;	/* PHY register number; miim_end terminates a list */
+	u32 mii_data;	/* value to write; top 16 bits select the operation */
+	/* optional per-command hook (see the comment block above) */
+	u16 (*funct) (u16 mii_reg, struct net_device * dev);
+};
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY. During
+ * startup, the driver will poll the PHY to find out what its
+ * UID--as defined by registers 2 and 3--is. The 32-bit result
+ * gotten from the PHY will be shifted right by "shift" bits to
+ * discard any bits which may change based on revision numbers
+ * unimportant to functionality
+ *
+ * The struct phy_cmd entries represent pointers to an arrays of
+ * commands which tell the driver what to do to the PHY.
+ */
+struct phy_info {
+	u32 id;			/* expected value of (UID >> shift) */
+	char *name;		/* human-readable name for log messages */
+	unsigned int shift;	/* revision bits to discard from the UID */
+	/* Called to configure the PHY, and modify the controller
+	 * based on the results */
+	const struct phy_cmd *config;
+
+	/* Called when starting up the controller.  Usually sets
+	 * up the interrupt for state changes */
+	const struct phy_cmd *startup;
+
+	/* Called inside the interrupt handler to acknowledge
+	 * the interrupt */
+	const struct phy_cmd *ack_int;
+
+	/* Called in the bottom half to handle the interrupt */
+	const struct phy_cmd *handle_int;
+
+	/* Called when bringing down the controller.  Usually stops
+	 * the interrupts from being generated */
+	const struct phy_cmd *shutdown;
+};
+
+struct phy_info *get_phy_info(struct net_device *dev);
+void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd);
+
+#endif /* GIANFAR_PHY_H */
hmp->tx_ring[entry].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[entry] = NULL;
+ hmp->tx_skbuff[entry] = 0;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
}
}
pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[i].status_n_length = 0;
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
for (i = 0; i < TX_RING_SIZE; i++) {
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
hmp->tx_ring[i].status_n_length = 0;
}
/* Mark the last entry of the ring */
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- hmp->tx_skbuff[entry] = NULL;
+ hmp->tx_skbuff[entry] = 0;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->rx_ring[i].addr, hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = NULL;
+ hmp->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
hmp->tx_ring[i].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = NULL;
+ hmp->tx_skbuff[i] = 0;
}
}
write_lock(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock(&disc_data_lock);
if (sp == 0)
return;
unregister_netdev(sp->dev);
}
-static int sp_set_mac_address(struct net_device *dev, void __user *addr)
+static int sp_set_mac_address(struct net_device *dev, void *addr)
{
return copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN) ? -EFAULT : 0;
}
switch(cmd) {
case SIOCGIFNAME:
- err = copy_to_user((void __user *) arg, sp->dev->name,
+ err = copy_to_user((void *) arg, sp->dev->name,
strlen(sp->dev->name) + 1) ? -EFAULT : 0;
break;
case SIOCGIFENCAP:
- err = put_user(0, (int __user *)arg);
+ err = put_user(0, (int *)arg);
break;
case SIOCSIFENCAP:
- if (get_user(tmp, (int __user *) arg)) {
+ if (get_user(tmp, (int *) arg)) {
err = -EFAULT;
break;
}
break;
case SIOCSIFHWADDR:
- err = sp_set_mac_address(sp->dev, (void __user *) arg);
+ err = sp_set_mac_address(sp->dev, (void *) arg);
break;
/* Allow stty to read, but not set, the serial port */
unregister_netdev(ax->dev);
- tty->disc_data = NULL;
+ tty->disc_data = 0;
ax->tty = NULL;
ax_free(ax);
}
-static int ax_set_mac_address(struct net_device *dev, void __user *addr)
+static int ax_set_mac_address(struct net_device *dev, void *addr)
{
if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
return -EFAULT;
/* Perform I/O control on an active ax25 channel. */
-static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
+static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
{
struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
unsigned int tmp;
return 0;
case SIOCGIFENCAP:
- return put_user(4, (int __user *)arg);
+ return put_user(4, (int *)arg);
case SIOCSIFENCAP:
- if (get_user(tmp, (int __user *)arg))
+ if (get_user(tmp, (int *)arg))
return -EFAULT;
ax->mode = tmp;
ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
* ------------------
*
* You can find a subset of the documentation in
- * Documentation/networking/z8530drv.txt.
+ * linux/Documentation/networking/z8530drv.txt.
*/
/*
}
/* Set the wrap registers for string I/O reads. */
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = ioaddr + NIC_OFFSET;
ei_status.name = name;
ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
+ ei_status.rx_start_page = HP_START_PG + TX_2X_PAGES;
ei_status.stop_page = HP_STOP_PG;
ei_status.reset_8390 = &hpp_reset_8390;
ei_status.block_output = &hpp_mem_block_output;
ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
dev->mem_start = mem_start;
- ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
+ ei_status.rmem_start = dev->mem_start + TX_2X_PAGES*256;
dev->mem_end = ei_status.rmem_end
= dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
}
/* Set the wrap registers for programmed-I/O operation. */
outw(HW_Page, ioaddr + HP_PAGING);
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
/* Select the operational page. */
outw(Perf_Page, ioaddr + HP_PAGING);
config TOSHIBA_FIR
tristate "Toshiba Type-O IR Port"
- depends on IRDA && PCI && !64BIT
+ depends on IRDA && !64BIT
help
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
config VIA_FIR
tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
- depends on IRDA && ISA && PCI
+ depends on IRDA && ISA
help
Say Y here if you want to build support for the VIA VT8231
and VIA VT1211 IrDA controllers, found on the motherboards using
/* Delay a few ms just to allow the reset to complete */
msec_delay(IXGB_DELAY_AFTER_RESET);
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-#ifdef DBG
+#if DBG
/* Make sure the self-clearing global reset bit did self clear */
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
#define ASSERT(x) if(!(x)) BUG()
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#ifdef DBG
+#if DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = lp->rx_skbuff[i];
- lp->rx_skbuff[i] = NULL;
+ lp->rx_skbuff[i] = 0;
lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
if (skb)
dev_kfree_skb_any(skb);
/* The Tx buffer address is filled in as needed, but we do need to clear
the upper ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- lp->tx_skbuff[i] = NULL;
+ lp->tx_skbuff[i] = 0;
lp->tx_ring[i].base = 0;
}
in the bounce buffer. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
+ lp->tx_skbuff[entry] = 0;
}
dirty_tx++;
}
#include <asm/cache.h>
#include <asm/parisc-device.h>
-#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
+static char version[] __devinitdata =
+ "82596.c $Revision: 1.29 $\n";
/* DEBUG flags
*/
do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
#define CHECK_INV(addr,len) \
- do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+ do { dma_cache_sync((void *)addr,len, DMA_FROM_DEVICE); } while(0)
#define CHECK_WBACK_INV(addr,len) \
- do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+ do { dma_cache_sync((void *)addr,len, DMA_BIDIRECTIONAL); } while (0)
#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
rfd = lp->rfd_head;
printk("rfd_head = %p\n", rfd);
do {
- printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+ printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
" count %04x\n",
rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
rfd->count);
struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
if (skb == NULL)
- panic("%s: alloc_skb() failed", __FILE__);
+ panic("82596: alloc_skb() failed");
skb_reserve(skb, 2);
dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
DMA_FROM_DEVICE);
disable_irq(dev->irq); /* disable IRQs from LAN */
DEB(DEB_INIT,
- printk("RESET 82596 port: %p (with IRQ %d disabled)\n",
- (void*)(dev->base_addr + PA_I82596_RESET),
+ printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
+ dev->base_addr + PA_I82596_RESET,
dev->irq));
gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
lp->cmd_head = NULL;
lp->scb.cmd = I596_NULL;
- DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
+ DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
CA(dev);
- if (wait_istat(dev, lp, 1000, "initialization timed out"))
+ if (wait_istat(dev,lp,1000,"initialization timed out"))
goto failed;
- DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
+ DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
/* Ensure rx frame/buffer descriptors are tidy */
rebuild_rx_bufs(dev);
enable_irq(dev->irq); /* enable IRQs from LAN */
- DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
memcpy(lp->cf_cmd.i596_config, init_setup, 14);
lp->cf_cmd.cmd.command = CmdConfigure;
CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
i596_add_cmd(dev, &lp->cf_cmd.cmd);
- DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
lp->sa_cmd.cmd.command = CmdSASetup;
CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &lp->sa_cmd.cmd);
- DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
+ DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
lp->tdr_cmd.cmd.command = CmdTDR;
CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
i596_add_cmd(dev, &lp->tdr_cmd.cmd);
spin_lock_irqsave (&lp->lock, flags);
- if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
+ if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
spin_unlock_irqrestore (&lp->lock, flags);
goto failed;
}
- DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
+ DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
lp->scb.command = RX_START;
lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
spin_unlock_irqrestore (&lp->lock, flags);
- if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
+ if (wait_cmd(dev,lp,1000,"RX_START not processed"))
goto failed;
- DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
+ DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
return 0;
struct i596_rbd *rbd;
int frames = 0;
- DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
+ DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
lp->rfd_head, lp->rbd_head));
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
}
else {
CHECK_INV(rfd, sizeof(struct i596_rfd));
}
- DEB(DEB_RXFRAME, printk("frames %d\n", frames));
+ DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
return 0;
}
CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
}
- wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
+ wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
lp->scb.cmd = I596_NULL;
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
}
{
unsigned long flags;
- DEB(DEB_RESET, printk("i596_reset\n"));
+ DEB(DEB_RESET,printk("i596_reset\n"));
spin_lock_irqsave (&lp->lock, flags);
- wait_cmd(dev, lp, 100, "i596_reset timed out");
+ wait_cmd(dev,lp,100,"i596_reset timed out");
netif_stop_queue(dev);
CA(dev);
/* wait for shutdown */
- wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
+ wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
spin_unlock_irqrestore (&lp->lock, flags);
i596_cleanup_cmd(dev,lp);
struct i596_private *lp = dev->priv;
unsigned long flags;
- DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
+ DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
cmd->status = 0;
cmd->command |= (CMD_EOL | CMD_INTR);
CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
} else {
lp->cmd_head = cmd;
- wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
+ wait_cmd(dev,lp,100,"i596_add_cmd timed out");
lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
lp->scb.command = CUC_START;
CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
static int i596_open(struct net_device *dev)
{
- DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
+ DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
printk("%s: IRQ %d not free\n", dev->name, dev->irq);
struct i596_private *lp = dev->priv;
/* Transmitter timeout, serious problems. */
- DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
+ DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
dev->name));
lp->stats.tx_errors++;
/* Try to restart the adaptor */
if (lp->last_restart == lp->stats.tx_packets) {
- DEB(DEB_ERRORS, printk("Resetting board.\n"));
+ DEB(DEB_ERRORS,printk ("Resetting board.\n"));
/* Shutdown and restart */
i596_reset (dev, lp);
} else {
/* Issue a channel attention signal */
- DEB(DEB_ERRORS, printk("Kicking board.\n"));
+ DEB(DEB_ERRORS,printk ("Kicking board.\n"));
lp->scb.command = CUC_START | RX_START;
CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
CA (dev);
short length = skb->len;
dev->trans_start = jiffies;
- DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
+ DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
skb->len, skb->data));
if (length < ETH_ZLEN) {
tbd = lp->tbds + lp->next_tx_cmd;
if (tx_cmd->cmd.command) {
- DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
+ DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
dev->name));
lp->stats.tx_dropped++;
/* This lot is ensure things have been cache line aligned. */
if (sizeof(struct i596_rfd) != 32) {
printk("82596: sizeof(struct i596_rfd) = %d\n",
- (int)sizeof(struct i596_rfd));
+ sizeof(struct i596_rfd));
return -ENODEV;
}
if ((sizeof(struct i596_rbd) % 32) != 0) {
printk("82596: sizeof(struct i596_rbd) = %d\n",
- (int)sizeof(struct i596_rbd));
+ sizeof(struct i596_rbd));
return -ENODEV;
}
if ((sizeof(struct tx_cmd) % 32) != 0) {
printk("82596: sizeof(struct tx_cmd) = %d\n",
- (int)sizeof(struct tx_cmd));
+ sizeof(struct tx_cmd));
return -ENODEV;
}
if (sizeof(struct i596_tbd) != 32) {
printk("82596: sizeof(struct i596_tbd) = %d\n",
- (int)sizeof(struct i596_tbd));
+ sizeof(struct i596_tbd));
return -ENODEV;
}
#ifndef __LP64__
if (sizeof(struct i596_private) > 4096) {
printk("82596: sizeof(struct i596_private) = %d\n",
- (int)sizeof(struct i596_private));
+ sizeof(struct i596_private));
return -ENODEV;
}
#endif
for (i=0; i < 6; i++) {
eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
- printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+ printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
}
dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
if (!dev->mem_start) {
- printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+ printk("%s: Couldn't get shared memory\n", dev->name);
return -ENOMEM;
}
+ DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = eth_addr[i];
+ DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+
+ DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
+
+ DEB(DEB_PROBE,printk(version));
/* The 82596-specific entries in the device structure. */
dev->open = i596_open;
dev->priv = (void *)(dev->mem_start);
lp = dev->priv;
+ DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
+ dev->name, (unsigned long)lp,
+ sizeof(struct i596_private), (unsigned long)&lp->scb));
memset(lp, 0, sizeof(struct i596_private));
lp->scb.command = 0;
CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
- i = register_netdev(dev);
- if (i) {
- lp = dev->priv;
- dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
- (void *)dev->mem_start, lp->dma_addr);
- return i;
- };
-
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
- DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
- dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
-
return 0;
}
unsigned short status, ack_cmd = 0;
if (dev == NULL) {
- printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
+ printk("i596_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
spin_lock (&lp->lock);
- wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
status = lp->scb.status;
- DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
dev->name, irq, status));
ack_cmd = status & 0xf000;
struct i596_cmd *ptr;
if ((status & 0x8000))
- DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
+ DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
if ((status & 0x2000))
- DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+ DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
while (lp->cmd_head != NULL) {
CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
ptr = lp->cmd_head;
- DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
+ DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
lp->cmd_head->status, lp->cmd_head->command));
lp->cmd_head = ptr->v_next;
lp->cmd_backlog--;
struct sk_buff *skb = tx_cmd->skb;
if ((ptr->status) & STAT_OK) {
- DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
} else {
lp->stats.tx_errors++;
if ((ptr->status) & 0x0020)
unsigned short status = ((struct tdr_cmd *)ptr)->status;
if (status & 0x8000) {
- DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
+ DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
} else {
if (status & 0x4000)
printk("%s: Transceiver problem.\n", dev->name);
if (status & 0x1000)
printk("%s: Short circuit.\n", dev->name);
- DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
+ DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
}
break;
}
}
if ((status & 0x1000) || (status & 0x4000)) {
if ((status & 0x4000))
- DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
+ DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
i596_rx(dev);
/* Only RX_START if stopped - RGH 07-07-96 */
if (status & 0x1000) {
if (netif_running(dev)) {
- DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
ack_cmd |= RX_START;
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
}
}
}
- wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
lp->scb.command = ack_cmd;
CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
CA(dev);
- wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
- DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
+ wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
+ DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
spin_unlock (&lp->lock);
return IRQ_HANDLED;
netif_stop_queue(dev);
- DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
dev->name, lp->scb.status));
spin_lock_irqsave(&lp->lock, flags);
- wait_cmd(dev, lp, 100, "close1 timed out");
+ wait_cmd(dev,lp,100,"close1 timed out");
lp->scb.command = CUC_ABORT | RX_ABORT;
CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
CA(dev);
- wait_cmd(dev, lp, 100, "close2 timed out");
+ wait_cmd(dev,lp,100,"close2 timed out");
spin_unlock_irqrestore(&lp->lock, flags);
DEB(DEB_STRUCT,i596_display_data(dev));
i596_cleanup_cmd(dev,lp);
struct i596_private *lp = dev->priv;
int config = 0, cnt;
- DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
- dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+ DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
lp->cf_cmd.i596_config[8] |= 0x01;
for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
memcpy(cp, dmi->dmi_addr, 6);
if (i596_debug > 1)
- DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
}
CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
if (num_drivers >= MAX_DRIVERS) {
/* max count of possible i82596 drivers reached */
- return -ENOMEM;
+ return -ENODEV;
}
-
- if (num_drivers == 0)
- printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
if (!dev->irq) {
- printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
- __FILE__, dev->hpa);
+ printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);
return -ENODEV;
}
return -ENODEV;
}
+ retval = register_netdev(netdevice);
+ if (retval) {
+ struct i596_private *lp = netdevice->priv;
+ printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);
+ dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
+ (void *)netdevice->mem_start, lp->dma_addr);
+ free_netdev(netdevice);
+ return -ENODEV;
+ };
if (dev->id.sversion == 0x72) {
((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
}
(void *)netdevice->mem_start, lp->dma_addr);
free_netdev(netdevice);
}
- num_drivers = 0;
unregister_parisc_driver(&lan_driver);
}
kfree(rfd);
} while (rfd != lp->rx_tail);
- lp->rx_tail = NULL;
+ lp->rx_tail = 0;
#if 0
for (lp->rbd_list) {
for (i = 0; i < N_RX_RING; ++i) {
if (mp->rx_bufs[i] != 0) {
dev_kfree_skb(mp->rx_bufs[i]);
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
}
}
for (i = mp->tx_empty; i != mp->tx_fill; ) {
cp->xfer_status = 0;
++cp;
}
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
st_le16(&cp->command, DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
mp->stats.rx_bytes += skb->len;
netif_rx(skb);
dev->last_rx = jiffies;
- mp->rx_bufs[i] = NULL;
+ mp->rx_bufs[i] = 0;
++mp->stats.rx_packets;
}
} else {
static int __init myri_sbus_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
- {NULL,}
+ {0,}
};
#endif
static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM_DESC(io, "(ignored)");
MODULE_PARM_DESC(irq, "(ignored)");
MODULE_PARM_DESC(bad, "(ignored)");
+#endif
/* Module code fixed by David Weinehall */
frag = skb_shinfo(skb)->frags;
if (!nr_frags)
- frag = NULL;
+ frag = 0;
extsts = 0;
if (skb->ip_summed == CHECKSUM_HW) {
extsts |= EXTSTS_IPPKT;
}
else if (ei_local->tx2 == 0)
{
- output_page = ei_local->tx_start_page + TX_PAGES/2;
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
Modified from Am79C90 data sheet.
---------------------------------------------------------------------------- */
-#ifdef BROKEN_MULTICAST
+#if BROKEN_MULTICAST
static void updateCRC(int *CRC, int bit)
{
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
ap->olim = buf;
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
return 1;
}
clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
ap->optr = ap->olim;
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
/* queue the frame to be processed */
skb->cb[0] = ap->state;
skb_queue_tail(&ap->rqueue, skb);
- ap->rpkt = NULL;
+ ap->rpkt = 0;
ap->state = 0;
return;
struct ppp *ppp;
if (pf != 0) {
- file->private_data = NULL;
+ file->private_data = 0;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
if (file == ppp->owner)
struct ppp_file *pf = file->private_data;
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = 0;
ret = count;
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- *skb_push(skb, 2) = 1;
+ {
+ u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
+
+ *p = htons(4); /* indicate outbound in DLT_LINUX_SLL */;
+ }
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
kfree_skb(skb);
return;
}
spin_lock_bh(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
} else {
/* channel got unregistered */
kfree_skb(skb);
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
}
spin_unlock_bh(&pch->downl);
return;
return;
#endif /* CONFIG_PPP_MULTILINK */
- ppp->xmit_pending = NULL;
+ ppp->xmit_pending = 0;
kfree_skb(skb);
}
/* check if the packet passes the pass and active filters */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- *skb_push(skb, 2) = 0;
+ {
+ u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
+
+ *p = 0; /* indicate inbound in DLT_LINUX_SLL */
+ }
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
if (pch == 0)
return; /* should never happen */
- chan->ppp = NULL;
+ chan->ppp = 0;
/*
* This ensures that we have returned from any calls into the
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = NULL;
+ pch->chan = 0;
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
ppp->xstate = 0;
xcomp = ppp->xcomp;
xstate = ppp->xc_state;
- ppp->xc_state = NULL;
+ ppp->xc_state = 0;
ppp->rstate = 0;
rcomp = ppp->rcomp;
rstate = ppp->rc_state;
- ppp->rc_state = NULL;
+ ppp->rc_state = 0;
ppp_unlock(ppp);
if (xstate) {
if (ce->comp->compress_proto == proto)
return ce;
}
- return NULL;
+ return 0;
}
/* Register a compressor */
find_compressor(int type)
{
struct compressor_entry *ce;
- struct compressor *cp = NULL;
+ struct compressor *cp = 0;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(type);
down(&all_ppp_sem);
ppp_lock(ppp);
dev = ppp->dev;
- ppp->dev = NULL;
+ ppp->dev = 0;
ppp_unlock(ppp);
/* This will call dev_close() for us. */
if (dev) {
ppp_ccp_closed(ppp);
if (ppp->vj) {
slhc_free(ppp->vj);
- ppp->vj = NULL;
+ ppp->vj = 0;
}
skb_queue_purge(&ppp->file.xq);
skb_queue_purge(&ppp->file.rq);
}
if (ppp->active_filter) {
kfree(ppp->active_filter);
- ppp->active_filter = NULL;
+ ppp->active_filter = 0;
}
#endif /* CONFIG_PPP_FILTER */
if (pch->file.index == unit)
return pch;
}
- return NULL;
+ return 0;
}
/*
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
tty_stuffed = 1;
} else {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
flush:
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_lock_bh(&ap->xmit_lock);
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = NULL;
+ ap->tpkt = 0;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
-static struct ppp_channel_ops pppoe_chan_ops;
-
static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
skb ? session_id : session_id | 0x40, frame_id);
if (skb) {
dev_kfree_skb(skb);
- skb = NULL;
+ skb = 0;
}
good_frame:
dev->last_rx = jiffies;
stats->rx_bytes+=dlen;
stats->rx_packets++;
- lp->rx_skb[ns] = NULL;
+ lp->rx_skb[ns] = 0;
lp->rx_session_id[ns] |= 0x40;
return 0;
if (ns < NPIDS) {
if ((skb = lp->rx_skb[ns])) {
dev_kfree_skb(skb);
- lp->rx_skb[ns] = NULL;
+ lp->rx_skb[ns] = 0;
}
lp->rx_session_id[ns] |= 0x40;
}
return 0;
}
\f
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-\f
/* The typical workload of the driver:
Handle the network interface interrupts. */
static irqreturn_t seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
}
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it. This has to
+ * occur in the interrupt handler!
+ */
+inline void wait_for_buffer(struct net_device * dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned long tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
+ cpu_relax();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
#ifdef MODULE
static struct net_device *dev_seeq;
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
{ "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
{ "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
- {NULL,},
+ {0,},
};
struct mii_phy {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_skbuff[i] = 0;
sis_priv->tx_ring[i].cmdsts = 0;
sis_priv->tx_ring[i].bufptr = 0;
sis_priv->stats.tx_dropped++;
sis_priv->rx_ring[i].bufptr,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- sis_priv->rx_skbuff[i] = NULL;
+ sis_priv->rx_skbuff[i] = 0;
}
}
for (i = 0; i < NUM_TX_DESC; i++) {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_skbuff[i] = 0;
}
}
for (i = 0 ; i < 6 ; i++, p++)
*p = canonical[*p] ;
}
- slot = NULL;
+ slot = 0 ;
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->n) { /* not used */
if (!del && !slot) /* if !del save first free */
{
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t1 ;
- struct s_smt_fp_txd volatile *t2 = NULL ;
+ struct s_smt_fp_txd volatile *t2=0 ;
SMbuf *mb ;
u_long tbctrl ;
int i ;
{
struct smt_para *pa ;
const struct s_p_tab *pt ;
- struct fddi_mib_m *mib_m = NULL;
- struct fddi_mib_p *mib_p = NULL;
+ struct fddi_mib_m *mib_m = 0 ;
+ struct fddi_mib_p *mib_p = 0 ;
int len ;
int plen ;
char *from ;
/*
* check special paras
*/
- swap = NULL;
+ swap = 0 ;
switch (para) {
case SMT_P10F0 :
case SMT_P10F1 :
char c ;
char *mib_addr ;
struct fddi_mib *mib ;
- struct fddi_mib_m *mib_m = NULL;
- struct fddi_mib_a *mib_a = NULL;
- struct fddi_mib_p *mib_p = NULL;
+ struct fddi_mib_m *mib_m = 0 ;
+ struct fddi_mib_a *mib_a = 0 ;
+ struct fddi_mib_p *mib_p = 0 ;
int mac ;
int path ;
int port ;
const struct s_p_tab *pt ;
for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
;
- return(pt->p_num ? pt : NULL) ;
+ return(pt->p_num ? pt : 0) ;
}
static int smt_mib_phys(struct s_smc *smc)
char *p ;
int len ;
int plen ;
- void *found = NULL;
+ void *found = 0 ;
SK_UNUSED(smc) ;
len -= plen ;
if (len < 0) {
DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
- return NULL;
+ return(0) ;
}
if ((plen & 3) && (para != SMT_P_ECHODATA)) {
DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
- return NULL;
+ return(0) ;
}
if (found)
return(found) ;
}
- return NULL;
+ return(0) ;
}
#if 0
*/
/* Attention: don't initialize mib pointer here! */
/* It must be initialized during phase 2 */
- smc->y[port].mib = NULL;
+ smc->y[port].mib = 0 ;
mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ;
pm->fddiPORTIndex = port+INDEX_PORT ;
{ "SBACOMMAND",16, 0 } ,
{ "SBAAVAILABLE",17, 1, 0, 100 } ,
#endif
- { NULL }
+ { 0 }
} ;
/* Define maximum string size for values and keybuffer */
void smt_timer_init(struct s_smc *smc)
{
- smc->t.st_queue = NULL;
+ smc->t.st_queue = 0 ;
smc->t.st_fast.tm_active = FALSE ;
- smc->t.st_fast.tm_next = NULL;
+ smc->t.st_fast.tm_next = 0 ;
hwt_init(smc) ;
}
timer->tm_active = TRUE ;
if (!smc->t.st_queue) {
smc->t.st_queue = timer ;
- timer->tm_next = NULL;
+ timer->tm_next = 0 ;
timer->tm_delta = time ;
hwt_start(smc,time) ;
return ;
done = 1 ;
}
}
- *last = NULL;
+ *last = 0 ;
next = smc->t.st_queue ;
smc->t.st_queue = tm ;
if (evc->evc_code == code && evc->evc_index == index)
return(evc) ;
}
- return NULL;
+ return(0) ;
}
#define THRESHOLD_2 (2*TICKS_PER_SECOND)
if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
return;
- tty->disc_data = NULL;
+ tty->disc_data = 0;
sl->tty = NULL;
if (!sl->leased)
sl->line = 0;
static int __init bigmac_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
{"D-Link DFE-530TXS FAST Ethernet Adapter"},
{"D-Link DL10050-based FAST Ethernet Adapter"},
{"Sundance Technology Alta"},
- {NULL,}, /* 0 terminated list. */
+ {0,}, /* 0 terminated list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
np->rx_ring[i].frag[0].length = 0;
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->tx_ring[i].status = 0;
}
return;
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
np->stats.tx_dropped++;
}
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
+ np->tx_skbuff[entry] = 0;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- np->rx_skbuff[i] = NULL;
+ np->rx_skbuff[i] = 0;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].frag[0].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- np->tx_skbuff[i] = NULL;
+ np->tx_skbuff[i] = 0;
}
}
/* Let the chip settle down a bit, it seems that helps
* for sleep mode on some models
*/
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
/* Make sure we aren't polling PHY status change. We
* don't currently use that feature though
* dont wait a bit here, looks like the chip takes
* some time to really shut down
*/
- msleep(10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
}
writel(0, gp->regs + MAC_TXCFG);
lp->tx_new = TX_NEXT(entry);
}
-struct net_device *last_dev;
+struct net_device *last_dev = 0;
static int lance_open(struct net_device *dev)
{
static int __init sparc_lance_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
- struct sbus_dma *ledma = NULL;
+ struct sbus_dev *sdev = 0;
+ struct sbus_dma *ledma = 0;
static int called;
int cards = 0, v;
for_each_sbusdev (sdev, bus) {
if (strcmp(sdev->prom_name, "le") == 0) {
cards++;
- if ((v = sparc_lance_init(sdev, NULL, NULL)))
+ if ((v = sparc_lance_init(sdev, 0, 0)))
return v;
continue;
}
cards++;
ledma = find_ledma(sdev);
if ((v = sparc_lance_init(sdev->child,
- ledma, NULL)))
+ ledma, 0)))
return v;
continue;
}
if (strcmp(sdev->prom_name, "lebuffer") == 0){
cards++;
if ((v = sparc_lance_init(sdev->child,
- NULL, sdev)))
+ 0, sdev)))
return v;
continue;
}
{
struct net_device *dev = NULL;
struct sbus_bus *bus;
- struct sbus_dev *sdev = NULL;
+ struct sbus_dev *sdev = 0;
static int called;
int cards = 0, v;
/* restore 5701 hardware bug workaround flag */
tp->tg3_flags = flags_save;
- /* Unfortunately, we have to delay before the PCI read back.
- * Some 575X chips even will not respond to a PCI cfg access
- * when the reset command is given to the chip.
- *
- * How do these hardware designers expect things to work
- * properly if the PCI write is posted for a long period
- * of time? It is always necessary to have some method by
- * which a register read back can occur to push the write
- * out which does the reset.
- *
- * For most tg3 variants the trick below was working.
- * Ho hum...
- */
- udelay(120);
-
/* Flush PCI posted writes. The normal MMIO registers
* are inaccessible at this time so this is the only
- * way to make this reliably (actually, this is no longer
- * the case, see above). I tried to use indirect
+ * way to make this reliably. I tried to use indirect
* register read/write but this upset some 5701 variants.
*/
pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* Define this to enable Link beat monitoring */
#undef MONITOR
-/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+/* Turn on debugging. See linux/Documentation/networking/tlan.txt for details */
static int debug;
static int bbuf;
const char *media[] = {
"10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+ "100baseTx-FD", "100baseT4", 0
};
int media_map[] = { 0x0020, 0x0040, 0x0080, 0x0100, 0x0200,};
config SMCTR
tristate "SMC ISA/MCA adapter support"
- depends on TR && (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
+ depends on TR && (ISA || MCA_LEGACY)
---help---
This is support for the ISA and MCA SMC Token Ring cards,
specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
}
}
/* Lite-On boards have the address byte-swapped. */
- if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0)
&& dev->dev_addr[1] == 0x00)
for (i = 0; i < 6; i+=2) {
char tmp = dev->dev_addr[i];
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#ifdef DEBUG
+#if DEBUG
print_binary(status);
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
xircom_init_ring(dev);
/* Clear the tx ring */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
tp->tx_ring[i].status = 0;
}
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
tp->tx_ring[i].status = 0;
tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_skbuff[entry]);
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = tp->rx_skbuff[i];
- tp->rx_skbuff[i] = NULL;
+ tp->rx_skbuff[i] = 0;
tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
tp->rx_ring[i].length = 0;
tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (tp->tx_skbuff[i])
dev_kfree_skb(tp->tx_skbuff[i]);
- tp->tx_skbuff[i] = NULL;
+ tp->tx_skbuff[i] = 0;
}
tp->open = 0;
if (entry != 0) {
/* Avoid a chip errata by prefixing a dummy entry. */
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
tp->tx_ring[entry].length =
(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
tp->tx_ring[entry].buffer1 = 0;
entry = tp->cur_tx++ % TX_RING_SIZE;
}
- tp->tx_skbuff[entry] = NULL;
+ tp->tx_skbuff[entry] = 0;
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE - 1)
tx_flags |= Tx1RingWrap; /* Wrap ring. */
}
#endif /* USE_MMIO */
dev->base_addr = ioaddr;
- rp = netdev_priv(dev);
- rp->quirks = quirks;
rhine_power_init(dev);
dev->irq = pdev->irq;
+ rp = netdev_priv(dev);
spin_lock_init(&rp->lock);
rp->pdev = pdev;
+ rp->quirks = quirks;
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
next += sizeof(struct rx_desc);
rp->rx_ring[i].next_desc = cpu_to_le32(next);
- rp->rx_skbuff[i] = NULL;
+ rp->rx_skbuff[i] = 0;
}
/* Mark the last entry as wrapping the ring. */
rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
- rp->rx_skbuff[i] = NULL;
+ rp->rx_skbuff[i] = 0;
}
}
rp->dirty_tx = rp->cur_tx = 0;
next = rp->tx_ring_dma;
for (i = 0; i < TX_RING_SIZE; i++) {
- rp->tx_skbuff[i] = NULL;
+ rp->tx_skbuff[i] = 0;
rp->tx_ring[i].tx_status = 0;
rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
next += sizeof(struct tx_desc);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
- rp->tx_skbuff[i] = NULL;
- rp->tx_buf[i] = NULL;
+ rp->tx_skbuff[i] = 0;
+ rp->tx_buf[i] = 0;
}
}
struct velocity_info *vptr = dev->priv;
struct mac_regs * regs = vptr->mac_regs;
unsigned long flags;
- struct mii_ioctl_data *miidata = if_mii(ifr);
+ struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data);
int err;
switch (cmd) {
register u32 _crc;
_crc = crc;
- __asm__ __volatile__ (
+ __asm __volatile (
"xorl %%ebx, %%ebx\n"
"movl %2, %%esi\n"
"movl %3, %%ecx\n"
config ARLAN
tristate "Aironet Arlan 655 & IC2200 DS support"
- depends on NET_RADIO && ISA && !64BIT
+ depends on NET_RADIO && ISA
---help---
Aironet makes Arlan, a class of wireless LAN adapters. These use the
www.Telxon.com chip, which is also used on several similar cards.
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on NET_RADIO && ISA && (PCI || BROKEN)
+ depends on NET_RADIO && (ISA || PCI)
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.
static void wifi_setup(struct net_device *dev)
{
- dev->hard_header = NULL;
- dev->rebuild_header = NULL;
- dev->hard_header_cache = NULL;
- dev->header_cache_update= NULL;
+ dev->hard_header = 0;
+ dev->rebuild_header = 0;
+ dev->hard_header_cache = 0;
+ dev->header_cache_update= 0;
dev->hard_header_parse = wll_header_parse;
dev->hard_start_xmit = &airo_start_xmit11;
}
ai = dev->priv;
- ai->wifidev = NULL;
+ ai->wifidev = 0;
ai->flags = 0;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
printk(KERN_DEBUG "airo: Found an MPI350 card\n");
struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia )
{
- return _init_airo_card ( irq, port, is_pcmcia, NULL);
+ return _init_airo_card ( irq, port, is_pcmcia, 0);
}
EXPORT_SYMBOL(init_airo_card);
.release = proc_close
};
-static struct proc_dir_entry *airo_entry;
+static struct proc_dir_entry *airo_entry = 0;
struct proc_data {
int release_buffer;
(data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
index = data->wbuffer[0] - '0';
if (data->wbuffer[1] == '\n') {
- set_wep_key(ai, index, NULL, 0, 1, 1);
+ set_wep_key(ai, index, 0, 0, 1, 1);
return;
}
j = 2;
}
data->writelen = 0;
data->maxwritelen = 0;
- data->wbuffer = NULL;
- data->on_close = NULL;
+ data->wbuffer = 0;
+ data->on_close = 0;
if (file->f_mode & FMODE_WRITE) {
if (!(file->f_mode & FMODE_READ)) {
static struct net_device_list {
struct net_device *dev;
struct net_device_list *next;
-} *airo_devices;
+} *airo_devices = 0;
/* Since the card doesn't automatically switch to the right WEP mode,
we will make it do it. If the card isn't associated, every secs we
break;
case AUTH_SHAREDKEY:
if (apriv->keyindex < auto_wep) {
- set_wep_key(apriv, apriv->keyindex, NULL, 0, 0, 0);
+ set_wep_key(apriv, apriv->keyindex, 0, 0, 0, 0);
apriv->config.authType = AUTH_SHAREDKEY;
apriv->keyindex++;
} else {
/* Drop to ENCRYPT */
apriv->keyindex = 0;
- set_wep_key(apriv, apriv->defindex, NULL, 0, 0, 0);
+ set_wep_key(apriv, apriv->defindex, 0, 0, 0, 0);
apriv->config.authType = AUTH_ENCRYPT;
}
break;
/* Do we want to just set the transmit key index ? */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) {
- set_wep_key(local, index, NULL, 0, 1, 1);
+ set_wep_key(local, index, 0, 0, 1, 1);
} else
/* Don't complain if only change the mode */
if(!dwrq->flags & IW_ENCODE_MODE) {
if (card->vaddr)
iounmap(card->vaddr);
- card->vaddr = NULL;
+ card->vaddr = 0;
macio_release_resource(mdev, 0);
static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
*lenp = pos;
if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
else
{
*lenp = 0;
static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, txBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, rxBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp);
return retv;
}
static char conf_reset_result[200];
static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int pos = 0;
int devnum = ctl->procname[6] - '0';
return -1;
*lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
+ void __user *buffer, size_t * lenp)
{
int pos = 0;
int devnum = ctl->procname[5] - '0';
} else
return -1;
*lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, filp, buffer, lenp);
}
extern int arlan_entry_debug;
extern int arlan_exit_debug;
extern int testMemory;
+extern const char* arlan_version;
extern int arlan_command(struct net_device * dev, int command);
#define SIDUNKNOWN -1
memcpy(header.addr3, priv->CurrentBSSID, 6);
if (priv->wep_is_on) {
- auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY);
+ auth.alg = C80211_MGMT_AAN_SHAREDKEY;
/* no WEP for authentication frames with TrSeqNo 1 */
if (priv->CurrentAuthentTransactionSeqNum != 1)
header.frame_ctl |= cpu_to_le16(IEEE802_11_FCTL_WEP);
} else {
- auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
+ auth.alg = C80211_MGMT_AAN_OPENSYSTEM;
}
auth.status = 0;
*
*/
+#define __KERNEL_SYSCALLS__
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
{
struct iw_range *range = (struct iw_range *) extra;
islpci_private *priv = netdev_priv(ndev);
- u8 *data;
+ char *data;
int i, m, rvalue;
struct obj_frequencies *freq;
union oid_res_t r;
i = 0;
while ((i < IW_MAX_BITRATES) && (*data != 0)) {
/* the result must be in bps. The card gives us 500Kbps */
- range->bitrate[i] = *data * 500000;
+ range->bitrate[i] = (__s32) (*data >> 1);
+ range->bitrate[i] *= 1000000;
i++;
data++;
}
return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
}
- ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
- if (ret) {
- kfree(r.ptr);
+ if ((ret =
+ mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r)))
return ret;
- }
rate = (u32) (vwrq->value / 500000);
data = r.ptr;
}
if (!data[i]) {
- kfree(r.ptr);
return -EINVAL;
}
vwrq->value = r.u * 500000;
/* request the device for the enabled rates */
- rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r);
- if (rvalue) {
- kfree(r.ptr);
+ if ((rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r)))
return rvalue;
- }
data = r.ptr;
vwrq->fixed = (data[0] != 0) && (data[1] == 0);
kfree(r.ptr);
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_mgmtframe *response = NULL;
- int ret = -EIO;
+ int ret = -EIO, response_op = PIMFOR_OP_ERROR;
printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid);
data->length = 0;
islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
priv->priv_oid, extra, 256,
&response);
+ response_op = response->header->operation;
printk("%s: ret: %i\n", ndev->name, ret);
+ printk("%s: response_op: %i\n", ndev->name, response_op);
if (ret || !response
|| response->header->operation == PIMFOR_OP_ERROR) {
if (response) {
priv->priv_oid, extra, data->length,
&response);
printk("%s: ret: %i\n", ndev->name, ret);
- if (ret || !response
- || response->header->operation == PIMFOR_OP_ERROR) {
- if (response) {
- islpci_mgt_release(response);
- }
- printk("%s: EIO\n", ndev->name);
- ret = -EIO;
- }
if (!ret) {
response_op = response->header->operation;
printk("%s: response_op: %i\n", ndev->name,
response_op);
islpci_mgt_release(response);
}
+ if (ret || response_op == PIMFOR_OP_ERROR) {
+ printk("%s: EIO\n", ndev->name);
+ ret = -EIO;
+ }
}
return (ret ? ret : -EINPROGRESS);
#include "oid_mgt.h"
#define ISL3877_IMAGE_FILE "isl3877"
-#define ISL3886_IMAGE_FILE "isl3886"
#define ISL3890_IMAGE_FILE "isl3890"
static int prism54_bring_down(islpci_private *);
mdelay(50);
{
- const struct firmware *fw_entry = NULL;
+ const struct firmware *fw_entry = 0;
long fw_len;
const u32 *fw_ptr;
void *device = priv->device_base;
int powerstate = ISL38XX_PSM_POWERSAVE_STATE;
- /* lock the interrupt handler */
- spin_lock(&priv->slock);
-
/* received an interrupt request on a shared IRQ line
* first check whether the device is in sleep mode */
reg = readl(device + ISL38XX_CTRL_STAT_REG);
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
- spin_unlock(&priv->slock);
return IRQ_NONE;
}
+ if (islpci_get_state(priv) != PRV_STATE_SLEEP)
+ powerstate = ISL38XX_PSM_ACTIVE_STATE;
+
+ /* lock the interrupt handler */
+ spin_lock(&priv->slock);
/* check whether there is any source of interrupt on the device */
reg = readl(device + ISL38XX_INT_IDENT_REG);
reg &= ISL38XX_INT_SOURCES;
if (reg != 0) {
- if (islpci_get_state(priv) != PRV_STATE_SLEEP)
- powerstate = ISL38XX_PSM_ACTIVE_STATE;
-
/* reset the request bits in the Identification register */
isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);
isl38xx_handle_wakeup(priv->control_block,
&powerstate, priv->device_base);
}
- } else {
-#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
-#endif
- spin_unlock(&priv->slock);
- return IRQ_NONE;
}
/* sleep -> ready */
if (priv->device_base)
iounmap(priv->device_base);
- priv->device_base = NULL;
+ priv->device_base = 0;
/* free consistent DMA area... */
if (priv->driver_mem_address)
priv->device_host_address);
/* clear some dangling pointers */
- priv->driver_mem_address = NULL;
+ priv->driver_mem_address = 0;
priv->device_host_address = 0;
priv->device_psm_buffer = 0;
- priv->control_block = NULL;
+ priv->control_block = 0;
/* clean up mgmt rx buffers */
for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
if (priv->data_low_rx[counter])
dev_kfree_skb(priv->data_low_rx[counter]);
- priv->data_low_rx[counter] = NULL;
+ priv->data_low_rx[counter] = 0;
}
/* Free the acces control list and the WPA list */
/* select the firmware file depending on the device id */
switch (pdev->device) {
- case 0x3877:
- strcpy(priv->firmware, ISL3877_IMAGE_FILE);
+ case PCIDEVICE_ISL3890:
+ case PCIDEVICE_3COM6001:
+ strcpy(priv->firmware, ISL3890_IMAGE_FILE);
break;
-
- case 0x3886:
- strcpy(priv->firmware, ISL3886_IMAGE_FILE);
+ case PCIDEVICE_ISL3877:
+ strcpy(priv->firmware, ISL3877_IMAGE_FILE);
break;
default:
do_islpci_free_memory:
islpci_free_memory(priv);
do_free_netdev:
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
return NULL;
}
MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
MODULE_LICENSE("GPL");
-static int init_pcitm = 0;
-module_param(init_pcitm, int, 0);
-
/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
static const struct pci_device_id prism54_id_tbl[] = {
- /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
+ /* 3COM 3CRWE154G72 Wireless LAN adapter */
{
- 0x1260, 0x3890,
- PCI_ANY_ID, PCI_ANY_ID,
+ PCIVENDOR_3COM, PCIDEVICE_3COM6001,
+ PCIVENDOR_3COM, PCIDEVICE_3COM6001,
0, 0, 0
},
- /* 3COM 3CRWE154G72 Wireless LAN adapter */
+ /* D-Link Air Plus Xtreme G A1 - DWL-g650 A1 */
{
- 0x10b7, 0x6001,
- PCI_ANY_ID, PCI_ANY_ID,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_DLINK, 0x3202UL,
+ 0, 0, 0
+ },
+
+ /* I-O Data WN-G54/CB - WN-G54/CB */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_IODATA, 0xd019UL,
+ 0, 0, 0
+ },
+
+ /* Netgear WG511 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_NETGEAR, 0x4800UL,
+ 0, 0, 0
+ },
+
+ /* Tekram Technology clones, Allnet, Netcomm, Zyxel */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_TTL, 0x1605UL,
+ 0, 0, 0
+ },
+
+ /* SMC2802W */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0x2802UL,
+ 0, 0, 0
+ },
+
+ /* SMC2835W */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0x2835UL,
+ 0, 0, 0
+ },
+
+ /* Corega CG-WLCB54GT */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_ATI, 0xc104UL,
+ 0, 0, 0
+ },
+
+ /* I4 Z-Com XG-600 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_I4, 0x0014UL,
+ 0, 0, 0
+ },
+
+ /* I4 Z-Com XG-900 and clones Macer, Ovislink, Planex, Peabird, */
+ /* Sitecom, Xterasys */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_I4, 0x0020UL,
+ 0, 0, 0
+ },
+
+ /* SMC 2802W V2 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_ACCTON, 0xee03UL,
+ 0, 0, 0
+ },
+
+ /* SMC 2835W V2 */
+ {
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
+ PCIVENDOR_SMC, 0xa835UL,
0, 0, 0
},
/* Intersil PRISM Indigo Wireless LAN adapter */
{
- 0x1260, 0x3877,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3877,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0
},
- /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
+ /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
+ /* Default */
{
- 0x1260, 0x3886,
+ PCIVENDOR_INTERSIL, PCIDEVICE_ISL3890,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0
},
/* .enable_wake ; we don't support this yet */
};
+/*
+ * prism54_get_card_model - print a human-readable name for the card.
+ *
+ * Maps the adapter's PCI subsystem device ID (read from priv->pdev) to a
+ * known OEM model string and prints it at KERN_DEBUG level.  Boards that
+ * set 'notwork' (the SMC V2 variants) get an additional warning line.
+ * Unrecognised subsystem IDs fall through to the generic Intersil
+ * "PRISM Duette/GT" name.  Purely informational: no state is changed.
+ */
+static void
+prism54_get_card_model(struct net_device *ndev)
+{
+ islpci_private *priv;
+ char *modelp;
+ int notwork = 0;
+
+ priv = netdev_priv(ndev);
+ /* Subsystem IDs below are OEM-assigned; vendor is not checked here. */
+ switch (priv->pdev->subsystem_device) {
+ case PCIDEVICE_ISL3877:
+ modelp = "PRISM Indigo";
+ break;
+ case PCIDEVICE_ISL3886:
+ modelp = "PRISM Javelin / Xbow";
+ break;
+ case PCIDEVICE_3COM6001:
+ modelp = "3COM 3CRWE154G72";
+ break;
+ case 0x3202UL:
+ modelp = "D-Link DWL-g650 A1";
+ break;
+ case 0xd019UL:
+ modelp = "WN-G54/CB";
+ break;
+ case 0x4800UL:
+ modelp = "Netgear WG511";
+ break;
+ case 0x2802UL:
+ modelp = "SMC2802W";
+ break;
+ case 0xee03UL:
+ modelp = "SMC2802W V2";
+ notwork = 1;
+ break;
+ case 0x2835UL:
+ modelp = "SMC2835W";
+ break;
+ case 0xa835UL:
+ modelp = "SMC2835W V2";
+ notwork = 1;
+ break;
+ case 0xc104UL:
+ modelp = "CG-WLCB54GT";
+ break;
+ case 0x1605UL:
+ modelp = "Tekram Technology clone";
+ break;
+ /* Let's leave this one out for now since it seems bogus/wrong.
+ * Even if the manufacturer did use 0x0000UL it may not be correct
+ * on their part, therefore deserving no name ;) */
+ /* case 0x0000UL:
+ * modelp = "SparkLAN WL-850F";
+ * break;*/
+
+ /* We have two models reported for the one subsystem ID below :( */
+ case 0x0014UL:
+ modelp = "I4 Z-Com XG-600 and clones";
+ break;
+ case 0x0020UL:
+ modelp = "I4 Z-Com XG-900 and clones";
+ break;
+/* Default it */
+/*
+ case PCIDEVICE_ISL3890:
+ modelp = "PRISM Duette/GT";
+ break;
+*/
+ default:
+ modelp = "PRISM Duette/GT";
+ }
+ printk(KERN_DEBUG "%s: %s driver detected card model: %s\n",
+ ndev->name, DRV_NAME, modelp);
+ if ( notwork ) {
+ printk(KERN_DEBUG "%s: %s Warning - This may not work\n",
+ ndev->name, DRV_NAME);
+ }
+ return;
+}
+
/******************************************************************************
Module initialization functions
******************************************************************************/
*
* Writing zero to both these two registers will disable both timeouts and
* *can* solve problems caused by devices that are slow to respond.
- * Make this configurable - MSW
*/
- if ( init_pcitm >= 0 ) {
- pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
- pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
- } else {
- printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
- }
+ /* I am taking these out, we should not be poking around in the
+ * programmable timers - MSW
+ */
+/* Do not zero the programmable timers
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x41, 0);
+*/
/* request the pci device I/O regions */
rvalue = pci_request_regions(pdev, DRV_NAME);
/* firmware upload is triggered in islpci_open */
+ /* Pretty card model discovery output */
+ prism54_get_card_model(ndev);
+
return 0;
do_unregister_netdev:
unregister_netdev(ndev);
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
do_pci_release_regions:
pci_release_regions(pdev);
do_pci_disable_device:
prism54_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
if (!__in_cleanup_module) {
/* free the PCI memory and unmap the remapped page */
islpci_free_memory(priv);
- pci_set_drvdata(pdev, NULL);
+ pci_set_drvdata(pdev, 0);
free_netdev(ndev);
- priv = NULL;
+ priv = 0;
pci_release_regions(pdev);
prism54_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got suspend request (state %d)\n",
prism54_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
int err;
DEFINE_WAIT(wait);
- *recvframe = NULL;
-
if (down_interruptible(&priv->mgmt_sem))
return -ERESTARTSYS;
/* General driver definitions */
+#define PCIVENDOR_INTERSIL 0x1260UL
+#define PCIVENDOR_3COM 0x10b7UL
+#define PCIVENDOR_DLINK 0x1186UL
+#define PCIVENDOR_I4 0x17cfUL
+#define PCIVENDOR_IODATA 0x10fcUL
+#define PCIVENDOR_NETGEAR 0x1385UL
+#define PCIVENDOR_SMC 0x10b8UL
+#define PCIVENDOR_ACCTON 0x1113UL
+#define PCIVENDOR_ATI 0x1259UL
+#define PCIVENDOR_TTL 0x16a5UL
+
+#define PCIDEVICE_ISL3877 0x3877UL
+#define PCIDEVICE_ISL3886 0x3886UL
+#define PCIDEVICE_ISL3890 0x3890UL
+#define PCIDEVICE_3COM6001 0x6001UL
#define PCIDEVICE_LATENCY_TIMER_MIN 0x40
#define PCIDEVICE_LATENCY_TIMER_VAL 0x50
OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002),
OID_U32_C(OID_INL_MODE, 0xFF020003),
OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004),
- OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW),
+ OID_UNKNOWN(OID_INL_VERSION, 0xFF020005),
OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006),
OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007),
OID_U32_C(OID_INL_CONFIG, 0xFF020008),
mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
{
int ret = 0;
- struct islpci_mgmtframe *response = NULL;
+ struct islpci_mgmtframe *response;
int response_op = PIMFOR_OP_ERROR;
int dlen;
void *cache, *_data = data;
BUG_ON(OID_NUM_LAST <= n);
BUG_ON(extra > isl_oid[n].range);
- res->ptr = NULL;
-
if (!priv->mib)
/* memory has been freed */
return -1;
DOT11_OID_DEFKEYID,
DOT11_OID_DOT1XENABLE,
OID_INL_DOT11D_CONFORMANCE,
- /* Do not initialize this - fw < 1.0.4.3 rejects it
OID_INL_OUTPUTPOWER,
- */
};
/* update the MAC addr. */
static int
mgt_update_addr(islpci_private *priv)
{
- struct islpci_mgmtframe *res = NULL;
+ struct islpci_mgmtframe *res;
int ret;
ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
- {NULL,},
+ {0,},
};
static struct pci_device_id yellowfin_pci_tbl[] = {
#ifdef NO_TXSTATS
/* In this mode the Tx ring needs only a single descriptor. */
for (i = 0; i < TX_RING_SIZE; i++) {
- yp->tx_skbuff[i] = NULL;
+ yp->tx_skbuff[i] = 0;
yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- yp->tx_skbuff[entry] = NULL;
+ yp->tx_skbuff[entry] = 0;
}
if (yp->tx_full
&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
if (yp->rx_skbuff[i]) {
dev_kfree_skb(yp->rx_skbuff[i]);
}
- yp->rx_skbuff[i] = NULL;
+ yp->rx_skbuff[i] = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (yp->tx_skbuff[i])
dev_kfree_skb(yp->tx_skbuff[i]);
- yp->tx_skbuff[i] = NULL;
+ yp->tx_skbuff[i] = 0;
}
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
{
- return simple_read_from_buffer(buf, count, offset, str, strlen(str));
+ /* Open-coded replacement for simple_read_from_buffer(): copy up to
+ * 'count' bytes of the NUL-terminated kernel string 'str' to the
+ * user buffer, starting at *offset.  Returns the number of bytes
+ * copied, 0 at end-of-string (or for a zero-length read), or
+ * -EFAULT if the user buffer is inaccessible. */
+ size_t len = strlen(str);
+
+ if (!count)
+ return 0;
+
+ /* Offset strictly past the string: nothing to read.  An offset
+ * exactly at 'len' falls through and copies zero bytes below.
+ * NOTE(review): a negative *offset is not rejected explicitly —
+ * confirm callers (the read() path) never pass one. */
+ if (*offset > len)
+ return 0;
+
+ /* Clamp the request to the remaining tail of the string. */
+ if (count > len - *offset)
+ count = len - *offset;
+
+ if (copy_to_user(buf, str + *offset, count))
+ return -EFAULT;
+
+ /* Advance the file position by what was actually copied. */
+ *offset += count;
+
+ return count;
}
ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
{
char tmpbuf[TMPBUFSIZE];
- size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+ /* Format 'val' as a decimal string with trailing newline, then copy
+ * the requested window of it to userspace.  Returns bytes copied,
+ * 0 at end-of-data or for a zero-length read, -EFAULT on fault. */
+ size_t maxlen;
+
+ if (!count)
+ return 0;
+
+ /* NOTE(review): 'val' is passed by value, so the lock cannot protect
+ * the value itself; presumably it serialises against writers that
+ * also take oprofilefs_lock — confirm the intended invariant. */
+ spin_lock(&oprofilefs_lock);
+ maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+ spin_unlock(&oprofilefs_lock);
if (maxlen > TMPBUFSIZE)
maxlen = TMPBUFSIZE;
+
+ /* Offset past the formatted data: nothing left to read. */
+ if (*offset > maxlen)
+ return 0;
+
+ /* Clamp the request to the remaining formatted bytes. */
+ if (count > maxlen - *offset)
+ count = maxlen - *offset;
+
+ if (copy_to_user(buf, tmpbuf + *offset, count))
+ return -EFAULT;
+
+ /* Advance the file position by what was actually copied. */
+ *offset += count;
+
+ return count;
}
struct dino_device *dino_dev; // Dino specific control struct
const char *version = "unknown";
const int name_len = 32;
- char hw_path[64];
char *name;
int is_cujo = 0;
struct pci_bus *bus;
-
+
name = kmalloc(name_len, GFP_KERNEL);
- if(name) {
- print_pa_hwpath(dev, hw_path);
- snprintf(name, name_len, "Dino [%s]", hw_path);
- }
+ if(name)
+ snprintf(name, name_len, "Dino %s", dev->dev.bus_id);
else
name = "Dino";
sg_dma_len(startsg) = 0;
dma_offset = (unsigned long) pide & ~IOVP_MASK;
n_mappings++;
-#if defined(ZX1_SUPPORT)
- /* Pluto IOMMU IO Virt Address is not zero based */
- sg_dma_address(dma_sg) = pide | ioc->ibase;
-#else
- /* SBA, ccio, and dino are zero based.
- * Trying to save a few CPU cycles for most users.
- */
sg_dma_address(dma_sg) = pide;
-#endif
pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
prefetchw(pdirp);
}
#include <asm/byteorder.h> /* get in-line asm for swab */
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
-#include <asm/io.h> /* read/write functions */
+#include <asm/io.h> /* gsc_read/write functions */
#ifdef CONFIG_SUPERIO
#include <asm/superio.h>
#endif
#endif
-#define IOSAPIC_REG_SELECT 0x00
+#define READ_U8(addr) gsc_readb(addr)
+#define READ_U16(addr) le16_to_cpu(gsc_readw((u16 *) (addr)))
+#define READ_U32(addr) le32_to_cpu(gsc_readl((u32 *) (addr)))
+#define READ_REG16(addr) gsc_readw((u16 *) (addr))
+#define READ_REG32(addr) gsc_readl((u32 *) (addr))
+#define WRITE_U8(value, addr) gsc_writeb(value, addr)
+#define WRITE_U16(value, addr) gsc_writew(cpu_to_le16(value), (u16 *) (addr))
+#define WRITE_U32(value, addr) gsc_writel(cpu_to_le32(value), (u32 *) (addr))
+#define WRITE_REG16(value, addr) gsc_writew(value, (u16 *) (addr))
+#define WRITE_REG32(value, addr) gsc_writel(value, (u32 *) (addr))
+
+
+#define IOSAPIC_REG_SELECT 0
#define IOSAPIC_REG_WINDOW 0x10
#define IOSAPIC_REG_EOI 0x40
#define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2)
#define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2)
-static inline unsigned int iosapic_read(unsigned long iosapic, unsigned int reg)
-{
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- return readl(iosapic + IOSAPIC_REG_WINDOW);
-}
-
-static inline void iosapic_write(unsigned long iosapic, unsigned int reg, u32 val)
-{
- writel(reg, iosapic + IOSAPIC_REG_SELECT);
- writel(val, iosapic + IOSAPIC_REG_WINDOW);
-}
-
/*
+** FIXME: revisit which GFP flags we should really be using.
** GFP_KERNEL includes __GFP_WAIT flag and that may not
** be acceptable. Since this is boot time, we shouldn't have
** to wait ever and this code should (will?) never get called
#define IOSAPIC_UNLOCK(lck) spin_unlock_irqrestore(lck, irqflags)
-#define IOSAPIC_VERSION_MASK 0x000000ff
-#define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK))
+#define IOSAPIC_VERSION_MASK 0x000000ff
+#define IOSAPIC_VERSION_SHIFT 0x0
+#define IOSAPIC_VERSION(ver) \
+ (int) ((ver & IOSAPIC_VERSION_MASK) >> IOSAPIC_VERSION_SHIFT)
#define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000
+
#define IOSAPIC_MAX_ENTRY_SHIFT 0x10
-#define IOSAPIC_IRDT_MAX_ENTRY(ver) \
- (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
+#define IOSAPIC_IRDT_MAX_ENTRY(ver) \
+ (int) ((ver&IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
/* bits in the "low" I/O Sapic IRdT entry */
#define IOSAPIC_IRDT_ENABLE 0x10000
#define IOSAPIC_IRDT_ID_EID_SHIFT 0x10
+
+#define IOSAPIC_EOI(eoi_addr, eoi_data) gsc_writel(eoi_data, eoi_addr)
+
static struct iosapic_info *iosapic_list;
static spinlock_t iosapic_lock;
static int iosapic_count;
struct irt_entry *p = table;
int i;
- printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
- printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
+ printk(KERN_DEBUG MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
+ printk(KERN_DEBUG MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
table,
num_entries,
(int) sizeof(struct irt_entry));
for (i = 0 ; i < num_entries ; i++, p++) {
- printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
+ printk(KERN_DEBUG MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
p->entry_type, p->entry_length, p->interrupt_type,
p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
p->src_seg_id, p->dest_iosapic_intin,
static irqreturn_t
iosapic_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct vector_info *vi = (struct vector_info *) dev_id;
+ struct vector_info *vi = (struct vector_info *)dev_id;
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
int irq_num = vi->iosapic->isi_region->data.irqbase + vi->irqline;
- DBG("iosapic_interrupt(): irq %d line %d eoi 0x%p 0x%x\n",
- irq, vi->irqline, vi->eoi_addr, vi->eoi_data);
-
- /* Do NOT need to mask/unmask IRQ. processor is already masked. */
+ DBG("iosapic_interrupt(): irq %d line %d eoi %p\n",
+ irq, vi->irqline, vi->eoi_addr);
+/* FIXME: Need to mask/unmask? processor IRQ is already masked... */
do_irq(&vi->iosapic->isi_region->action[vi->irqline], irq_num, regs);
/*
- ** PARISC only supports PCI devices below I/O SAPIC.
** PCI only supports level triggered in order to share IRQ lines.
- ** ergo I/O SAPIC must always issue EOI on parisc.
- **
- ** i386/ia64 support ISA devices and have to deal with
- ** edge-triggered interrupts too.
+ ** I/O SAPIC must always issue EOI.
*/
- __raw_writel(vi->eoi_data, vi->eoi_addr);
+ IOSAPIC_EOI(vi->eoi_addr, vi->eoi_data);
+
return IRQ_HANDLED;
}
ASSERT(tmp == 0);
vi->eoi_addr = (u32 *) (isi->isi_hpa + IOSAPIC_REG_EOI);
- vi->eoi_data = cpu_to_le32(vi->txn_data);
+ vi->eoi_data = cpu_to_le32(vi->irqline);
+
ASSERT(NULL != isi->isi_region);
DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n",
struct iosapic_info *isp = vi->iosapic;
u8 idx = vi->irqline;
- *dp0 = iosapic_read(isp->isi_hpa, IOSAPIC_IRDT_ENTRY(idx));
- *dp1 = iosapic_read(isp->isi_hpa, IOSAPIC_IRDT_ENTRY_HI(idx));
+ /* point the window register to the lower word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY(idx), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ *dp0 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+
+ /* point the window register to the higher word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY_HI(idx), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ *dp1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
}
ASSERT(NULL != isp);
ASSERT(0 != isp->isi_hpa);
- DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %p 0x%x 0x%x\n",
+ DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %p WINDOW %p 0x%x 0x%x\n",
vi->irqline,
- isp->isi_hpa,
+ isp->isi_hpa, isp->isi_hpa+IOSAPIC_REG_WINDOW,
dp0, dp1);
- iosapic_write(isp->isi_hpa, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0);
+ /* point the window register to the lower word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY(vi->irqline), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ WRITE_U32( dp0, isp->isi_hpa+IOSAPIC_REG_WINDOW);
/* Read the window register to flush the writes down to HW */
- dp0 = readl(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+ dp0 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
- iosapic_write(isp->isi_hpa, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1);
+ /* point the window register to the higher word */
+ WRITE_U32(IOSAPIC_IRDT_ENTRY_HI(vi->irqline), isp->isi_hpa+IOSAPIC_REG_SELECT);
+ WRITE_U32( dp1, isp->isi_hpa+IOSAPIC_REG_WINDOW);
/* Read the window register to flush the writes down to HW */
- dp1 = readl(isp->isi_hpa+IOSAPIC_REG_WINDOW);
+ dp1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
}
iosapic_set_irt_data(vi, &d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
+
#ifdef DEBUG_IOSAPIC_IRT
{
u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL);
printk("iosapic_enable_irq(): regs %p", vi->eoi_addr);
- for ( ; t < vi->eoi_addr; t++)
- printk(" %x", readl(t));
+ while (t < vi->eoi_addr) printk(" %x", READ_U32(t++));
printk("\n");
}
struct iosapic_info *isp = vi->iosapic;
for (d0=0x10; d0<0x1e; d0++) {
- d1 = iosapic_read(isp->isi_hpa, d0);
+ /* point the window register to the lower word */
+ WRITE_U32(d0, isp->isi_hpa+IOSAPIC_REG_SELECT);
+
+ /* read the word */
+ d1 = READ_U32(isp->isi_hpa+IOSAPIC_REG_WINDOW);
printk(" %x", d1);
}
}
#endif
/*
- ** Issueing I/O SAPIC an EOI causes an interrupt IFF IRQ line is
- ** asserted. IRQ generally should not be asserted when a driver
- ** enables their IRQ. It can lead to "interesting" race conditions
- ** in the driver initialization sequence.
+ ** KLUGE: IRQ should not be asserted when drivers enable their IRQ.
+ ** PCI supports level triggered in order to share IRQ lines.
+ **
+ ** Issuing I/O SAPIC an EOI causes an interrupt iff IRQ line is
+ ** asserted.
*/
- __raw_writel(vi->eoi_data, vi->eoi_addr);
+ IOSAPIC_EOI(vi->eoi_addr, vi->eoi_data);
}
ASSERT(isi);
ASSERT(isi->isi_hpa);
- return iosapic_read(isi->isi_hpa, IOSAPIC_REG_VERSION);
+ /* point window to the version register */
+ WRITE_U32(IOSAPIC_REG_VERSION, isi->isi_hpa+IOSAPIC_REG_SELECT);
+
+ /* now read the version register */
+ return (READ_U32(isi->isi_hpa+IOSAPIC_REG_WINDOW));
}
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h> /* for __init and __devinit */
+/* #define PCI_DEBUG enable ASSERT */
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/irq.h> /* for struct irq_region support */
#include <asm/pdc.h>
-#include <asm/pdcpat.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#define DBG_PAT(x...)
#endif
-#ifdef DEBUG_LBA
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
/*
** Config accessor functions only pass in the 8-bit bus number and not
** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus
#define LBA_HINT_CFG 0x0310
#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
-#define LBA_BUS_MODE 0x0620
-
/* ERROR regs are needed for config cycle kluges */
#define LBA_ERROR_CONFIG 0x0680
#define LBA_SMART_MODE 0x20
#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
/* non-postable I/O port space, densely packed */
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
#define LBA_ASTRO_PORT_BASE (0xfffffffffee00000UL)
#else
#define LBA_ASTRO_PORT_BASE (0xfee00000UL)
#endif
-#define ELROY_HVERS 0x782
-#define MERCURY_HVERS 0x783
-#define QUICKSILVER_HVERS 0x784
-
-static inline int IS_ELROY(struct parisc_device *d)
-{
- return (d->id.hversion == ELROY_HVERS);
-}
-
-static inline int IS_MERCURY(struct parisc_device *d)
-{
- return (d->id.hversion == MERCURY_HVERS);
-}
-
-static inline int IS_QUICKSILVER(struct parisc_device *d)
-{
- return (d->id.hversion == QUICKSILVER_HVERS);
-}
-
/*
** lba_device: Per instance Elroy data structure
spinlock_t lba_lock;
void *iosapic_obj;
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
unsigned long iop_base; /* PA_VIEW - for IO port accessor funcs */
#endif
{
u8 first_bus = d->hba.hba_bus->secondary;
u8 last_sub_bus = d->hba.hba_bus->subordinate;
+#if 0
+/* FIXME - see below in this function */
+ u8 dev = PCI_SLOT(dfn);
+ u8 func = PCI_FUNC(dfn);
+#endif
ASSERT(bus >= first_bus);
ASSERT(bus <= last_sub_bus);
return(FALSE);
}
+#if 0
+/*
+** FIXME: Need to implement code to fill the devices bitmap based
+** on contents of the local pci_bus tree "data base".
+** pci_register_ops() walks the bus for us and builds the tree.
+** For now, always do the config cycle.
+*/
+ bus -= first_bus;
+
+ return (((d->devices[bus][dev]) >> func) & 0x1);
+#else
return TRUE;
+#endif
}
return(data);
}
-#ifdef CONFIG_PARISC64
-#define pat_cfg_addr(bus, devfn, addr) (((bus) << 16) | ((devfn) << 8) | (addr))
-
-static int pat_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
-{
- int tok = pat_cfg_addr(bus->number, devfn, pos);
- u32 tmp;
- int ret = pdc_pat_io_pci_cfg_read(tok, size, &tmp);
-
- DBG_CFG("%s(%d:%d.%d+0x%02x) -> 0x%x %d\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, tmp, ret);
-
- switch (size) {
- case 1: *data = (u8) tmp; return (tmp == (u8) ~0);
- case 2: *data = (u16) tmp; return (tmp == (u16) ~0);
- case 4: *data = (u32) tmp; return (tmp == (u32) ~0);
- }
- *data = ~0;
- return (ret);
-}
-
-static int pat_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
-{
- int tok = pat_cfg_addr(bus->number, devfn, pos);
- int ret = pdc_pat_io_pci_cfg_write(tok, size, data);
-
- DBG_CFG("%s(%d:%d.%d+0x%02x, 0x%lx/%d)\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, data, size);
- return (ret);
-}
-
-static struct pci_ops pat_cfg_ops = {
- .read = pat_cfg_read,
- .write = pat_cfg_write,
-};
-#else
-/* keep the compiler from complaining about undeclared variables */
-#define pat_cfg_ops lba_cfg_ops
-#endif
static int lba_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
}
DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
-
/* Basic Algorithm */
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
switch(size) {
}
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
** Determine if a device is already configured.
}
}
}
-#else
-#define lba_claim_dev_resources(dev)
#endif
lba_dump_res(&iomem_resource, 2);
}
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
if (ldev->hba.gmmio_space.flags) {
err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
if (err < 0) {
bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif
+#ifdef __LP64__
if (is_pdc_pat()) {
/* Claim resources for PDC's devices */
lba_claim_dev_resources(dev);
}
+#endif
/*
** P2PB's have no IRQs. ignore them.
};
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
#define PIOP_TO_GMMIO(lba, addr) \
((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
}
}
}
-#else
-/* keep compiler from complaining about missing declarations */
-#define lba_pat_port_ops lba_astro_port_ops
-#define lba_pat_resources(pa_dev, lba_dev)
-#endif /* CONFIG_PARISC64 */
+#endif /* __LP64__ */
static void
unsigned long rsize;
int lba_num;
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
** Sign extend all BAR values on "legacy" platforms.
** "Sprockets" PDC (Forte/Allegro) initializes everything
printk("\n");
#endif /* DEBUG_LBA_PAT */
-#ifdef CONFIG_PARISC64
+#ifdef __LP64__
/*
* FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
* Only N-Class and up can really make use of Get slot status.
** have work to do.
*/
static int __init
-lba_driver_probe(struct parisc_device *dev)
+lba_driver_callback(struct parisc_device *dev)
{
struct lba_device *lba_dev;
struct pci_bus *lba_bus;
/* Read HW Rev First */
func_class = READ_REG32(dev->hpa + LBA_FCLASS);
-
- if (IS_ELROY(dev)) {
- func_class &= 0xf;
- switch (func_class) {
- case 0: version = "TR1.0"; break;
- case 1: version = "TR2.0"; break;
- case 2: version = "TR2.1"; break;
- case 3: version = "TR2.2"; break;
- case 4: version = "TR3.0"; break;
- case 5: version = "TR4.0"; break;
- default: version = "TR4+";
- }
- printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
- MODULE_NAME, version, func_class & 0xf, dev->hpa);
-
- /* Just in case we find some prototypes... */
- } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
- func_class &= 0xff;
- version = kmalloc(6, GFP_KERNEL);
- sprintf(version,"TR%d.%d",(func_class >> 4),(func_class & 0xf));
- /* We could use one printk for both and have it outside,
- * but for the mask for func_class.
- */
- printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
- MODULE_NAME, version, func_class & 0xff, dev->hpa);
+ func_class &= 0xf;
+
+ switch (func_class) {
+ case 0: version = "TR1.0"; break;
+ case 1: version = "TR2.0"; break;
+ case 2: version = "TR2.1"; break;
+ case 3: version = "TR2.2"; break;
+ case 4: version = "TR3.0"; break;
+ case 5: version = "TR4.0"; break;
+ default: version = "TR4+";
}
+ printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
+ MODULE_NAME, version, func_class & 0xf, dev->hpa);
+
+ /* Just in case we find some prototypes... */
if (func_class < 2) {
- printk(KERN_WARNING "Can't support LBA older than TR2.1"
- " - continuing under adversity.\n");
+ printk(KERN_WARNING "Can't support LBA older than TR2.1 "
+ "- continuing under adversity.\n");
}
/*
/* ---------- Third : setup I/O Port and MMIO resources --------- */
+#ifdef __LP64__
if (is_pdc_pat()) {
/* PDC PAT firmware uses PIOP region of GMMIO space. */
pci_port = &lba_pat_port_ops;
+
/* Go ask PDC PAT what resources this LBA has */
lba_pat_resources(dev, lba_dev);
- } else {
+ } else
+#endif
+ {
/* Sprockets PDC uses NPIOP region */
pci_port = &lba_astro_port_ops;
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
- is_pdc_pat() ? &pat_cfg_ops : &lba_cfg_ops,
- NULL);
+ &lba_cfg_ops, NULL);
+#ifdef __LP64__
if (is_pdc_pat()) {
/* assign resources to un-initialized devices */
DBG_PAT("LBA pci_bus_assign_resources()\n");
lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
}
+#endif
/*
** Once PCI register ops has walked the bus, access to config
}
static struct parisc_device_id lba_tbl[] = {
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
- { HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
+ { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x782, 0xa },
{ 0, }
};
static struct parisc_driver lba_driver = {
.name = MODULE_NAME,
.id_table = lba_tbl,
- .probe = lba_driver_probe,
+ .probe = lba_driver_callback,
};
/*
static int led_proc_write(struct file *file, const char *buf,
unsigned long count, void *data)
{
- char *cur, lbuf[count + 1];
+ char *cur, lbuf[count];
int d;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- memset(lbuf, 0, count + 1);
+ memset(lbuf, 0, count);
if (copy_from_user(lbuf, buf, count))
return -EFAULT;
break;
case LED_HASLCD:
- if (*cur && cur[strlen(cur)-1] == '\n')
+ while (*cur && cur[strlen(cur)-1] == '\n')
cur[strlen(cur)-1] = 0;
if (*cur == 0)
cur = lcd_text_default;
#include <linux/mm.h>
#include <linux/string.h>
+#undef PCI_DEBUG /* for ASSERT */
#include <linux/pci.h>
+#undef PCI_DEBUG
#include <asm/byteorder.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <asm/runway.h> /* for proc_runway_root */
#include <asm/pdc.h> /* for PDC_MODEL_* */
-#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
-
-/* declared in arch/parisc/kernel/setup.c */
-extern struct proc_dir_entry * proc_mckinley_root;
-
#define MODULE_NAME "SBA"
#ifdef CONFIG_PROC_FS
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
-#undef DEBUG_SBA_ASSERT
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP
+#define SBA_INLINE __inline__
+
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
-#ifdef DEBUG_SBA_ASSERT
-#undef ASSERT
-#define ASSERT(expr) \
- if(!(expr)) { \
- printk("\n%s:%d: Assertion " #expr " failed!\n", \
- __FILE__, __LINE__); \
- panic(#expr); \
- }
-#else
-#define ASSERT(expr)
-#endif
-
-
-#if defined(__LP64__) && !defined(CONFIG_PDC_NARROW)
-/* "low end" PA8800 machines use ZX1 chipset */
-#define ZX1_SUPPORT
-#endif
-
-#define SBA_INLINE __inline__
-
-
/*
** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes.
#define REOG_MERCED_PORT 0x805
#define REOG_ROPES_PORT 0x783
-#define PLUTO_MCKINLEY_PORT 0x880
-#define PLUTO_ROPES_PORT 0x784
-
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define IS_IKE(id) \
(((id)->hversion == IKE_MERCED_PORT) || ((id)->hversion == IKE_ROPES_PORT))
-#define IS_PLUTO(id) \
-(((id)->hversion == PLUTO_MCKINLEY_PORT) || ((id)->hversion == PLUTO_ROPES_PORT))
-
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET 0x20000
/* Ike's IOC's occupy functions 2 and 3 (not 0 and 1) */
#define IKE_IOC_OFFSET(p) ((p+2)*SBA_FUNC_SIZE)
-#define PLUTO_IOC_OFFSET 0x1000
-
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
-#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
+#define MAX_IOC 2 /* per Ike. Astro only has 1 */
/*
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
-/* AGP GART driver looks for this */
-#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
-
+#define IOC_IOVA_SPACE_BASE 0 /* IOVA ranges start at 0 */
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
-** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
+** IOVP_SIZE could only be greater than PAGE_SIZE if we are
+** confident the drivers really only touch the next physical
+** page iff that driver instance owns it.
*/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
unsigned long ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
- unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
- unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
-#ifdef ZX1_SUPPORT
- unsigned long iovp_mask; /* help convert IOVA to IOVP */
-#endif
+
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
+ unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
-#if SBA_HINT_SUPPORT
-/* FIXME : DMA HINTs not used */
- unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int hint_shift_pdir;
-#endif
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
/* STUFF We don't need in performance path */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
+ unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
+ unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
};
struct sba_device {
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
-#if SBA_AGP_SUPPORT
-static int reserve_sba_gart = 1;
-#endif
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
sba_dump_tlb(unsigned long hpa)
{
DBG_INIT("IO TLB at 0x%lx\n", hpa);
- DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
- DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
- DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
- DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
+ DBG_INIT("IOC_IBASE : %Lx\n", READ_REG64(hpa+IOC_IBASE));
+ DBG_INIT("IOC_IMASK : %Lx\n", READ_REG64(hpa+IOC_IMASK));
+ DBG_INIT("IOC_TCNFG : %Lx\n", READ_REG64(hpa+IOC_TCNFG));
+ DBG_INIT("IOC_PDIR_BASE: %Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
DBG_INIT("\n");
}
-#else
-#define sba_dump_ranges(x)
-#define sba_dump_tlb(x)
#endif
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
+#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
+#define SBA_IOVP(ioc,iova) ((iova) & ioc->hint_mask_pdir)
-#ifdef ZX1_SUPPORT
-/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
-#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
-#else
-/* only support Astro and ancestors. Saves a few cycles in key places */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
-#define SBA_IOVP(ioc,iova) (iova)
-#endif
-
+/* FIXME : review these macros to verify correctness and usage */
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
+#define MKIOVP(dma_hint,pide) (dma_addr_t)((long)(dma_hint) | ((long)(pide) << IOVP_SHIFT))
+#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
*
***************************************************************/
-#if SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
-#endif
+
typedef unsigned long space_t;
#define KERNEL_SPACE 0
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
- * pdir_ptr (arg0).
- * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
- * for Astro/Ike looks like:
- *
+ * pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
+ * shown below (MSB == bit 0):
*
* 0 19 51 55 63
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[43:12] | U | VI |
* +-+---------------------+----------------------------------+----+--------+
*
- * Pluto is basically identical, supports fewer physical address bits:
- *
- * 0 23 51 55 63
- * +-+------------------------+-------------------------------+----+--------+
- * |V| U | PPN[39:12] | U | VI |
- * +-+------------------------+-------------------------------+----+--------+
- *
- * V == Valid Bit (Most Significant Bit is bit 0)
+ * V == Valid Bit
* U == Unused
* PPN == Physical Page Number
* VI == Virtual Index (aka Coherent Index)
*
- * LPA instruction output is put into PPN field.
- * LCI (Load Coherence Index) instruction provides the "VI" bits.
+ * The physical address fields are filled with the results of the LPA
+ * instruction. The virtual index field is filled with the results of
+ * of the LCI (Load Coherence Index) instruction. The 8 bits used for
+ * the virtual index are bits 12:19 of the value returned by LCI.
*
- * We pre-swap the bytes since PCX-W is Big Endian and the
- * IOMMU uses little endian for the pdir.
+ * We need to pre-swap the bytes since PCX-W is Big Endian.
*/
ASSERT(sid == KERNEL_SPACE);
pa = virt_to_phys(vba);
- pa &= IOVP_MASK;
+ pa &= ~4095ULL; /* clear out offset bits */
mtsp(sid,1);
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
} while (byte_cnt > 0);
}
- WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
+ WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}
/**
pide = sba_alloc_range(ioc, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
- DBG_RUN("%s() 0x%p -> 0x%lx\n",
+ DBG_RUN("%s() 0x%p -> 0x%lx",
__FUNCTION__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
- DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
+ DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
pdir_start,
(u8) (((u8 *) pdir_start)[7]),
(u8) (((u8 *) pdir_start)[6]),
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
- sba_mark_invalid(ioc, iova, size);
-
#if DELAYED_RESOURCE_CNT > 0
- /* Delaying when we re-use a IO Pdir entry reduces the number
- * of MMIO reads needed to flush writes to the PCOM register.
- */
d = &(ioc->saved[ioc->saved_cnt]);
d->iova = iova;
d->size = size;
if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
int cnt = ioc->saved_cnt;
while (cnt--) {
+ sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
d--;
}
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
}
#else /* DELAYED_RESOURCE_CNT == 0 */
+ sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
return (void *) pdir_base;
}
-static void
-sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
-{
- /* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
- extern void lba_set_iregs(struct parisc_device *, u32, u32);
-
- u32 iova_space_mask;
- u32 iova_space_size;
- int iov_order, tcnfg;
- struct parisc_device *lba;
-#if SBA_AGP_SUPPORT
- int agp_found = 0;
-#endif
- /*
- ** Firmware programs the base and size of a "safe IOVA space"
- ** (one that doesn't overlap memory or LMMIO space) in the
- ** IBASE and IMASK registers.
- */
- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
- iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
-
- if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
- printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
- iova_space_size /= 2;
- }
-
- /*
- ** iov_order is always based on a 1GB IOVA space since we want to
- ** turn on the other half for AGP GART.
- */
- iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
- ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
-
- DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
- __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
- iov_order + PAGE_SHIFT);
-
- ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
- get_order(ioc->pdir_size));
- if (!ioc->pdir_base)
- panic("Couldn't allocate I/O Page Table\n");
-
- memset(ioc->pdir_base, 0, ioc->pdir_size);
-
- DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, ioc->pdir_size);
-
-#if SBA_HINT_SUPPORT
- ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
- ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
-
- DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
- ioc->hint_shift_pdir, ioc->hint_mask_pdir);
-#endif
-
- ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
- WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
-
- /* build IMASK for IOC and Elroy */
- iova_space_mask = 0xffffffff;
- iova_space_mask <<= (iov_order + PAGE_SHIFT);
- ioc->imask = iova_space_mask;
-#ifdef ZX1_SUPPORT
- ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
-#endif
- sba_dump_tlb(ioc->ioc_hpa);
-
- /*
- ** setup Mercury IBASE/IMASK registers as well.
- */
- for (lba = sba->child; lba; lba = lba->sibling) {
- int rope_num = (lba->hpa >> 13) & 0xf;
- if (rope_num >> 3 == ioc_num)
- lba_set_iregs(lba, ioc->ibase, ioc->imask);
- }
-
- WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
-
-#ifdef __LP64__
- /*
- ** Setting the upper bits makes checking for bypass addresses
- ** a little faster later on.
- */
- ioc->imask |= 0xFFFFFFFF00000000UL;
-#endif
-
- /* Set I/O PDIR Page size to system page size */
- switch (PAGE_SHIFT) {
- case 12: tcnfg = 0; break; /* 4K */
- case 13: tcnfg = 1; break; /* 8K */
- case 14: tcnfg = 2; break; /* 16K */
- case 16: tcnfg = 3; break; /* 64K */
- default:
- panic(__FILE__ "Unsupported system page size %d",
- 1 << PAGE_SHIFT);
- break;
- }
- WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
-
- /*
- ** Program the IOC's ibase and enable IOVA translation
- ** Bit zero == enable bit.
- */
- WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
-
- /*
- ** Clear I/O TLB of any possible entries.
- ** (Yes. This is a bit paranoid...but so what)
- */
- WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
-
-#if SBA_AGP_SUPPORT
- /*
- ** If an AGP device is present, only use half of the IOV space
- ** for PCI DMA. Unfortunately we can't know ahead of time
- ** whether GART support will actually be used, for now we
- ** can just key on any AGP device found in the system.
- ** We program the next pdir index after we stop w/ a key for
- ** the GART code to handshake on.
- */
- device=NULL;
- for (lba = sba->child; lba; lba = lba->sibling) {
- if (IS_QUICKSILVER(lba))
- break;
- }
-
- if (lba) {
- DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
- ioc->pdir_size /= 2;
- ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
- } else {
- DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
- }
-#endif /* 0 */
-
-}
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
__FUNCTION__, ioc->ioc_hpa, (int) (physmem>>20),
iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);
- ioc->pdir_base = sba_alloc_pdir(pdir_size);
-
- DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, pdir_size);
-
-#if SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
- DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
- ioc->hint_shift_pdir, ioc->hint_mask_pdir);
-#endif
+ ioc->pdir_base = sba_alloc_pdir(pdir_size);
+
+ DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
+ __FUNCTION__, ioc->pdir_base, pdir_size,
+ ioc->hint_shift_pdir, ioc->hint_mask_pdir);
ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
** On C3000 w/512MB mem, HP-UX 10.20 reports:
** ibase=0, imask=0xFE000000, size=0x2000000.
*/
- ioc->ibase = 0;
+ ioc->ibase = IOC_IOVA_SPACE_BASE | 1; /* bit 0 == enable bit */
ioc->imask = iova_space_mask; /* save it */
-#ifdef ZX1_SUPPORT
- ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
-#endif
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
__FUNCTION__, ioc->ibase, ioc->imask);
/*
** Program the IOC's ibase and enable IOVA translation
*/
- WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
+ WRITE_REG(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
/* Set I/O PDIR Page size to 4K */
*/
WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
- ioc->ibase = 0; /* used by SBA_IOVA and related macros */
-
DBG_INIT("%s() DONE\n", __FUNCTION__);
}
*/
}
- if (!IS_PLUTO(sba_dev->iodc)) {
- ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
- __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
- ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
- ioc_ctl |= IOC_CTRL_TC; /* Astro: firmware enables this */
+ ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
+ DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
+ ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
+ ioc_ctl |= IOC_CTRL_TC; /* Astro: firmware enables this */
- WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
+ WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
#ifdef DEBUG_SBA_INIT
- ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT(" 0x%Lx\n", ioc_ctl);
+ ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
+ DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
- } /* if !PLUTO */
if (IS_ASTRO(sba_dev->iodc)) {
/* PAT_PDC (L-class) also reports the same goofy base */
sba_dev->ioc[0].ioc_hpa = ASTRO_IOC_OFFSET;
num_ioc = 1;
- } else if (IS_PLUTO(sba_dev->iodc)) {
- /* We use a negative value for IOC HPA so it gets
- * corrected when we add it with IKE's IOC offset.
- * Doesnt look clean, but fewer code.
- */
- sba_dev->ioc[0].ioc_hpa = -PLUTO_IOC_OFFSET;
- num_ioc = 1;
} else {
sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
num_ioc = 2;
/* flush out the writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- if (IS_PLUTO(sba_dev->iodc)) {
- sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
- } else {
- sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
- }
+ sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
}
}
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
- { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
/* These two entries commented out because we don't find them in a
* buswalk yet. If/when we do, they would cause us to think we had
* many more SBAs then we really do.
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, ASTRO_ROPES_PORT, 0xc },
* { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_ROPES_PORT, 0xc },
- */
-/* We shall also comment out Pluto Ropes Port since bus walk doesnt
- * report it yet.
- * { HPHW_BCPORT, HVERSION_REV_ANY_ID, PLUTO_ROPES_PORT, 0xc },
*/
{ 0, }
};
int i;
char *version;
+#ifdef DEBUG_SBA_INIT
sba_dump_ranges(dev->hpa);
+#endif
/* Read HW Rev First */
func_class = READ_REG(dev->hpa + SBA_FCLASS);
version = astro_rev;
} else if (IS_IKE(&dev->id)) {
- static char ike_rev[] = "Ike rev ?";
+ static char ike_rev[]="Ike rev ?";
+
ike_rev[8] = '0' + (char) (func_class & 0xff);
version = ike_rev;
- } else if (IS_PLUTO(&dev->id)) {
- static char pluto_rev[]="Pluto ?.?";
- pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
- pluto_rev[8] = '0' + (char) (func_class & 0x0f);
- version = pluto_rev;
} else {
- static char reo_rev[] = "REO rev ?";
+ static char reo_rev[]="REO rev ?";
+
reo_rev[8] = '0' + (char) (func_class & 0xff);
version = reo_rev;
}
if (!global_ioc_cnt) {
global_ioc_cnt = count_parisc_driver(&sba_driver);
- /* Astro and Pluto have one IOC per SBA */
- if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id)))
+ /* Only Astro has one IOC per SBA */
+ if (!IS_ASTRO(&dev->id))
global_ioc_cnt *= 2;
}
printk(KERN_INFO "%s found %s at 0x%lx\n",
MODULE_NAME, version, dev->hpa);
+#ifdef DEBUG_SBA_INIT
+ sba_dump_tlb(dev->hpa);
+#endif
+
sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
if (NULL == sba_dev) {
printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
} else if (IS_IKE(&dev->id)) {
create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
- } else if (IS_PLUTO(&dev->id)) {
- create_proc_info_entry("Pluto", 0, proc_mckinley_root, sba_proc_info);
} else {
create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info);
}
{
#ifdef CONFIG_SERIAL_8250
int retval;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
extern void serial8250_console_init(void); /* drivers/serial/8250.c */
-#endif
-
+
if (!sio_dev.irq_region)
return; /* superio not present */
return;
}
-#ifdef CONFIG_SERIAL_8250_CONSOLE
serial8250_console_init();
-#endif
-
+
serial[1].iobase = sio_dev.sp2_base;
serial[1].irq = sio_dev.irq_region->data.irqbase + SP2_IRQ;
retval = early_serial_setup(&serial[1]);
#define PARPORT_MAX_SPINTIME_VALUE 1000
static int do_active_device(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void __user *result, size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
if (write) /* can't happen anyway */
return -EACCES;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp, loff_t *ppos)
+ void __user *result, size_t *lenp)
{
struct parport_device_info *info = table->extra2;
const char *str;
if (write) /* permissions stop this */
return -EACCES;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user (result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_base_addr (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_irq (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_dma (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_modes (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
int len = 0;
- if (*ppos) {
+ if (filp->f_pos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#
# PCI configuration
#
-config PCI_MSI
- bool "Message Signaled Interrupts (MSI and MSI-X)"
+config PCI_USE_VECTOR
+ bool "Vector-based interrupt indexing (MSI)"
depends on (X86_LOCAL_APIC && X86_IO_APIC) || IA64
default n
help
- This allows device drivers to enable MSI (Message Signaled
- Interrupts). Message Signaled Interrupts enable a device to
- generate an interrupt using an inbound Memory Write on its
- PCI bus instead of asserting a device IRQ pin.
+ This replaces the current existing IRQ-based index interrupt scheme
+ with the vector-base index scheme. The advantages of vector base
+ over IRQ base are listed below:
+ 1) Support MSI implementation.
+ 2) Support future IOxAPIC hotplug
+
+ Note that this allows the device drivers to enable MSI, Message
+ Signaled Interrupt, on all MSI capable device functions detected.
+ Message Signal Interrupt enables an MSI-capable hardware device to
+ send an inbound Memory Write on its PCI bus instead of asserting
+ IRQ signal on device IRQ pin.
If you don't know what to do here, say N.
obj-$(CONFIG_PPC64) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_PCI_USE_VECTOR) += msi.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
case PCI_CAP_ID_MSI:
{
int pos;
- u32 mask_bits;
+ unsigned int mask_bits;
pos = entry->mask_base;
- pci_read_config_dword(entry->dev, pos, &mask_bits);
+ entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
+ pos, 4, &mask_bits);
mask_bits &= ~(1);
mask_bits |= flag;
- pci_write_config_dword(entry->dev, pos, mask_bits);
+ entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
+ pos, 4, mask_bits);
break;
}
case PCI_CAP_ID_MSIX:
if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
return;
- pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
+ entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
+ msi_lower_address_reg(pos), 4,
&address.lo_address.value);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
MSI_TARGET_CPU_SHIFT);
entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
- pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
+ entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
+ msi_lower_address_reg(pos), 4,
address.lo_address.value);
break;
}
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
- struct msi_desc *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (!entry || !entry->dev) {
- spin_unlock_irqrestore(&msi_lock, flags);
- return 0;
- }
- entry->msi_attrib.state = 1; /* Mark it active */
- spin_unlock_irqrestore(&msi_lock, flags);
-
return 0; /* never anything pending */
}
-static void release_msi(unsigned int vector);
+static void pci_disable_msi(unsigned int vector);
static void shutdown_msi_irq(unsigned int vector)
{
- release_msi(vector);
+ pci_disable_msi(vector);
}
#define shutdown_msi_irq_wo_maskbit shutdown_msi_irq
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
- struct msi_desc *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (!entry || !entry->dev) {
- spin_unlock_irqrestore(&msi_lock, flags);
- return 0;
- }
- entry->msi_attrib.state = 1; /* Mark it active */
- spin_unlock_irqrestore(&msi_lock, flags);
-
unmask_MSI_irq(vector);
return 0; /* never anything pending */
}
* which implement the MSI-X Capability Structure.
*/
static struct hw_interrupt_type msix_irq_type = {
- .typename = "PCI-MSI-X",
+ .typename = "PCI MSI-X",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
- .typename = "PCI-MSI",
+ .typename = "PCI MSI",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
- .typename = "PCI-MSI",
+ .typename = "PCI MSI",
.startup = startup_msi_irq_wo_maskbit,
.shutdown = shutdown_msi_irq_wo_maskbit,
.enable = enable_msi_irq_wo_maskbit,
msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
}
-static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
static int new_vector_avail = 1;
spin_lock_irqsave(&msi_lock, flags);
if (!new_vector_avail) {
- int free_vector = 0;
-
/*
* vector_irq[] = -1 indicates that this specific vector is:
* - assigned for MSI (since MSI have no associated IRQ) or
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (vector_irq[vector] != 0)
continue;
- free_vector = vector;
- if (!msi_desc[vector])
- break;
- else
- continue;
- }
- if (!free_vector) {
+ vector_irq[vector] = -1;
+ nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- return -EBUSY;
+ return vector;
}
- vector_irq[free_vector] = -1;
- nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- if (msi_desc[free_vector] != NULL) {
- struct pci_dev *dev;
- int tail;
-
- /* free all linked vectors before re-assign */
- do {
- spin_lock_irqsave(&msi_lock, flags);
- dev = msi_desc[free_vector]->dev;
- tail = msi_desc[free_vector]->link.tail;
- spin_unlock_irqrestore(&msi_lock, flags);
- msi_free_vector(dev, tail, 1);
- } while (free_vector != tail);
- }
-
- return free_vector;
+ return -EBUSY;
}
vector = assign_irq_vector(AUTO_ASSIGN);
last_alloc_vector = vector;
printk(KERN_INFO "WARNING: MSI INIT FAILURE\n");
return status;
}
- last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
- if (last_alloc_vector < 0) {
- pci_msi_enable = 0;
- printk(KERN_INFO "WARNING: ALL VECTORS ARE BUSY\n");
- status = -EBUSY;
- return status;
- }
- vector_irq[last_alloc_vector] = 0;
- nr_released_vectors++;
printk(KERN_INFO "MSI INIT SUCCESS\n");
return status;
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u16 control;
+ u32 control;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_enable(control, 1);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
} else {
msix_enable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u16 cmd;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ u32 cmd;
+ dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
cmd |= PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+ dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
}
}
static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u16 control;
+ u32 control;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
} else {
msix_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_control_reg(pos), 2, control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u16 cmd;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ u32 cmd;
+ dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+ dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
}
}
-static int msi_lookup_vector(struct pci_dev *dev, int type)
+static int msi_lookup_vector(struct pci_dev *dev)
{
int vector;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
- msi_desc[vector]->msi_attrib.type != type ||
+ msi_desc[vector]->msi_attrib.entry_nr ||
msi_desc[vector]->msi_attrib.default_vector != dev->irq)
- continue;
+ continue; /* not entry 0, skip */
spin_unlock_irqrestore(&msi_lock, flags);
- /* This pre-assigned MSI vector for this device
+ /* This pre-assigned entry-0 MSI vector for this device
already exits. Override dev->irq with this vector */
dev->irq = vector;
return 0;
if (!dev)
return;
- if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
+ if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0) {
+ nr_reserved_vectors++;
nr_msix_devices++;
- else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
+ } else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
nr_reserved_vectors++;
}
struct msg_address address;
struct msg_data data;
int pos, vector;
- u16 control;
+ u32 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (!pos)
+ return -EINVAL;
+
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
+ 2, &control);
+ if (control & PCI_MSI_FLAGS_ENABLE)
+ return 0;
+
+ if (!msi_lookup_vector(dev)) {
+ /* Lookup Success */
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ return 0;
+ }
/* MSI Entry Initialization */
if (!(entry = alloc_msi_entry()))
return -ENOMEM;
kmem_cache_free(msi_cachep, entry);
return -EBUSY;
}
- entry->link.head = vector;
- entry->link.tail = vector;
entry->msi_attrib.type = PCI_CAP_ID_MSI;
- entry->msi_attrib.state = 0; /* Mark it not active */
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
- dev->irq = vector;
+ entry->msi_attrib.default_vector = dev->irq;
+ dev->irq = vector; /* save default pre-assigned ioapic vector */
entry->dev = dev;
if (is_mask_bit_support(control)) {
entry->mask_base = msi_mask_bits_reg(pos,
msi_data_init(&data, vector);
entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- address.lo_address.value);
+ dev->bus->ops->write(dev->bus, dev->devfn, msi_lower_address_reg(pos),
+ 4, address.lo_address.value);
if (is_64bit_address(control)) {
- pci_write_config_dword(dev,
- msi_upper_address_reg(pos), address.hi_address);
- pci_write_config_word(dev,
- msi_data_reg(pos, 1), *((u32*)&data));
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_upper_address_reg(pos), 4, address.hi_address);
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_data_reg(pos, 1), 2, *((u32*)&data));
} else
- pci_write_config_word(dev,
- msi_data_reg(pos, 0), *((u32*)&data));
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_data_reg(pos, 0), 2, *((u32*)&data));
if (entry->msi_attrib.maskbit) {
unsigned int maskbits, temp;
/* All MSIs are unmasked by default, Mask them all */
- pci_read_config_dword(dev,
- msi_mask_bits_reg(pos, is_64bit_address(control)),
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
&maskbits);
temp = (1 << multi_msi_capable(control));
temp = ((temp - 1) & ~temp);
maskbits |= temp;
- pci_write_config_dword(dev,
- msi_mask_bits_reg(pos, is_64bit_address(control)),
+ dev->bus->ops->write(dev->bus, dev->devfn,
+ msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
maskbits);
}
attach_msi_entry(entry, vector);
* @dev: pointer to the pci_dev data structure of MSI-X device function
*
* Setup the MSI-X capability structure of device funtion with a
- * single MSI-X vector. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated vectors or non-zero for otherwise.
+ * single MSI-X vector. A return of zero indicates the successful setup
+ * of an entry zero with the new MSI-X vector or non-zero for otherwise.
+ * To request for additional MSI-X vectors, the device drivers are
+ * required to utilize the following supported APIs:
+ * 1) msi_alloc_vectors(...) for requesting one or more MSI-X vectors
+ * 2) msi_free_vectors(...) for releasing one or more MSI-X vectors
+ * back to PCI subsystem before calling free_irq(...)
**/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static int msix_capability_init(struct pci_dev *dev)
{
- struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
+ struct msi_desc *entry;
struct msg_address address;
struct msg_data data;
- int vector, pos, i, j, nr_entries, temp = 0;
+ int vector = 0, pos, dev_msi_cap, i;
u32 phys_addr, table_offset;
- u16 control;
+ u32 control;
u8 bir;
void *base;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!pos)
+ return -EINVAL;
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- nr_entries = multi_msix_capable(control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE)
+ return 0;
+
+ if (!msi_lookup_vector(dev)) {
+ /* Lookup Success */
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ return 0;
+ }
+
+ dev_msi_cap = multi_msix_capable(control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msix_table_offset_reg(pos), 4, &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
phys_addr = pci_resource_start (dev, bir);
phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
if (!request_mem_region(phys_addr,
- nr_entries * PCI_MSIX_ENTRY_SIZE,
- "MSI-X vector table"))
+ dev_msi_cap * PCI_MSIX_ENTRY_SIZE,
+ "MSI-X iomap Failure"))
return -ENOMEM;
- base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL) {
- release_mem_region(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- return -ENOMEM;
- }
- /* MSI-X Table Initialization */
- for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry();
- if (!entry)
- break;
- if ((vector = get_msi_vector(dev)) < 0)
- break;
+ base = ioremap_nocache(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
+ if (base == NULL)
+ goto free_region;
+ /* MSI Entry Initialization */
+ entry = alloc_msi_entry();
+ if (!entry)
+ goto free_iomap;
+ if ((vector = get_msi_vector(dev)) < 0)
+ goto free_entry;
- j = entries[i].entry;
- entries[i].vector = vector;
- entry->msi_attrib.type = PCI_CAP_ID_MSIX;
- entry->msi_attrib.state = 0; /* Mark it not active */
- entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.default_vector = dev->irq;
- entry->dev = dev;
- entry->mask_base = (unsigned long)base;
- if (!head) {
- entry->link.head = vector;
- entry->link.tail = vector;
- head = entry;
- } else {
- entry->link.head = temp;
- entry->link.tail = tail->link.tail;
- tail->link.tail = vector;
- head->link.head = vector;
- }
- temp = vector;
- tail = entry;
- /* Replace with MSI-X handler */
- irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
- /* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu =
- ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- writel(address.lo_address.value,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = 1;
+ entry->msi_attrib.default_vector = dev->irq;
+ dev->irq = vector; /* save default pre-assigned ioapic vector */
+ entry->dev = dev;
+ entry->mask_base = (unsigned long)base;
+ /* Replace with MSI handler */
+ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
+ /* Configure MSI-X capability structure */
+ msi_address_init(&address);
+ msi_data_init(&data, vector);
+ entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+ MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ writel(address.lo_address.value, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(address.hi_address, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(*(u32*)&data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ /* Zero-initialize all remaining entries (1 .. dev_msi_cap-1) */
+ for (i = 1; i < dev_msi_cap; i++) {
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data,
- base + j * PCI_MSIX_ENTRY_SIZE +
+ writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
- attach_msi_entry(entry, vector);
}
- if (i != nvec) {
- i--;
- for (; i >= 0; i--) {
- vector = (entries + i)->vector;
- msi_free_vector(dev, vector, 0);
- (entries + i)->vector = 0;
- }
- return -EBUSY;
- }
- /* Set MSI-X enabled bits */
+ attach_msi_entry(entry, vector);
+ /* Set MSI enabled bits */
enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
return 0;
+
+free_entry:
+ kmem_cache_free(msi_cachep, entry);
+free_iomap:
+ iounmap(base);
+free_region:
+ release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
+
+ return ((vector < 0) ? -EBUSY : -ENOMEM);
}
/**
- * pci_enable_msi - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
+ * pci_enable_msi - configure device's MSI(X) capability structure
+ * @dev: pointer to the pci_dev data structure of MSI(X) device function
*
- * Setup the MSI capability structure of device function with
- * a single MSI vector upon its software driver call to request for
- * MSI mode enabled on its hardware device function. A return of zero
- * indicates the successful setup of an entry zero with the new MSI
+ * Setup the MSI/MSI-X capability structure of device function with
+ * a single MSI(X) vector upon its software driver call to request for
+ * MSI(X) mode enabled on its hardware device function. A return of zero
+ * indicates the successful setup of an entry zero with the new MSI(X)
* vector or non-zero for otherwise.
**/
int pci_enable_msi(struct pci_dev* dev)
{
- int pos, temp = dev->irq, status = -EINVAL;
- u16 control;
+ int status = -EINVAL;
if (!pci_msi_enable || !dev)
return status;
- if ((status = msi_init()) < 0)
- return status;
-
- if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
- return -EINVAL;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE)
- return 0; /* Already in MSI mode */
-
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- /* Lookup Sucess */
- unsigned long flags;
+ if (msi_init() < 0)
+ return -ENOMEM;
- spin_lock_irqsave(&msi_lock, flags);
- if (!vector_irq[dev->irq]) {
- msi_desc[dev->irq]->msi_attrib.state = 0;
- vector_irq[dev->irq] = -1;
- nr_released_vectors--;
- spin_unlock_irqrestore(&msi_lock, flags);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
- return 0;
- }
- spin_unlock_irqrestore(&msi_lock, flags);
- dev->irq = temp;
- }
- /* Check whether driver already requested for MSI-X vectors */
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- printk(KERN_INFO "Can't enable MSI. Device already had MSI-X vectors assigned\n");
- dev->irq = temp;
- return -EINVAL;
- }
- status = msi_capability_init(dev);
- if (!status) {
- if (!pos)
- nr_reserved_vectors--; /* Only MSI capable */
- else if (nr_msix_devices > 0)
- nr_msix_devices--; /* Both MSI and MSI-X capable,
- but choose enabling MSI */
- }
+ if ((status = msix_capability_init(dev)) == -EINVAL)
+ status = msi_capability_init(dev);
+ if (!status)
+ nr_reserved_vectors--;
return status;
}
-void pci_disable_msi(struct pci_dev* dev)
+static int msi_free_vector(struct pci_dev* dev, int vector);
+static void pci_disable_msi(unsigned int vector)
{
+ int head, tail, type, default_vector;
struct msi_desc *entry;
- int pos, default_vector;
- u16 control;
+ struct pci_dev *dev;
unsigned long flags;
- if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
- return;
-
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
+ entry = msi_desc[vector];
+ if (!entry || !entry->dev) {
spin_unlock_irqrestore(&msi_lock, flags);
return;
}
- if (entry->msi_attrib.state) {
- spin_unlock_irqrestore(&msi_lock, flags);
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- dev->irq);
- BUG_ON(entry->msi_attrib.state > 0);
- } else {
- vector_irq[dev->irq] = 0; /* free it */
- nr_released_vectors++;
- default_vector = entry->msi_attrib.default_vector;
- spin_unlock_irqrestore(&msi_lock, flags);
- /* Restore dev->irq to its default pin-assertion vector */
- dev->irq = default_vector;
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
+ dev = entry->dev;
+ type = entry->msi_attrib.type;
+ head = entry->link.head;
+ tail = entry->link.tail;
+ default_vector = entry->msi_attrib.default_vector;
+ spin_unlock_irqrestore(&msi_lock, flags);
+
+ disable_msi_mode(dev, pci_find_capability(dev, type), type);
+ /* Restore dev->irq to its default pin-assertion vector */
+ dev->irq = default_vector;
+ if (type == PCI_CAP_ID_MSIX && head != tail) {
+ /* Bad driver: it did not call msi_free_vectors before exit.
+ We must clean up here */
+ while (1) {
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[vector];
+ head = entry->link.head;
+ tail = entry->link.tail;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ if (tail == head)
+ break;
+ if (msi_free_vector(dev, entry->link.tail))
+ break;
+ }
}
}
-static void release_msi(unsigned int vector)
+static int msi_alloc_vector(struct pci_dev* dev, int head)
{
struct msi_desc *entry;
+ struct msg_address address;
+ struct msg_data data;
+ int i, offset, pos, dev_msi_cap, vector;
+ u32 low_address, control;
+ unsigned long base = 0L;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (entry && entry->dev)
- entry->msi_attrib.state = 0; /* Mark it not active */
+ entry = msi_desc[dev->irq];
+ if (!entry) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return -EINVAL;
+ }
+ base = entry->mask_base;
spin_unlock_irqrestore(&msi_lock, flags);
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
+ 2, &control);
+ dev_msi_cap = multi_msix_capable(control);
+ for (i = 1; i < dev_msi_cap; i++) {
+ if (!(low_address = readl(base + i * PCI_MSIX_ENTRY_SIZE)))
+ break;
+ }
+ if (i >= dev_msi_cap)
+ return -EINVAL;
+
+ /* MSI Entry Initialization */
+ if (!(entry = alloc_msi_entry()))
+ return -ENOMEM;
+
+ if ((vector = get_new_vector()) < 0) {
+ kmem_cache_free(msi_cachep, entry);
+ return vector;
+ }
+ entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.entry_nr = i;
+ entry->msi_attrib.maskbit = 1;
+ entry->dev = dev;
+ entry->link.head = head;
+ entry->mask_base = base;
+ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
+ /* Configure MSI-X capability structure */
+ msi_address_init(&address);
+ msi_data_init(&data, vector);
+ entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+ MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ writel(address.lo_address.value, base + offset +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel(address.hi_address, base + offset +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel(*(u32*)&data, base + offset + PCI_MSIX_ENTRY_DATA_OFFSET);
+ writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ attach_msi_entry(entry, vector);
+
+ return vector;
}
-static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
+static int msi_free_vector(struct pci_dev* dev, int vector)
{
struct msi_desc *entry;
- int head, entry_nr, type;
+ int entry_nr, type;
unsigned long base = 0L;
unsigned long flags;
}
type = entry->msi_attrib.type;
entry_nr = entry->msi_attrib.entry_nr;
- head = entry->link.head;
base = entry->mask_base;
- msi_desc[entry->link.head]->link.tail = entry->link.tail;
- msi_desc[entry->link.tail]->link.head = entry->link.head;
- entry->dev = NULL;
- if (!reassign) {
- vector_irq[vector] = 0;
- nr_released_vectors++;
+ if (entry->link.tail != entry->link.head) {
+ msi_desc[entry->link.head]->link.tail = entry->link.tail;
+ if (entry->link.tail)
+ msi_desc[entry->link.tail]->link.head = entry->link.head;
}
+ entry->dev = NULL;
+ vector_irq[vector] = 0;
+ nr_released_vectors++;
msi_desc[vector] = NULL;
spin_unlock_irqrestore(&msi_lock, flags);
kmem_cache_free(msi_cachep, entry);
-
if (type == PCI_CAP_ID_MSIX) {
- if (!reassign)
- writel(1, base +
- entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
- if (head == vector) {
- /*
- * Detect last MSI-X vector to be released.
- * Release the MSI-X memory-mapped table.
- */
- int pos, nr_entries;
- u32 phys_addr, table_offset;
- u16 control;
- u8 bir;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, msi_control_reg(pos),
- &control);
- nr_entries = multi_msix_capable(control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- phys_addr = pci_resource_start (dev, bir);
- phys_addr += (u32)(table_offset &
- ~PCI_MSIX_FLAGS_BIRMASK);
- iounmap((void*)base);
- release_mem_region(phys_addr,
- nr_entries * PCI_MSIX_ENTRY_SIZE);
- }
- }
+ int offset;
- return 0;
-}
-
-static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
-{
- int vector = head, tail = 0;
- int i = 0, j = 0, nr_entries = 0;
- unsigned long base = 0L;
- unsigned long flags;
-
- spin_lock_irqsave(&msi_lock, flags);
- while (head != tail) {
- nr_entries++;
- tail = msi_desc[vector]->link.tail;
- if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
- j = vector;
- vector = tail;
+ offset = entry_nr * PCI_MSIX_ENTRY_SIZE;
+ writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ writel(0, base + offset + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
}
- if (*nvec > nr_entries) {
- spin_unlock_irqrestore(&msi_lock, flags);
- *nvec = nr_entries;
- return -EINVAL;
- }
- vector = ((j > 0) ? j : head);
- for (i = 0; i < *nvec; i++) {
- j = msi_desc[vector]->msi_attrib.entry_nr;
- msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
- vector_irq[vector] = -1; /* Mark it busy */
- nr_released_vectors--;
- entries[i].vector = vector;
- if (j != (entries + i)->entry) {
- base = msi_desc[vector]->mask_base;
- msi_desc[vector]->msi_attrib.entry_nr =
- (entries + i)->entry;
- writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
- (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
- (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
- base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_DATA_OFFSET);
- }
- vector = msi_desc[vector]->link.tail;
- }
- spin_unlock_irqrestore(&msi_lock, flags);
return 0;
}
/**
- * pci_enable_msix - configure device's MSI-X capability structure
+ * msi_alloc_vectors - allocate additional MSI-X vectors
* @dev: pointer to the pci_dev data structure of MSI-X device function
- * @data: pointer to an array of MSI-X entries
+ * @vector: pointer to an array of new allocated MSI-X vectors
* @nvec: number of MSI-X vectors requested for allocation by device driver
*
- * Setup the MSI-X capability structure of device function with the number
- * of requested vectors upon its software driver call to request for
- * MSI-X mode enabled on its hardware device function. A return of zero
- * indicates the successful configuration of MSI-X capability structure
- * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
- * Or a return of > 0 indicates that driver request is exceeding the number
- * of vectors available. Driver should use the returned value to re-send
- * its request.
+ * Allocate additional MSI-X vectors requested by device driver. A
+ * return of zero indicates the successful setup of MSI-X capability
+ * structure with new allocated MSI-X vectors or non-zero for otherwise.
**/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec)
{
- int status, pos, nr_entries, free_vectors;
- int i, j, temp;
- u16 control;
+ struct msi_desc *entry;
+ int i, head, pos, vec, free_vectors, alloc_vectors;
+ int *vectors = (int *)vector;
+ u32 control;
unsigned long flags;
- if (!pci_msi_enable || !dev || !entries)
+ if (!pci_msi_enable || !dev)
return -EINVAL;
- if ((status = msi_init()) < 0)
- return status;
-
if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
return -EINVAL;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE)
- return -EINVAL; /* Already in MSI-X mode */
-
- nr_entries = multi_msix_capable(control);
- if (nvec > nr_entries)
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
+ if (nvec > multi_msix_capable(control))
return -EINVAL;
- /* Check for any invalid entries */
- for (i = 0; i < nvec; i++) {
- if (entries[i].entry >= nr_entries)
- return -EINVAL; /* invalid entry */
- for (j = i + 1; j < nvec; j++) {
- if (entries[i].entry == entries[j].entry)
- return -EINVAL; /* duplicate entry */
- }
- }
- temp = dev->irq;
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- /* Lookup Sucess */
- nr_entries = nvec;
- /* Reroute MSI-X table */
- if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
- /* #requested > #previous-assigned */
- dev->irq = temp;
- return nr_entries;
- }
- dev->irq = temp;
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
- return 0;
- }
- /* Check whether driver already requested for MSI vector */
- if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- printk(KERN_INFO "Can't enable MSI-X. Device already had MSI vector assigned\n");
- dev->irq = temp;
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev || /* legal call */
+ entry->msi_attrib.type != PCI_CAP_ID_MSIX || /* must be MSI-X */
+ entry->link.head != entry->link.tail) { /* already multi */
+ spin_unlock_irqrestore(&msi_lock, flags);
return -EINVAL;
}
-
- spin_lock_irqsave(&msi_lock, flags);
/*
* msi_lock is provided to ensure that enough vectors resources are
* available before granting.
free_vectors /= nr_msix_devices;
spin_unlock_irqrestore(&msi_lock, flags);
- if (nvec > free_vectors) {
- if (free_vectors > 0)
- return free_vectors;
- else
- return -EBUSY;
- }
+ if (nvec > free_vectors)
+ return -EBUSY;
- status = msix_capability_init(dev, entries, nvec);
- if (!status && nr_msix_devices > 0)
+ alloc_vectors = 0;
+ head = dev->irq;
+ for (i = 0; i < nvec; i++) {
+ if ((vec = msi_alloc_vector(dev, head)) < 0)
+ break;
+ *(vectors + i) = vec;
+ head = vec;
+ alloc_vectors++;
+ }
+ if (alloc_vectors != nvec) {
+ for (i = 0; i < alloc_vectors; i++) {
+ vec = *(vectors + i);
+ msi_free_vector(dev, vec);
+ }
+ spin_lock_irqsave(&msi_lock, flags);
+ msi_desc[dev->irq]->link.tail = msi_desc[dev->irq]->link.head;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return -EBUSY;
+ }
+ if (nr_msix_devices > 0)
nr_msix_devices--;
- return status;
+ return 0;
}
-void pci_disable_msix(struct pci_dev* dev)
+/**
+ * msi_free_vectors - reclaim MSI-X vectors to unused state
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @vector: pointer to an array of released MSI-X vectors
+ * @nvec: number of MSI-X vectors requested for release by device driver
+ *
+ * Reclaim MSI-X vectors released by device driver to unused state,
+ * which may be used later on. A return of zero indicates the
+ * success or non-zero for otherwise. Device driver should call this
+ * before calling function free_irq.
+ **/
+int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
{
- int pos, temp;
- u16 control;
-
- if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return;
+ struct msi_desc *entry;
+ int i;
+ unsigned long flags;
- temp = dev->irq;
- if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- int state, vector, head, tail = 0, warning = 0;
- unsigned long flags;
+ if (!pci_msi_enable)
+ return -EINVAL;
- vector = head = dev->irq;
- spin_lock_irqsave(&msi_lock, flags);
- while (head != tail) {
- state = msi_desc[vector]->msi_attrib.state;
- if (state)
- warning = 1;
- else {
- vector_irq[vector] = 0; /* free it */
- nr_released_vectors++;
- }
- tail = msi_desc[vector]->link.tail;
- vector = tail;
- }
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev ||
+ entry->msi_attrib.type != PCI_CAP_ID_MSIX ||
+ entry->link.head == entry->link.tail) { /* Nothing to free */
spin_unlock_irqrestore(&msi_lock, flags);
- if (warning) {
- dev->irq = temp;
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
- BUG_ON(warning > 0);
- } else {
- dev->irq = temp;
- disable_msi_mode(dev,
- pci_find_capability(dev, PCI_CAP_ID_MSIX),
- PCI_CAP_ID_MSIX);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&msi_lock, flags);
- }
+ for (i = 0; i < nvec; i++) {
+ if (*(vector + i) == dev->irq)
+ continue;/* Don't free entry 0 if mistaken by driver */
+ msi_free_vector(dev, *(vector + i));
}
+
+ return 0;
}
/**
**/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
- int state, pos, temp;
+ struct msi_desc *entry;
+ int type, temp;
unsigned long flags;
if (!pci_msi_enable || !dev)
return;
- temp = dev->irq; /* Save IOAPIC IRQ */
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
- spin_lock_irqsave(&msi_lock, flags);
- state = msi_desc[dev->irq]->msi_attrib.state;
+ if (!pci_find_capability(dev, PCI_CAP_ID_MSI)) {
+ if (!pci_find_capability(dev, PCI_CAP_ID_MSIX))
+ return;
+ }
+ temp = dev->irq;
+ if (msi_lookup_vector(dev))
+ return;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[dev->irq];
+ if (!entry || entry->dev != dev) {
spin_unlock_irqrestore(&msi_lock, flags);
- if (state) {
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), dev->irq);
- BUG_ON(state > 0);
- } else /* Release MSI vector assigned to this device */
- msi_free_vector(dev, dev->irq, 0);
- dev->irq = temp; /* Restore IOAPIC IRQ */
+ return;
}
- if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
- !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
- int vector, head, tail = 0, warning = 0;
- unsigned long base = 0L;
+ type = entry->msi_attrib.type;
+ spin_unlock_irqrestore(&msi_lock, flags);
- vector = head = dev->irq;
- while (head != tail) {
+ msi_free_vector(dev, dev->irq);
+ if (type == PCI_CAP_ID_MSIX) {
+ int i, pos, dev_msi_cap;
+ u32 phys_addr, table_offset;
+ u32 control;
+ u8 bir;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
+ dev_msi_cap = multi_msix_capable(control);
+ dev->bus->ops->read(dev->bus, dev->devfn,
+ msix_table_offset_reg(pos), 4, &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+ phys_addr = pci_resource_start (dev, bir);
+ phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
+ for (i = FIRST_DEVICE_VECTOR; i < NR_IRQS; i++) {
spin_lock_irqsave(&msi_lock, flags);
- state = msi_desc[vector]->msi_attrib.state;
- tail = msi_desc[vector]->link.tail;
- base = msi_desc[vector]->mask_base;
+ if (!msi_desc[i] || msi_desc[i]->dev != dev) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&msi_lock, flags);
- if (state)
- warning = 1;
- else if (vector != head) /* Release MSI-X vector */
- msi_free_vector(dev, vector, 0);
- vector = tail;
- }
- msi_free_vector(dev, vector, 0);
- if (warning) {
- /* Force to release the MSI-X memory-mapped table */
- u32 phys_addr, table_offset;
- u16 control;
- u8 bir;
-
- pci_read_config_word(dev, msi_control_reg(pos),
- &control);
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- phys_addr = pci_resource_start (dev, bir);
- phys_addr += (u32)(table_offset &
- ~PCI_MSIX_FLAGS_BIRMASK);
- iounmap((void*)base);
- release_mem_region(phys_addr, PCI_MSIX_ENTRY_SIZE *
- multi_msix_capable(control));
- printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
- dev->bus->number, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
- BUG_ON(warning > 0);
+ msi_free_vector(dev, i);
}
- dev->irq = temp; /* Restore IOAPIC IRQ */
+ writel(1, entry->mask_base + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ iounmap((void*)entry->mask_base);
+ release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
}
+ dev->irq = temp;
+ nr_reserved_vectors++;
}
EXPORT_SYMBOL(pci_enable_msi);
-EXPORT_SYMBOL(pci_disable_msi);
-EXPORT_SYMBOL(pci_enable_msix);
-EXPORT_SYMBOL(pci_disable_msix);
+EXPORT_SYMBOL(msi_alloc_vectors);
+EXPORT_SYMBOL(msi_free_vectors);
struct {
__u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
__u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 state : 1; /* {0: free, 1: busy} */
- __u8 reserved: 1; /* reserved */
+ __u8 reserved: 2; /* reserved */
__u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */
__u8 current_cpu; /* current destination cpu */
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
- if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
- switch (dev->subsystem_device) {
- case 0x1751: /* M2N notebook */
- asus_hides_smbus = 1;
- }
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc },
#include "cirrus.h"
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Driver for the Cirrus PD6729 PCI-PCMCIA bridge");
-MODULE_AUTHOR("Jun Komuro <komurojun@mbn.nifty.com>");
#define MAX_SOCKETS 2
-/*
- * simple helper functions
- * External clock time, in nanoseconds. 120 ns = 8.33 MHz
- */
+/* simple helper functions */
+/* External clock time, in nanoseconds. 120 ns = 8.33 MHz */
#define to_cycles(ns) ((ns)/120)
static spinlock_t port_lock = SPIN_LOCK_UNLOCKED;
*value |= SS_DETECT;
}
- /*
- * IO cards have a different meaning of bits 0,1
- * Also notice the inverse-logic on the bits
- */
+ /* IO cards have a different meaning of bits 0,1 */
+ /* Also notice the inverse-logic on the bits */
if (indirect_read(socket, I365_INTCTL) & I365_PC_IOCARD) {
/* IO card */
if (!(status & I365_CS_STSCHG))
state->io_irq = 0;
state->csc_mask = 0;
- /*
- * First the power status of the socket
- * PCTRL - Power Control Register
- */
+ /* First the power status of the socket */
+ /* PCTRL - Power Control Register */
reg = indirect_read(socket, I365_POWER);
if (reg & I365_PWR_AUTO)
state->Vpp = 120;
}
- /*
- * Now the IO card, RESET flags and IO interrupt
- * IGENC, Interrupt and General Control
- */
+ /* Now the IO card, RESET flags and IO interrupt */
+ /* IGENC, Interrupt and General Control */
reg = indirect_read(socket, I365_INTCTL);
if ((reg & I365_PC_RESET) == 0)
/* Set the IRQ number */
state->io_irq = socket->socket.pci_irq;
- /*
- * Card status change
- * CSCICR, Card Status Change Interrupt Configuration
- */
+ /* Card status change */
+ /* CSCICR, Card Status Change Interrupt Configuration */
reg = indirect_read(socket, I365_CSCINT);
if (reg & I365_CSC_DETECT)
printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge at 0x%lx on irq %d\n",
pci_resource_start(dev, 0), dev->irq);
printk(KERN_INFO "pd6729: configured as a %d socket device.\n", MAX_SOCKETS);
- /*
- * Since we have no memory BARs some firmware we may not
- * have had PCI_COMMAND_MEM enabled, yet the device needs
- * it.
- */
+	/* Since we have no memory BARs, some firmware may not
+	   have had PCI_COMMAND_MEMORY enabled, yet the device
+	   needs it. */
pci_read_config_byte(dev, PCI_COMMAND, &configbyte);
if (!(configbyte & PCI_COMMAND_MEMORY)) {
printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n");
file under either the MPL or the GPL.
======================================================================*/
+/*
+ * Please see linux/Documentation/arm/SA1100/PCMCIA for more information
+ * on the low-level kernel interface.
+ */
#include <linux/module.h>
#include <linux/init.h>
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0,
- event, sizeof(u16), NULL, 0);
+ event, sizeof(u16), 0, 0);
return status;
}
#endif
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- info, 65536, NULL, 0);
+ info, 65536, 0, 0);
return status;
}
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- data, sizeof(struct pnp_isa_config_struc), NULL, 0);
+ data, sizeof(struct pnp_isa_config_struc), 0, 0);
return status;
}
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS,
- data, sizeof(struct escd_info_struc), NULL, 0);
+ data, sizeof(struct escd_info_struc), 0, 0);
return status;
}
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
+ /* Check position. */
+ if (ppos != &filp->f_pos) {
+ /*
+ * "A request was outside the capabilities of the device."
+ * This check uses internal knowledge about how pread and
+ * read work...
+ */
+ DBF_EVENT(6, "TCHAR:ppos wrong\n");
+ return -EOVERFLOW;
+ }
/*
* If the tape isn't terminated yet, do it now. And since we then
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
+ /* Check position */
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ DBF_EVENT(6, "TCHAR:ppos wrong\n");
+ return -EOVERFLOW;
+ }
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- return nonseekable_open(inode, filp);
+ return 0;
}
tape_put_device(device);
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
- * $Revision: 1.115 $
+ * $Revision: 1.114 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
return -ENOMEM;
memset(chp, 0, sizeof(struct channel_path));
+ chps[chpid] = chp;
+
/* fill in status, etc. */
chp->id = chpid;
chp->state = 1;
if (ret) {
printk(KERN_WARNING "%s: could not register %02x\n",
__func__, chpid);
- goto out_free;
+ return ret;
}
ret = device_create_file(&chp->dev, &dev_attr_status);
- if (ret) {
+ if (ret)
device_unregister(&chp->dev);
- goto out_free;
- } else
- chps[chpid] = chp;
- return ret;
-out_free:
- kfree(chp);
+
return ret;
}
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
/* Use with care. */
-#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(int irq);
extern struct subchannel *cio_get_console_subchannel(void);
-#else
-#define cio_is_console(irq) 0
-#define cio_get_console_subchannel() NULL
-#endif
extern int cio_show_msg;
};
if (notify) {
/* Get device online again. */
- cdev->private->state = DEV_STATE_OFFLINE;
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
return;
/*
* drivers/s390/cio/device_ops.c
*
- * $Revision: 1.50 $
+ * $Revision: 1.47 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
-#include <linux/delay.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
if ((ret == -EBUSY) || (ret == -EACCES)) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
- msleep(10);
+ schedule_timeout(1);
spin_lock_irq(&sch->lock);
continue;
}
break;
/* Try again later. */
spin_unlock_irq(&sch->lock);
- msleep(10);
+ schedule_timeout(1);
spin_lock_irq(&sch->lock);
} while (1);
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
ccw_device_path_notoper(cdev);
- if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
- (irb->scsw.dstat & DEV_STAT_CHN_END)) {
+ if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
#include "ioasm.h"
#include "chsc.h"
-#define VERSION_QDIO_C "$Revision: 1.84 $"
+#define VERSION_QDIO_C "$Revision: 1.83 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
static debug_info_t *qdio_dbf_sbal;
static debug_info_t *qdio_dbf_trace;
static debug_info_t *qdio_dbf_sense;
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
static debug_info_t *qdio_dbf_slsb_out;
static debug_info_t *qdio_dbf_slsb_in;
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
/* iQDIO stuff: */
static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
qdio_is_outbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
-#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"oqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"oqisdone");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
return (no_used==0);
}
qdio_kick_outbound_q(struct qdio_q *q)
{
int result;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"kickoutq");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
if (!q->siga_out)
return;
switch (result) {
case 0:
- /* went smooth this time, reset timestamp */
-#ifdef CONFIG_QDIO_DEBUG
+ /* went smooth this time, reset timestamp */
QDIO_DBF_TEXT3(0,trace,"cc2reslv");
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
q->timing.busy_start=0;
-#endif /* CONFIG_QDIO_DEBUG */
break;
case (2|QDIO_SIGA_ERROR_B_BIT_SET):
/* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
+ atomic_inc(&q->busy_siga_counter);
/* if the last siga was successful, save
* timestamp here */
break;
}
QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
/* else fallthrough and report error */
default:
/* for plain cc=1, 2 or 3: */
qdio_kick_outbound_handler(struct qdio_q *q)
{
int start, end, real_end, count;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
start = q->first_element_to_kick;
/* last_move_ftc was just updated */
count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
(QDIO_MAX_BUFFERS_PER_Q-1);
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"kickouth");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (q->state==QDIO_IRQ_STATE_ACTIVE)
q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
int f,f_mod_no;
volatile char *slsb;
int first_not_to_check;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */
/* P_ERROR means frontier is reached, break and report error */
case SLSB_P_INPUT_ERROR:
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"inperr%2x",f_mod_no);
QDIO_DBF_TEXT3(1,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
iqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
/* propagate the change from 82 to 80 through VM */
SYNC_MEMORY;
-#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"iqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"iniqisdo");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
-#endif /* CONFIG_QDIO_DEBUG */
if (!no_used)
return 1;
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
no_used=atomic_read(&q->number_of_buffers_used);
* has (probably) not moved (see qdio_inbound_processing)
*/
if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisdon");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
return 1;
} else {
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisntd");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
return 0;
}
}
qdio_kick_inbound_handler(struct qdio_q *q)
{
int count, start, end, real_end, i;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
-#endif
QDIO_DBF_TEXT4(0,trace,"kickinh");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
}
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
q->handler(q->cdev,
qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{
int i;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT5(0,trace,"newstate");
sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
QDIO_DBF_TEXT5(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
irq_ptr->state=state;
for (i=0;i<irq_ptr->no_input_qs;i++)
int cstat,dstat;
char dbf_text[15];
-#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0, trace, "qint");
sprintf(dbf_text, "%s", cdev->dev.bus_id);
QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (!intparm) {
QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
qdio_irq_check_sense(irq_ptr->irq, irb);
-#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text, "state:%d", irq_ptr->state);
QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
int cc;
struct qdio_q *q;
struct qdio_irq *irq_ptr;
- void *ptr;
-#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15]="SyncXXXX";
-#endif
+ void *ptr;
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
-#ifdef CONFIG_QDIO_DEBUG
*((int*)(&dbf_text[4])) = irq_ptr->irq;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
*((int*)(&dbf_text[0]))=flags;
*((int*)(&dbf_text[4]))=queue_number;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
-#endif /* CONFIG_QDIO_DEBUG */
if (flags&QDIO_FLAG_SYNC_INPUT) {
q=irq_ptr->input_qs[queue_number];
unsigned int count,struct qdio_buffer *buffers)
{
struct qdio_irq *irq_ptr;
-#ifdef CONFIG_QDIO_DEBUG
+
char dbf_text[20];
sprintf(dbf_text,"doQD%04x",cdev->private->irq);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
(count>QDIO_MAX_BUFFERS_PER_Q) ||
if (!irq_ptr)
return -ENODEV;
-#ifdef CONFIG_QDIO_DEBUG
if (callflags&QDIO_FLAG_SYNC_INPUT)
QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
sizeof(void*));
QDIO_DBF_TEXT3(0,trace,dbf_text);
sprintf(dbf_text,"qi%02xct%02x",qidx,count);
QDIO_DBF_TEXT3(0,trace,dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
debug_unregister(qdio_dbf_sense);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
if (qdio_dbf_slsb_out)
debug_unregister(qdio_dbf_slsb_out);
if (qdio_dbf_slsb_in)
debug_unregister(qdio_dbf_slsb_in);
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
}
static int
debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
QDIO_DBF_SLSB_OUT_INDEX,
QDIO_DBF_SLSB_OUT_NR_AREAS,
goto oom;
debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
return 0;
oom:
QDIO_PRINT_ERR("not enough memory for dbf.\n");
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
-#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
+#define VERSION_CIO_QDIO_H "$Revision: 1.24 $"
-#ifdef CONFIG_QDIO_DEBUG
+//#define QDIO_DBF_LIKE_HELL
+
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_VERBOSE_LEVEL 9
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_VERBOSE_LEVEL 5
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_USE_PROCESSING_STATE
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SETUP_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SBAL_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SENSE_LEVEL 6
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_ARCH_S390X */
-#ifdef CONFIG_QDIO_DEBUG
+#ifdef QDIO_DBF_LIKE_HELL
#define set_slsb(x,y) \
if(q->queue_type==QDIO_TRACE_QTYPE) { \
if(q->is_input_q) { \
QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} \
}
-#else /* CONFIG_QDIO_DEBUG */
+#else /* QDIO_DBF_LIKE_HELL */
#define set_slsb(x,y) qdio_set_slsb(x,y)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* QDIO_DBF_LIKE_HELL */
struct qdio_q {
volatile struct slsb slsb;
/*
*
- * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
+ * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $)
*
- * CTC / ESCON network driver - s390 dbf exploit.
+ * Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
+ * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
/**
* Debug Facility Stuff
*/
-debug_info_t *ctc_dbf_setup = NULL;
-debug_info_t *ctc_dbf_data = NULL;
-debug_info_t *ctc_dbf_trace = NULL;
+debug_info_t *dbf_setup = NULL;
+debug_info_t *dbf_data = NULL;
+debug_info_t *dbf_trace = NULL;
-DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
+DEFINE_PER_CPU(char[256], dbf_txt_buf);
void
-ctc_unregister_dbf_views(void)
+unregister_dbf_views(void)
{
- if (ctc_dbf_setup)
- debug_unregister(ctc_dbf_setup);
- if (ctc_dbf_data)
- debug_unregister(ctc_dbf_data);
- if (ctc_dbf_trace)
- debug_unregister(ctc_dbf_trace);
+ if (dbf_setup)
+ debug_unregister(dbf_setup);
+ if (dbf_data)
+ debug_unregister(dbf_data);
+ if (dbf_trace)
+ debug_unregister(dbf_trace);
}
int
-ctc_register_dbf_views(void)
+register_dbf_views(void)
{
- ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
+ dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
CTC_DBF_SETUP_INDEX,
CTC_DBF_SETUP_NR_AREAS,
CTC_DBF_SETUP_LEN);
- ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
+ dbf_data = debug_register(CTC_DBF_DATA_NAME,
CTC_DBF_DATA_INDEX,
CTC_DBF_DATA_NR_AREAS,
CTC_DBF_DATA_LEN);
- ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
+ dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
CTC_DBF_TRACE_INDEX,
CTC_DBF_TRACE_NR_AREAS,
CTC_DBF_TRACE_LEN);
- if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
- (ctc_dbf_trace == NULL)) {
- ctc_unregister_dbf_views();
+ if ((dbf_setup == NULL) || (dbf_data == NULL) ||
+ (dbf_trace == NULL)) {
+ unregister_dbf_views();
return -ENOMEM;
}
- debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
+ debug_register_view(dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL);
- debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
+ debug_register_view(dbf_data, &debug_hex_ascii_view);
+ debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL);
- debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
+ debug_register_view(dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL);
return 0;
}
/*
*
- * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.3 $)
+ * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $)
*
- * CTC / ESCON network driver - s390 dbf exploit.
+ * Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.3 $ $Date: 2004/07/28 12:27:54 $
+ * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define DBF_TEXT(name,level,text) \
do { \
- debug_text_event(ctc_dbf_##name,level,text); \
+ debug_text_event(dbf_##name,level,text); \
} while (0)
#define DBF_HEX(name,level,addr,len) \
do { \
- debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
+ debug_event(dbf_##name,level,(void*)(addr),len); \
} while (0)
-extern DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
-extern debug_info_t *ctc_dbf_setup;
-extern debug_info_t *ctc_dbf_data;
-extern debug_info_t *ctc_dbf_trace;
+extern DEFINE_PER_CPU(char[256], dbf_txt_buf);
+extern debug_info_t *dbf_setup;
+extern debug_info_t *dbf_data;
+extern debug_info_t *dbf_trace;
#define DBF_TEXT_(name,level,text...) \
do { \
- char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
- sprintf(ctc_dbf_txt_buf, text); \
- debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
- put_cpu_var(ctc_dbf_txt_buf); \
+ char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \
+ sprintf(dbf_txt_buf, text); \
+ debug_text_event(dbf_##name,level,dbf_txt_buf); \
+ put_cpu_var(dbf_txt_buf); \
} while (0)
#define DBF_SPRINTF(name,level,text...) \
do { \
- debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
- debug_sprintf_event(ctc_dbf_trace, level, text ); \
+ debug_sprintf_event(dbf_trace, level, ##text ); \
+ debug_sprintf_event(dbf_trace, level, text ); \
} while (0)
-int ctc_register_dbf_views(void);
+int register_dbf_views(void);
-void ctc_unregister_dbf_views(void);
+void unregister_dbf_views(void);
/**
* some more debug stuff
/*
- * $Id: ctcmain.c,v 1.63 2004/07/28 12:27:54 ptiedem Exp $
+ * $Id: ctcmain.c,v 1.61 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.63 $
+ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.61 $
*
*/
\f
print_banner(void)
{
static int printed = 0;
- char vbuf[] = "$Revision: 1.63 $";
+ char vbuf[] = "$Revision: 1.61 $";
char *version = vbuf;
if (printed)
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
__u16 len = *((__u16 *) pskb->data);
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
skb_pull(pskb, 2);
pskb->dev = dev;
if (ch->protocol == CTC_PROTO_LINUX_TTY)
ctc_tty_netif_rx(skb);
else
- netif_rx_ni(skb);
+ netif_rx(skb);
/**
* Successful rx; reset logflags
*/
static void inline
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (return_code) {
case 0:
fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
static void inline
ccw_unit_check(struct channel *ch, unsigned char sense)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->protocol != CTC_PROTO_LINUX_TTY)
{
struct sk_buff *skb;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
static __inline__ int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((ch->trans_skb == NULL) ||
(ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
if (ch->trans_skb != NULL)
unsigned long duration;
struct timespec done_stamp = xtime;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
{
struct channel *ch = (struct channel *) arg;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_TXIDLE);
fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
int check_len;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (len < 8) {
ctc_pr_debug("%s: got packet with length %d < 8\n",
struct channel *ch = (struct channel *) arg;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(fi) == CH_STATE_TXIDLE)
ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
__u16 buflen;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
int rc;
unsigned long saveflags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
fsm_newstate(fi, CH_STATE_SETUPWAIT);
int rc;
struct net_device *dev;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ch == NULL) {
ctc_pr_warn("ch_action_start ch=NULL\n");
return;
int rc;
int oldstate;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
if (event == CH_EVENT_STOP)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_STOPPED);
if (ch->trans_skb != NULL) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_NOTOP);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
/**
* Special case: Got UC_RCRESET on setmode.
* This means that remote side isn't setup. In this case
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: %s channel restart\n", dev->name,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
if (event == CH_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
fsm_newstate(fi, CH_STATE_RXERR);
ctc_pr_warn("%s: RX initialization failed\n", dev->name);
ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
struct channel *ch2;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
dev->name);
struct net_device *dev = ch->netdev;
unsigned long saveflags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
ctc_pr_debug("%s: TX retry failed, restarting channel\n",
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
ctc_pr_debug("%s: RX I/O error\n", dev->name);
struct net_device *dev = ch->netdev;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
ch_action_iofatal(fi, event, arg);
fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
{
struct channel *ch = channels;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
#ifdef DEBUG
ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
__func__, id, type);
struct net_device *dev;
struct ctc_priv *priv;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (__ctc_check_irb_error(cdev, irb))
return;
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
fsm_deltimer(&privptr->restart_timer);
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct channel *ch = privptr->channel[direction];
struct net_device *dev = (struct net_device *)arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
ctc_pr_debug("%s: Restarting\n", dev->name);
dev_action_stop(fi, event, arg);
fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
if (event == DEV_EVENT_RXUP)
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
if (privptr->protocol == CTC_PROTO_LINUX_TTY)
struct ll_header header;
int rc = 0;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
static int
ctc_open(struct net_device * dev)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
static int
ctc_close(struct net_device * dev)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
return 0;
}
int rc = 0;
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
/**
* Some sanity checks ...
*/
{
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((new_mtu < 576) || (new_mtu > 65527) ||
(new_mtu > (privptr->channel[READ]->max_bufsize -
LL_HEADER_LENGTH - 2)))
struct net_device *ndev;
int bs1;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
struct ctc_priv *priv;
int ll1;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
char *sbuf;
char *p;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!priv)
return;
sbuf = (char *)kmalloc(2048, GFP_KERNEL);
if (!privptr)
return NULL;
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
struct ctc_priv *priv;
int value;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = dev->driver_data;
int rc;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!get_device(&cgdev->dev))
return -ENODEV;
int ret;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(setup, 3, __FUNCTION__);
+ DBF_TEXT(setup, 2, __FUNCTION__);
privptr = cgdev->dev.driver_data;
if (!privptr)
struct ctc_priv *priv;
struct net_device *ndev;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = cgdev->dev.driver_data;
struct ctc_priv *priv;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
priv = cgdev->dev.driver_data;
if (!priv)
{
unregister_cu3088_discipline(&ctc_group_driver);
ctc_tty_cleanup();
- ctc_unregister_dbf_views();
+ unregister_dbf_views();
ctc_pr_info("CTC driver unloaded\n");
}
print_banner();
- ret = ctc_register_dbf_views();
+ ret = register_dbf_views();
if (ret){
- ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
+ ctc_pr_crit("ctc_init failed with register_dbf_views rc = %d\n", ret);
return ret;
}
ctc_tty_init();
ret = register_cu3088_discipline(&ctc_group_driver);
if (ret) {
ctc_tty_cleanup();
- ctc_unregister_dbf_views();
+ unregister_dbf_views();
}
return ret;
}
/*
- * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
+ * $Id: ctctty.c,v 1.21 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver, tty interface.
*
#include <linux/tty.h>
#include <linux/serial_reg.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
#include "ctctty.h"
int len;
struct tty_struct *tty;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
c = TTY_FLIPBUF_SIZE - tty->flip.count;
int ret = 1;
struct tty_struct *tty;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
int c = TTY_FLIPBUF_SIZE - tty->flip.count;
{
int i;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((!driver) || ctc_tty_shuttingdown)
return;
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
int i;
ctc_tty_info *info = NULL;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!skb)
return;
if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
int wake = 1;
int rc;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->netdev) {
if (skb)
kfree_skb(skb);
int skb_res;
struct sk_buff *skb;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
info->flags |= CTC_ASYNC_TX_LINESTAT;
unsigned int quot;
int i;
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->tty || !info->tty->termios)
return;
cflag = info->tty->termios->c_cflag;
static int
ctc_tty_startup(ctc_tty_info * info)
{
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (info->flags & CTC_ASYNC_INITIALIZED)
return 0;
#ifdef CTC_DEBUG_MODEM_OPEN
static void
ctc_tty_shutdown(ctc_tty_info * info)
{
- DBF_TEXT(trace, 3, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!(info->flags & CTC_ASYNC_INITIALIZED))
return;
#ifdef CTC_DEBUG_MODEM_OPEN
int total = 0;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 5, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
goto ex;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
ctc_tty_info *info;
unsigned long flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
+ DBF_TEXT(trace, 2, __FUNCTION__);
if (!tty)
goto ex;
spin_lock_irqsave(&ctc_tty_lock, flags);
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
return;
info->mcr &= ~UART_MCR_RTS;
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
return;
info->mcr |= UART_MCR_RTS;
uint result;
ulong flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, flags);
status = info->lsr;
spin_unlock_irqrestore(&ctc_tty_lock, flags);
uint result;
ulong flags;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
int error;
int retval;
- DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
ctc_tty_change_speed(info);
/* Handle transition to B0 */
unsigned long flags;
int retval;
- DBF_TEXT(trace, 4, __FUNCTION__);
/*
* If the device is in the middle of being closed, then block
* until it's done, and then try again.
int retval,
line;
- DBF_TEXT(trace, 3, __FUNCTION__);
line = tty->index;
if (line < 0 || line > CTC_TTY_MAX_DEVICES)
return -ENODEV;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
ulong flags;
ulong timeout;
- DBF_TEXT(trace, 3, __FUNCTION__);
+
if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
return;
spin_lock_irqsave(&ctc_tty_lock, flags);
*/
timeout = jiffies + HZ;
while (!(info->lsr & UART_LSR_TEMT)) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&ctc_tty_lock, flags);
- msleep(500);
+ schedule_timeout(HZ/2);
spin_lock_irqsave(&ctc_tty_lock, flags);
if (time_after(jiffies,timeout))
break;
{
ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
unsigned long saveflags;
- DBF_TEXT(trace, 3, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
return;
ctc_tty_shutdown(info);
unsigned long saveflags;
int again;
- DBF_TEXT(trace, 3, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
if ((!ctc_tty_shuttingdown) && info) {
again = ctc_tty_tint(info);
ctc_tty_info *info;
struct tty_driver *device;
- DBF_TEXT(trace, 2, __FUNCTION__);
driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
if (driver == NULL) {
printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
char *err;
char *p;
- DBF_TEXT(trace, 2, __FUNCTION__);
if ((!dev) || (!dev->name)) {
printk(KERN_WARNING
"ctc_tty_register_netdev called "
unsigned long saveflags;
ctc_tty_info *info = NULL;
- DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
if (driver->info[i].netdev == dev) {
ctc_tty_cleanup(void) {
unsigned long saveflags;
- DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
ctc_tty_shuttingdown = 1;
spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
/*
- * $Id: iucv.c,v 1.40 2004/08/04 12:29:33 cborntra Exp $
+ * $Id: iucv.c,v 1.38 2004/07/09 15:59:53 mschwide Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.40 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.38 $
*
*/
\f
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/ebcdic.h>
-#include <asm/smp.h>
#include <asm/ccwdev.h> //for root device stuff
/* FLAGS:
static void
iucv_banner(void)
{
- char vbuf[] = "$Revision: 1.40 $";
+ char vbuf[] = "$Revision: 1.38 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
+ case -ENODEV:
+ err = "No CPU can be reserved";
+ break;
case 0x03:
err = "Directory error";
break;
*/
#include <linux/types.h>
-#include <asm/debug.h>
-
-/**
- * Debug Facility stuff
- */
-#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
-#define IUCV_DBF_SETUP_INDEX 1
-#define IUCV_DBF_SETUP_NR_AREAS 1
-#define IUCV_DBF_SETUP_LEVEL 3
-
-#define IUCV_DBF_DATA_NAME "iucv_data"
-#define IUCV_DBF_DATA_LEN 128
-#define IUCV_DBF_DATA_INDEX 1
-#define IUCV_DBF_DATA_NR_AREAS 1
-#define IUCV_DBF_DATA_LEVEL 2
-
-#define IUCV_DBF_TRACE_NAME "iucv_trace"
-#define IUCV_DBF_TRACE_LEN 16
-#define IUCV_DBF_TRACE_INDEX 2
-#define IUCV_DBF_TRACE_NR_AREAS 1
-#define IUCV_DBF_TRACE_LEVEL 3
-
-#define IUCV_DBF_TEXT(name,level,text) \
- do { \
- debug_text_event(iucv_dbf_##name,level,text); \
- } while (0)
-
-#define IUCV_DBF_HEX(name,level,addr,len) \
- do { \
- debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
- } while (0)
-
-extern DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
-
-#define IUCV_DBF_TEXT_(name,level,text...) \
- do { \
- char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
- put_cpu_var(iucv_dbf_txt_buf); \
- } while (0)
-
-#define IUCV_DBF_SPRINTF(name,level,text...) \
- do { \
- debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
- debug_sprintf_event(iucv_dbf_trace, level, text ); \
- } while (0)
-
-/**
- * some more debug stuff
- */
-#define IUCV_HEXDUMP16(importance,header,ptr) \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
- *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
- *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
- *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
- *(((char*)ptr)+12),*(((char*)ptr)+13), \
- *(((char*)ptr)+14),*(((char*)ptr)+15)); \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)+16),*(((char*)ptr)+17), \
- *(((char*)ptr)+18),*(((char*)ptr)+19), \
- *(((char*)ptr)+20),*(((char*)ptr)+21), \
- *(((char*)ptr)+22),*(((char*)ptr)+23), \
- *(((char*)ptr)+24),*(((char*)ptr)+25), \
- *(((char*)ptr)+26),*(((char*)ptr)+27), \
- *(((char*)ptr)+28),*(((char*)ptr)+29), \
- *(((char*)ptr)+30),*(((char*)ptr)+31));
-
-static inline void
-iucv_hex_dump(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; i++) {
- if (i && !(i % 16))
- printk("\n");
- printk("%02x ", *(buf + i));
- }
- printk("\n");
-}
-/**
- * end of debug stuff
- */
-
#define uchar unsigned char
#define ushort unsigned short
#define ulong unsigned long
* Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*
- * $Revision: 1.85 $ $Date: 2004/08/04 11:05:43 $
+ * $Revision: 1.83 $ $Date: 2004/06/30 12:48:14 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/igmp.h>
-#include <linux/delay.h>
#include <net/arp.h>
#include <net/ip.h>
/**
* initialization string for output
*/
-#define VERSION_LCS_C "$Revision: 1.85 $"
+#define VERSION_LCS_C "$Revision: 1.83 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
static char debug_buffer[255];
card->dev->name);
return 0;
}
- msleep(3000);
+ schedule_timeout(3 * HZ);
}
PRINT_ERR("Error in Reseting LCS card!\n");
return -EIO;
/*
- * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
+ * $Id: netiucv.c,v 1.57 2004/06/30 09:26:40 braunu Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
+ * RELEASE-TAG: IUCV network driver $Revision: 1.57 $
*
*/
\f
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
\f
-#define PRINTK_HEADER " iucv: " /* for debugging */
-
-static struct device_driver netiucv_driver = {
- .name = "netiucv",
- .bus = &iucv_bus,
-};
-
/**
* Per connection profiling data
*/
/**
* Linked list of all connection structs.
*/
-static struct iucv_connection *iucv_connections;
+static struct iucv_connection *connections;
/**
* Representation of event-data for the
* match exactly as specified in order to give connection_pending()
* control.
*/
-static __u8 netiucv_mask[] = {
+static __u8 mask[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
\f
-/**
- * Debug Facility Stuff
- */
-static debug_info_t *iucv_dbf_setup = NULL;
-static debug_info_t *iucv_dbf_data = NULL;
-static debug_info_t *iucv_dbf_trace = NULL;
-
-DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
-
-static void
-iucv_unregister_dbf_views(void)
-{
- if (iucv_dbf_setup)
- debug_unregister(iucv_dbf_setup);
- if (iucv_dbf_data)
- debug_unregister(iucv_dbf_data);
- if (iucv_dbf_trace)
- debug_unregister(iucv_dbf_trace);
-}
-static int
-iucv_register_dbf_views(void)
-{
- iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
- IUCV_DBF_SETUP_INDEX,
- IUCV_DBF_SETUP_NR_AREAS,
- IUCV_DBF_SETUP_LEN);
- iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
- IUCV_DBF_DATA_INDEX,
- IUCV_DBF_DATA_NR_AREAS,
- IUCV_DBF_DATA_LEN);
- iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
- IUCV_DBF_TRACE_INDEX,
- IUCV_DBF_TRACE_NR_AREAS,
- IUCV_DBF_TRACE_LEN);
-
- if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
- (iucv_dbf_trace == NULL)) {
- iucv_unregister_dbf_views();
- return -ENOMEM;
- }
- debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
-
- debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
-
- debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
-
- return 0;
-}
-
/**
* Callback-wrappers, called from lowlevel iucv layer.
*****************************************************************************/
struct sk_buff *skb;
ll_header *header = (ll_header *)pskb->data;
- if (!header->next)
+ if (header->next == 0)
break;
skb_pull(pskb, NETIUCV_HDRLEN);
offset += header->next;
header->next -= NETIUCV_HDRLEN;
if (skb_tailroom(pskb) < header->next) {
- PRINT_WARN("%s: Illegal next field in iucv header: "
+ printk(KERN_WARNING
+ "%s: Illegal next field in iucv header: "
"%d > %d\n",
dev->name, header->next, skb_tailroom(pskb));
- IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
- header->next, skb_tailroom(pskb));
return;
}
skb_put(pskb, header->next);
pskb->mac.raw = pskb->data;
skb = dev_alloc_skb(pskb->len);
if (!skb) {
- PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
+ printk(KERN_WARNING
+ "%s Out of memory in netiucv_unpack_skb\n",
dev->name);
- IUCV_DBF_TEXT(data, 2,
- "Out of memory in netiucv_unpack_skb\n");
privptr->stats.rx_dropped++;
return;
}
struct iucv_event *ev = (struct iucv_event *)arg;
struct iucv_connection *conn = ev->conn;
iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
- struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
+ struct netiucv_priv *privptr = (struct netiucv_priv *)conn->netdev->priv;
__u32 msglen = eib->ln1msg2.ipbfln1f;
int rc;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (!conn->netdev) {
/* FRITZ: How to tell iucv LL to drop the msg? */
- PRINT_WARN("Received data for unlinked connection\n");
- IUCV_DBF_TEXT(data, 2,
- "Received data for unlinked connection\n");
+ printk(KERN_WARNING
+ "Received data for unlinked connection\n");
return;
}
if (msglen > conn->max_buffsize) {
/* FRITZ: How to tell iucv LL to drop the msg? */
privptr->stats.rx_dropped++;
- PRINT_WARN("msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
- IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
return;
}
conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
conn->rx_buff->len = 0;
rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
conn->rx_buff->data, msglen, NULL, NULL, NULL);
- if (rc || msglen < 5) {
+ if (rc != 0 || msglen < 5) {
privptr->stats.rx_errors++;
- PRINT_WARN("iucv_receive returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+ printk(KERN_INFO "iucv_receive returned %08x\n", rc);
return;
}
netiucv_unpack_skb(conn, conn->rx_buff);
unsigned long saveflags;
ll_header header;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (conn && conn->netdev && conn->netdev->priv)
privptr = (struct netiucv_priv *)conn->netdev->priv;
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc) {
+ if (rc != 0) {
conn->prof.tx_pending--;
fsm_newstate(fi, CONN_STATE_IDLE);
if (privptr)
privptr->stats.tx_errors += txpackets;
- PRINT_WARN("iucv_send returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ printk(KERN_INFO "iucv_send returned %08x\n",
+ rc);
} else {
if (privptr) {
privptr->stats.tx_packets += txpackets;
__u16 msglimit;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
conn->handle, conn, NULL, &msglimit);
- if (rc) {
- PRINT_WARN("%s: IUCV accept failed with error %d\n",
+ if (rc != 0) {
+ printk(KERN_WARNING
+ "%s: IUCV accept failed with error %d\n",
netdev->name, rc);
- IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
}
fsm_newstate(fi, CONN_STATE_IDLE);
iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
iucv_sever(eib->ippathid, udata);
if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Pending; "
- "pathid %d does not match original pathid %d\n",
+ printk(KERN_INFO
+ "%s: IR Connection Pending; pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connreject: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
iucv_sever(conn->pathid, udata);
}
}
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Complete; "
- "pathid %d does not match original pathid %d\n",
+ printk(KERN_INFO
+ "%s: IR Connection Complete; pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connack: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
conn->pathid = eib->ippathid;
}
netdev->tx_queue_len = eib->ipmsglim;
struct iucv_connection *conn = (struct iucv_connection *)arg;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
__u8 udata[16];
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
- PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
- IUCV_DBF_TEXT(data, 2,
- "conn_action_connsever: Remote dropped connection\n");
+ printk(KERN_INFO "%s: Remote dropped connection\n",
+ netdev->name);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
__u16 msglimit;
int rc;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
- if (!conn->handle) {
- IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
+ if (conn->handle == 0) {
conn->handle =
- iucv_register_program(iucvMagic, conn->userid,
- netiucv_mask,
+ iucv_register_program(iucvMagic, conn->userid, mask,
&netiucv_ops, conn);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- if (!conn->handle) {
+ if (conn->handle <= 0) {
fsm_newstate(fi, CONN_STATE_REGERR);
- conn->handle = NULL;
- IUCV_DBF_TEXT(setup, 2,
- "NULL from iucv_register_program\n");
+ conn->handle = 0;
return;
}
- PRINT_DEBUG("%s('%s'): registered successfully\n",
+ pr_debug("%s('%s'): registered successfully\n",
conn->netdev->name, conn->userid);
}
- PRINT_DEBUG("%s('%s'): connecting ...\n",
+ pr_debug("%s('%s'): connecting ...\n",
conn->netdev->name, conn->userid);
/* We must set the state before calling iucv_connect because the callback
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
- conn->userid, iucv_host, 0, NULL, &msglimit,
- conn->handle, conn);
+ conn->userid, iucv_host, 0, NULL, &msglimit, conn->handle,
+ conn);
switch (rc) {
case 0:
conn->netdev->tx_queue_len = msglimit;
CONN_EVENT_TIMER, conn);
return;
case 11:
- PRINT_INFO("%s: User %s is currently not available.\n",
+ printk(KERN_NOTICE
+ "%s: User %s is currently not available.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 12:
- PRINT_INFO("%s: User %s is currently not ready.\n",
+ printk(KERN_NOTICE
+ "%s: User %s is currently not ready.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 13:
- PRINT_WARN("%s: Too many IUCV connections.\n",
+ printk(KERN_WARNING
+ "%s: Too many IUCV connections.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 14:
- PRINT_WARN(
+ printk(KERN_WARNING
"%s: User %s has too many IUCV connections.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
- PRINT_WARN(
+ printk(KERN_WARNING
"%s: No IUCV authorization in CP directory.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
default:
- PRINT_WARN("%s: iucv_connect returned error %d\n",
+ printk(KERN_WARNING
+ "%s: iucv_connect returned error %d\n",
conn->netdev->name, rc);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
}
- IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
}
static void
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->handle)
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
netiucv_purge_skb_queue(&conn->commit_queue);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
struct iucv_connection *conn = ev->conn;
struct net_device *netdev = conn->netdev;
- PRINT_WARN("%s: Cannot connect without username\n",
+ printk(KERN_WARNING
+ "%s: Cannot connect without username\n",
netdev->name);
- IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
}
static const fsm_node conn_fsm[] = {
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
ev.conn = privptr->conn;
fsm_newstate(fi, DEV_STATE_STARTWAIT);
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
ev.conn = privptr->conn;
struct net_device *dev = (struct net_device *)arg;
struct netiucv_priv *privptr = dev->priv;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
fsm_newstate(fi, DEV_STATE_RUNNING);
- PRINT_INFO("%s: connected with remote side %s\n",
+ printk(KERN_INFO
+ "%s: connected with remote side %s\n",
dev->name, privptr->conn->userid);
- IUCV_DBF_TEXT(setup, 3,
- "connection is up and running\n");
break;
case DEV_STATE_STOPWAIT:
- PRINT_INFO(
- "%s: got connection UP event during shutdown!\n",
+ printk(KERN_INFO
+ "%s: got connection UP event during shutdown!!\n",
dev->name);
- IUCV_DBF_TEXT(data, 2,
- "dev_action_connup: in DEV_STATE_STOPWAIT\n");
break;
}
}
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
break;
case DEV_STATE_STOPWAIT:
fsm_newstate(fi, DEV_STATE_STOPPED);
- IUCV_DBF_TEXT(setup, 3, "connection is down\n");
break;
}
}
spin_lock_irqsave(&conn->collect_lock, saveflags);
if (conn->collect_len + l >
- (conn->max_buffsize - NETIUCV_HDRLEN)) {
+ (conn->max_buffsize - NETIUCV_HDRLEN))
rc = -EBUSY;
- IUCV_DBF_TEXT(data, 2,
- "EBUSY from netiucv_transmit_skb\n");
- } else {
+ else {
atomic_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
- PRINT_WARN("%s: Could not allocate tx_skb\n",
+ printk(KERN_WARNING
+ "%s: Could not allocate tx_skb\n",
conn->netdev->name);
- IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
rc = -ENOMEM;
return rc;
} else {
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc) {
+ if (rc != 0) {
struct netiucv_priv *privptr;
fsm_newstate(conn->fsm, CONN_STATE_IDLE);
conn->prof.tx_pending--;
skb_pull(skb, NETIUCV_HDRLEN);
skb_trim(skb, skb->len - NETIUCV_HDRLEN);
}
- PRINT_WARN("iucv_send returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ printk(KERN_INFO "iucv_send returned %08x\n",
+ rc);
} else {
if (copied)
dev_kfree_skb(skb);
*/
static int
netiucv_open(struct net_device *dev) {
- fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
+ fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
int rc = 0;
struct netiucv_priv *privptr = dev->priv;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
/**
* Some sanity checks ...
*/
if (skb == NULL) {
- PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
- IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+ printk(KERN_WARNING "%s: NULL sk_buff passed\n", dev->name);
privptr->stats.tx_dropped++;
return 0;
}
- if (skb_headroom(skb) < NETIUCV_HDRLEN) {
- PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
+ if (skb_headroom(skb) < (NETIUCV_HDRLEN)) {
+ printk(KERN_WARNING
+ "%s: Got sk_buff with head room < %ld bytes\n",
dev->name, NETIUCV_HDRLEN);
- IUCV_DBF_TEXT(data, 2,
- "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
return 0;
return 0;
}
- if (netiucv_test_and_set_busy(dev)) {
- IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+ if (netiucv_test_and_set_busy(dev))
return -EBUSY;
- }
+
dev->trans_start = jiffies;
- if (netiucv_transmit_skb(privptr->conn, skb))
+ if (netiucv_transmit_skb(privptr->conn, skb) != 0)
rc = 1;
netiucv_clear_busy(dev);
return rc;
static struct net_device_stats *
netiucv_stats (struct net_device * dev)
{
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return &((struct netiucv_priv *)dev->priv)->stats;
}
static int
netiucv_change_mtu (struct net_device * dev, int new_mtu)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
- IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
+ if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX))
return -EINVAL;
- }
dev->mtu = new_mtu;
return 0;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
char username[10];
int i;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int)count);
+ printk(KERN_WARNING
+ "netiucv: username too long (%d)!\n", (int)count);
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- PRINT_WARN("netiucv: Invalid char %c in username!\n",
- *p);
- IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n",
- *p);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in username!\n");
return -EINVAL;
}
}
username[i++] = ' ';
username[9] = '\0';
- if (memcmp(username, priv->conn->userid, 8)) {
+ if (memcmp(username, priv->conn->userid, 8) != 0) {
/* username changed */
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: device %s active, connected to %s\n",
dev->bus_id, priv->conn->userid);
- PRINT_WARN("netiucv: user cannot be updated\n");
- IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+ printk(KERN_WARNING
+ "netiucv: user cannot be updated\n");
return -EBUSY;
}
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
char *e;
int bs1;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count >= 39)
return -EINVAL;
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- PRINT_WARN("netiucv: Invalid character in buffer!\n");
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in buffer!\n");
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
- PRINT_WARN("netiucv: Given buffer size %d too large.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too large\n",
+ printk(KERN_WARNING
+ "netiucv: Given buffer size %d too large.\n",
bs1);
+
return -EINVAL;
}
if ((ndev->flags & IFF_RUNNING) &&
- (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
- PRINT_WARN("netiucv: Given buffer size %d too small.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too small\n",
- bs1);
+ (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2)))
return -EINVAL;
- }
- if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
- PRINT_WARN("netiucv: Given buffer size %d too small.\n",
- bs1);
- IUCV_DBF_TEXT_(setup, 2,
- "buffer_write: buffer size %d too small\n",
- bs1);
+ if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN))
return -EINVAL;
- }
priv->conn->max_buffsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
dev_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
conn_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
maxmulti_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
maxmulti_write (struct device *dev, const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+
priv->conn->prof.maxmulti = 0;
return count;
}
maxcq_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.maxcqueue = 0;
return count;
}
sdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.doios_single = 0;
return count;
}
mdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
priv->conn->prof.doios_multi = 0;
return count;
}
txlen_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.txlen = 0;
return count;
}
txtime_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
+
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_time = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_pending = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
- IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_max_pending = 0;
return count;
}
{
int ret;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
+
ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
if (ret)
return ret;
static inline void
netiucv_remove_files(struct device *dev)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
int ret;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (dev) {
memset(dev, 0, sizeof(struct device));
* but legitime ...).
*/
dev->release = (void (*)(struct device *))kfree;
- dev->driver = &netiucv_driver;
} else
return -ENOMEM;
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
- priv->dev = dev;
dev->driver_data = priv;
+ priv->dev = dev;
return 0;
out_unreg:
static void
netiucv_unregister_device(struct device *dev)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
netiucv_remove_files(dev);
device_unregister(dev);
}
/**
* Allocate and initialize a new connection structure.
- * Add it to the list of netiucv connections;
+ * Add it to the list of connections;
*/
static struct iucv_connection *
netiucv_new_connection(struct net_device *dev, char *username)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
struct iucv_connection *conn =
(struct iucv_connection *)
kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
/**
* Release a connection structure and remove it from the
- * list of netiucv connections.
+ * list of connections.
*/
static void
netiucv_remove_connection(struct iucv_connection *conn)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
+
+ pr_debug("%s() called\n", __FUNCTION__);
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (conn == NULL)
return;
while (*clist) {
if (*clist == conn) {
*clist = conn->next;
- if (conn->handle) {
+ if (conn->handle != 0) {
iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ conn->handle = 0;
}
fsm_deltimer(&conn->timer);
kfree_fsm(conn->fsm);
{
struct netiucv_priv *privptr;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (!dev)
return;
netiucv_remove_connection(privptr->conn);
if (privptr->fsm)
kfree_fsm(privptr->fsm);
- privptr->conn = NULL; privptr->fsm = NULL;
+ privptr->conn = 0; privptr->fsm = 0;
/* privptr gets freed by free_netdev() */
}
free_netdev(dev);
netiucv_setup_netdevice);
if (!dev)
return NULL;
- if (dev_alloc_name(dev, dev->name) < 0) {
- free_netdev(dev);
- return NULL;
- }
- privptr = (struct netiucv_priv *)dev->priv;
+ privptr = (struct netiucv_priv *)dev->priv;
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
- if (!privptr->fsm) {
+ if (privptr->fsm == NULL) {
free_netdev(dev);
return NULL;
}
if (!privptr->conn) {
kfree_fsm(privptr->fsm);
free_netdev(dev);
- IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
return NULL;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
int i, ret;
struct net_device *dev;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+ printk(KERN_WARNING
+ "netiucv: username too long (%d)!\n", (int)count);
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- PRINT_WARN("netiucv: Invalid character in username!\n");
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
+ printk(KERN_WARNING
+ "netiucv: Invalid character in username!\n");
return -EINVAL;
}
}
username[9] = '\0';
dev = netiucv_init_netdevice(username);
if (!dev) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: Could not allocate network device structure "
"for user '%s'\n", netiucv_printname(username));
- IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
}
-
- if ((ret = netiucv_register_device(dev))) {
- IUCV_DBF_TEXT_(setup, 2,
- "ret %d from netiucv_register_device\n", ret);
+
+ if ((ret = register_netdev(dev))) {
goto out_free_ndev;
}
- /* sysfs magic */
- SET_NETDEV_DEV(dev,
- (struct device*)((struct netiucv_priv*)dev->priv)->dev);
-
- if ((ret = register_netdev(dev))) {
- netiucv_unregister_device((struct device*)
- ((struct netiucv_priv*)dev->priv)->dev);
+ if ((ret = netiucv_register_device(dev))) {
+ unregister_netdev(dev);
goto out_free_ndev;
}
- PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
+ /* sysfs magic */
+ SET_NETDEV_DEV(dev, (struct device*)((struct netiucv_priv*)dev->priv)->dev);
+ printk(KERN_INFO "%s: '%s'\n", dev->name, netiucv_printname(username));
return count;
out_free_ndev:
- PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
- IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
+ printk(KERN_WARNING
+ "netiucv: Could not register '%s'\n", dev->name);
netiucv_free_netdevice(dev);
return ret;
}
static ssize_t
remove_write (struct device_driver *drv, const char *buf, size_t count)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &connections;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
char *p;
int i;
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ pr_debug("%s() called\n", __FUNCTION__);
if (count >= IFNAMSIZ)
count = IFNAMSIZ-1;
continue;
}
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
+ printk(KERN_WARNING
"netiucv: net device %s active with peer %s\n",
ndev->name, priv->conn->userid);
- PRINT_WARN("netiucv: %s cannot be removed\n",
+ printk(KERN_WARNING
+ "netiucv: %s cannot be removed\n",
ndev->name);
- IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EBUSY;
}
unregister_netdev(ndev);
netiucv_unregister_device(dev);
return count;
}
- PRINT_WARN("netiucv: net device %s unknown\n", name);
- IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+ printk(KERN_WARNING
+ "netiucv: net device %s unknown\n", name);
return -EINVAL;
}
DRIVER_ATTR(remove, 0200, NULL, remove_write);
+static struct device_driver netiucv_driver = {
+ .name = "netiucv",
+ .bus = &iucv_bus,
+};
+
static void
netiucv_banner(void)
{
- char vbuf[] = "$Revision: 1.63 $";
+ char vbuf[] = "$Revision: 1.57 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
*p = '\0';
} else
version = " ??? ";
- PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
+ printk(KERN_INFO "NETIUCV driver Version%s initialized\n", version);
}
static void __exit
netiucv_exit(void)
{
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- while (iucv_connections) {
- struct net_device *ndev = iucv_connections->netdev;
+ while (connections) {
+ struct net_device *ndev = connections->netdev;
struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
struct device *dev = priv->dev;
driver_remove_file(&netiucv_driver, &driver_attr_connection);
driver_remove_file(&netiucv_driver, &driver_attr_remove);
driver_unregister(&netiucv_driver);
- iucv_unregister_dbf_views();
- PRINT_INFO("NETIUCV driver unloaded\n");
+ printk(KERN_INFO "NETIUCV driver unloaded\n");
return;
}
{
int ret;
- ret = iucv_register_dbf_views();
- if (ret) {
- PRINT_WARN("netiucv_init failed, "
- "iucv_register_dbf_views rc = %d\n", ret);
- return ret;
- }
- IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ret = driver_register(&netiucv_driver);
- if (ret) {
- PRINT_ERR("NETIUCV: failed to register driver.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
- iucv_unregister_dbf_views();
+ if (ret != 0) {
+ printk(KERN_ERR "NETIUCV: failed to register driver.\n");
return ret;
}
/* Add entry for specifying connections. */
ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
- if (!ret) {
+ if (ret == 0) {
ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
netiucv_banner();
} else {
- PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
+ printk(KERN_ERR "NETIUCV: failed to add driver attribute.\n");
driver_unregister(&netiucv_driver);
- iucv_unregister_dbf_views();
}
return ret;
}
#include "qeth_mpc.h"
-#define VERSION_QETH_H "$Revision: 1.113 $"
+#define VERSION_QETH_H "$Revision: 1.111 $"
#ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6"
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
-#define atomic_swap(a,b) xchg((int *)a.counter, b)
-
/*
* Common IO related definitions
*/
struct qeth_card;
-enum qeth_out_q_states {
- QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- QETH_OUT_Q_LOCKED_FLUSH,
-};
-
struct qeth_qdio_out_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int queue_no;
struct qeth_card *card;
- atomic_t state;
+ spinlock_t lock;
volatile int do_pack;
/*
* index of buffer to be filled by driver; state EMPTY or PACKING
#ifndef __QETH_FS_H__
#define __QETH_FS_H__
-#define VERSION_QETH_FS_H "$Revision: 1.9 $"
+#define VERSION_QETH_FS_H "$Revision: 1.8 $"
extern const char *VERSION_QETH_PROC_C;
extern const char *VERSION_QETH_SYS_C;
return "HSTR";
case QETH_LINK_TYPE_GBIT_ETH:
return "OSD_1000";
- case QETH_LINK_TYPE_10GBIT_ETH:
- return "OSD_10GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
/*
*
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.130 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.125 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
- * $Revision: 1.130 $ $Date: 2004/08/05 11:21:50 $
+ * $Revision: 1.125 $ $Date: 2004/06/29 17:28:24 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "qeth_mpc.h"
#include "qeth_fs.h"
-#define VERSION_QETH_C "$Revision: 1.130 $"
+#define VERSION_QETH_C "$Revision: 1.125 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
}
add_timer(&timer);
wait_event(reply->wait_q, reply->received);
- del_timer_sync(&timer);
+ del_timer(&timer);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
QETH_DBF_TEXT(qerr,2,"unexeob");
QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
card->stats.rx_errors++;
return NULL;
}
qeth_rebuild_skb(card, skb, hdr);
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
continue;
}
skb->dev = card->dev;
static inline struct qeth_buffer_pool_entry *
qeth_get_buffer_pool_entry(struct qeth_card *card)
{
- struct qeth_buffer_pool_entry *entry;
+ struct qeth_buffer_pool_entry *entry, *tmp;
QETH_DBF_TEXT(trace, 6, "gtbfplen");
- if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
- struct qeth_buffer_pool_entry, list);
+ entry = NULL;
+ list_for_each_entry_safe(entry, tmp,
+ &card->qdio.in_buf_pool.entry_list, list){
list_del_init(&entry->list);
- return entry;
+ break;
}
- return NULL;
+ return entry;
}
static inline void
buf->buffer->element[i].flags = 0;
while ((skb = skb_dequeue(&buf->skb_list))){
atomic_dec(&skb->users);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_irq(skb);
}
}
buf->next_element_to_fill = 0;
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
queue->card->stats.tx_errors += count;
- /* this must not happen under normal circumstances. if it
- * happens something is really wrong -> recover */
- qeth_schedule_recovery(queue->card);
+ /* ok, since do_QDIO went wrong the buffers have not been given
+ * to the hardware. they still belong to us, so we can clear
+ * them and reuse them, i.e. set back next_buf_to_fill */
+ for (i = index; i < index + count; ++i) {
+ buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ qeth_clear_output_buffer(queue, buf);
+ }
+ queue->next_buf_to_fill = index;
return;
}
atomic_add(count, &queue->used_buffers);
}
/*
- * Switched to packing state if the number of used buffers on a queue
- * reaches a certain limit.
+ * switches between PACKING and non-PACKING state if needed.
+ * has to be called holding queue->lock
*/
-static inline void
-qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
+static inline int
+qeth_switch_packing_state(struct qeth_qdio_out_q *queue)
{
+ struct qeth_qdio_out_buffer *buffer;
+ int flush_count = 0;
+
+ QETH_DBF_TEXT(trace, 6, "swipack");
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
#endif
queue->do_pack = 1;
}
- }
-}
-
-/*
- * Switches from packing to non-packing mode. If there is a packing
- * buffer on the queue this buffer will be prepared to be flushed.
- * In that case 1 is returned to inform the caller. If no buffer
- * has to be flushed, zero is returned.
- */
-static inline int
-qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
-{
- struct qeth_qdio_out_buffer *buffer;
- int flush_count = 0;
-
- if (queue->do_pack) {
+ } else {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
return flush_count;
}
-/*
- * Called to flush a packing buffer if no more pci flags are on the queue.
- * Checks if there is a packing buffer and prepares it to be flushed.
- * In that case returns 1, otherwise zero.
- */
-static inline int
-qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
+static inline void
+qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue, int under_int)
{
struct qeth_qdio_out_buffer *buffer;
+ int index;
- buffer = &queue->bufs[queue->next_buf_to_fill];
+ index = queue->next_buf_to_fill;
+ buffer = &queue->bufs[index];
if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)){
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
- return 1;
- }
- return 0;
-}
-
-static inline void
-qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
-{
- int index;
- int flush_cnt = 0;
-
- /*
- * check if weed have to switch to non-packing mode or if
- * we have to get a pci flag out on the queue
- */
- if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
- !atomic_read(&queue->set_pci_flags_count)){
- if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
- QETH_OUT_Q_UNLOCKED) {
- /*
- * If we get in here, there was no action in
- * do_send_packet. So, we check if there is a
- * packing buffer to be flushed here.
- */
- /* TODO: try if we get a performance improvement
- * by calling netif_stop_queue here */
- /* save start index for flushing */
- index = queue->next_buf_to_fill;
- flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
- if (!flush_cnt &&
- !atomic_read(&queue->set_pci_flags_count))
- flush_cnt +=
- qeth_flush_buffers_on_no_pci(queue);
- /* were done with updating critical queue members */
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- /* flushing can be done outside the lock */
- if (flush_cnt)
- qeth_flush_buffers(queue, 1, index, flush_cnt);
- }
+ qeth_flush_buffers(queue, under_int, index, 1);
}
}
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
- /* check if we need to do something on this outbound queue */
- qeth_check_outbound_queue(queue);
netif_wake_queue(card->dev);
#ifdef CONFIG_QETH_PERF_STATS
card->qdio.out_qs[i]->do_pack = 0;
atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- atomic_set(&card->qdio.out_qs[i]->state,
- QETH_OUT_Q_UNLOCKED);
+ spin_lock_init(&card->qdio.out_qs[i]->lock);
}
return 0;
}
card->perf_stats.outbound_start_time = qeth_get_micros();
#endif
/*
- * We only call netif_stop_queue in case of errors. Since we've
- * got our own synchronization on queues we can keep the stack's
- * queue running.
+ * dev_queue_xmit should ensure that we are called packet
+ * after packet
*/
- if ((rc = qeth_send_packet(card, skb)))
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
+ if (!(rc = qeth_send_packet(card, skb)))
+ netif_wake_queue(dev);
#ifdef CONFIG_QETH_PERF_STATS
card->perf_stats.outbound_time += qeth_get_micros() -
QETH_DBF_TEXT(trace, 6, "dosndpfa");
- /* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
- /* ... now we've got the queue */
+ spin_lock(&queue->lock);
index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ spin_unlock(&queue->lock);
return -EBUSY;
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
qeth_flush_buffers(queue, 0, index, 1);
+ spin_unlock(&queue->lock);
return 0;
}
QETH_DBF_TEXT(trace, 6, "dosndpkt");
- /* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
+ spin_lock(&queue->lock);
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ spin_unlock(&queue->lock);
return -EBUSY;
}
- /* check if we need to switch packing state of this queue */
- qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack){
/* does packet fit in current buffer? */
if((QETH_MAX_BUFFER_ELEMENTS(card) -
/* we did a step forward, so check buffer state again */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
+ qeth_flush_buffers(queue, 0, start_index, 1);
+ spin_unlock(&queue->lock);
/* return EBUSY because we sent old packet, not
* the current one */
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
}
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
}
- /*
- * queue->state will go from LOCKED -> UNLOCKED or from
- * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
- * (switch packing state or flush buffer to get another pci flag out).
- * In that case we will enter this loop
- */
- while (atomic_dec_return(&queue->state)){
- /* check if we can go back to non-packing state */
- flush_count += qeth_switch_to_nonpacking_if_needed(queue);
- /*
- * check if we need to flush a packing buffer to get a pci
- * flag out on the queue
- */
- if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
- flush_count += qeth_flush_buffers_on_no_pci(queue);
- }
- /* at this point the queue is UNLOCKED again */
-out:
+ /* check if we need to switch packing state of this queue */
+ flush_count += qeth_switch_packing_state(queue);
+
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
+ if (!atomic_read(&queue->set_pci_flags_count))
+ qeth_flush_buffers_on_no_pci(queue, 0);
+
+ spin_unlock(&queue->lock);
return rc;
}
switch(regnum){
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
- if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ if(card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
/*
*
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.33 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.32 $)
*
* Linux on zSeries OSA Express and HiperSockets support
* This file contains code related to sysfs.
#include "qeth_mpc.h"
#include "qeth_fs.h"
-const char *VERSION_QETH_SYS_C = "$Revision: 1.33 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.32 $";
/*****************************************************************************/
/* */
(card->state != CARD_STATE_RECOVER))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 10);
+ i = simple_strtoul(buf, &tmp, 16);
if ((i < 0) || (i > MAX_ADD_HHLEN)) {
PRINT_WARN("add_hhlen out of range\n");
return -EINVAL;
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_AUX_REVISION "$Revision: 1.115 $"
+#define ZFCP_AUX_REVISION "$Revision: 1.114 $"
#include "zfcp_ext.h"
/* written against the module interface */
static int __init zfcp_module_init(void);
+int zfcp_reboot_handler(struct notifier_block *, unsigned long, void *);
+
/* FCP related */
static void zfcp_ns_gid_pn_handler(unsigned long);
/* initialise configuration rw lock */
rwlock_init(&zfcp_data.config_lock);
+ zfcp_data.reboot_notifier.notifier_call = zfcp_reboot_handler;
+ register_reboot_notifier(&zfcp_data.reboot_notifier);
+
/* save address of data structure managing the driver module */
zfcp_data.scsi_host_template.module = THIS_MODULE;
goto out;
out_ccw_register:
+ unregister_reboot_notifier(&zfcp_data.reboot_notifier);
misc_deregister(&zfcp_cfdc_misc);
out_misc_register:
#ifdef CONFIG_S390_SUPPORT
return retval;
}
+/*
+ * This function is called automatically by the kernel whenever a reboot or a
+ * shut-down is initiated and zfcp is still loaded
+ *
+ * locks: zfcp_data.config_sema is taken prior to shutting down the module
+ * and removing all structures
+ * returns: NOTIFY_DONE in all cases
+ */
+int
+zfcp_reboot_handler(struct notifier_block *notifier, unsigned long code,
+ void *ptr)
+{
+ zfcp_ccw_unregister();
+ return NOTIFY_DONE;
+}
+
+
/*
* function: zfcp_cfdc_dev_ioctl
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_CCW_C_REVISION "$Revision: 1.56 $"
+#define ZFCP_CCW_C_REVISION "$Revision: 1.55 $"
#include "zfcp_ext.h"
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
-static void zfcp_ccw_shutdown(struct device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
- .driver = {
- .shutdown = zfcp_ccw_shutdown,
- },
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
ccw_driver_unregister(&zfcp_ccw_driver);
}
-/**
- * zfcp_ccw_shutdown - gets called on reboot/shutdown
- *
- * Makes sure that QDIO queues are down when the system gets stopped.
- */
-static void
-zfcp_ccw_shutdown(struct device *dev)
-{
- struct zfcp_adapter *adapter;
-
- adapter = dev_get_drvdata(dev);
- zfcp_erp_adapter_shutdown(adapter, 0);
- zfcp_erp_wait(adapter);
-}
-
#undef ZFCP_LOG_AREA
#define ZFCP_DEF_H
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_DEF_REVISION "$Revision: 1.81 $"
+#define ZFCP_DEF_REVISION "$Revision: 1.75 $"
/*************************** INCLUDES *****************************************/
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
+#include <linux/reboot.h>
#include <linux/mempool.h>
#include <linux/syscalls.h>
#include <linux/ioctl.h>
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.1.3"
+#define ZFCP_VERSION "4.0.0"
static inline void *
zfcp_sg_to_address(struct scatterlist *list)
lists */
struct semaphore config_sema; /* serialises configuration
changes */
+ struct notifier_block reboot_notifier; /* used to register cleanup
+ functions */
atomic_t loglevel; /* current loglevel */
char init_busid[BUS_ID_SIZE];
wwn_t init_wwpn;
if (ZFCP_LOG_CHECK(level)) { \
_zfcp_hex_dump(addr, count); \
}
+/*
+ * Not yet optimal but useful:
+ * Waits until the condition is met or the timeout occurs.
+ * The condition may be a function call. This allows executing
+ * some additional instructions in addition
+ * to a simple condition check.
+ * The timeout is modified on exit and holds the remaining time.
+ * Thus it is zero if a timeout occurred, i.e. the condition was
+ * not met in the specified interval.
+ */
+#define __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
+do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ while (!(condition) && timeout) \
+ timeout = schedule_timeout(timeout); \
+ current->state = TASK_RUNNING; \
+} while (0);
+
+#define ZFCP_WAIT_EVENT_TIMEOUT(waitqueue, timeout, condition) \
+do { \
+ wait_queue_t entry; \
+ init_waitqueue_entry(&entry, current); \
+ add_wait_queue(&waitqueue, &entry); \
+ __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
+ remove_wait_queue(&waitqueue, &entry); \
+} while (0);
#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_ERP_REVISION "$Revision: 1.61 $"
+#define ZFCP_ERP_REVISION "$Revision: 1.56 $"
#include "zfcp_ext.h"
int retval = 0;
if (send_els->status != 0) {
- ZFCP_LOG_NORMAL("ELS request timed out, force physical port "
- "reopen of port 0x%016Lx on adapter %s\n",
+ ZFCP_LOG_NORMAL("ELS request timed out, physical port reopen "
+ "of port 0x%016Lx on adapter %s failed\n",
port->wwpn, zfcp_get_busid_by_port(port));
debug_text_event(port->adapter->erp_dbf, 3, "forcreop");
retval = zfcp_erp_port_forced_reopen(port, 0);
zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
+ unsigned long timeout;
struct zfcp_adapter *adapter = erp_action->adapter;
retval = zfcp_erp_adapter_strategy_close(erp_action);
ZFCP_LOG_INFO("Waiting to allow the adapter %s "
"to recover itself\n",
zfcp_get_busid_by_adapter(adapter));
- msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
+ /*
+ * SUGGESTION: substitute by
+ * timeout = ZFCP_TYPE2_RECOVERY_TIME;
+ * __ZFCP_WAIT_EVENT_TIMEOUT(timeout, 0);
+ */
+ timeout = ZFCP_TYPE2_RECOVERY_TIME;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
}
return retval;
failed_qdio_activate:
debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
failed_qdio_establish:
debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
/*
ZFCP_LOG_DEBUG("host connection still initialising... "
"waiting and retrying...\n");
/* sleep a little bit before retry */
- msleep(jiffies_to_msecs(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP));
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP);
}
} while ((retries--) &&
atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_FSF_C_REVISION "$Revision: 1.55 $"
+#define ZFCP_FSF_C_REVISION "$Revision: 1.49 $"
#include "zfcp_ext.h"
ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
zfcp_get_busid_by_adapter(adapter));
/* wait for woken intiators to clean up their requests */
- msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(ZFCP_FSFREQ_CLEANUP_TIMEOUT);
}
/* consistency check */
{
int retval = 0;
unsigned long lock_flags;
- volatile struct qdio_buffer_element *sbale;
/* setup new FSF request */
retval = zfcp_fsf_req_create(erp_action->adapter,
goto out;
}
- sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
- erp_action->fsf_req->sbal_curr, 0);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
-
/* mark port as being closed */
atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
&erp_action->port->status);
unsigned long *lock_flags)
{
int condition;
+ unsigned long timeout = ZFCP_SBAL_TIMEOUT;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
- wait_event_interruptible_timeout(adapter->request_wq,
- (condition =
- zfcp_fsf_req_create_sbal_check
- (lock_flags, req_queue, 1)),
- ZFCP_SBAL_TIMEOUT);
+ ZFCP_WAIT_EVENT_TIMEOUT(adapter->request_wq, timeout,
+ (condition =
+ (zfcp_fsf_req_create_sbal_check)
+ (lock_flags, req_queue, 1)));
if (!condition) {
return -EIO;
}
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
- ret = -EIO;
goto failed_sbals;
}
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.41 $"
+#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.40 $"
#include "zfcp_ext.h"
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
char *endp;
- int retval = 0;
+ int retval = -EINVAL;
down(&zfcp_data.config_sema);
}
fcp_lun = simple_strtoull(buf, &endp, 0);
- if ((endp + 1) < (buf + count)) {
- retval = -EINVAL;
+ if ((endp + 1) < (buf + count))
goto out;
- }
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <asm/oplib.h>
#include <asm/ebus.h>
#define __KERNEL_SYSCALLS__
read_unlock(&tasklist_lock);
if (!found)
break;
- msleep(1000);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ);
+ current->state = TASK_RUNNING;
}
kenvctrld_task = NULL;
}
wd_dev.initialized = 1;
}
- return(nonseekable_open(inode, f));
+ return(0);
}
static int wd_release(struct inode *inode, struct file *file)
return(-EINVAL);
}
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
wd_pingtimer(pTimer);
return 1;
if (!found)
break;
- msleep(1000);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ);
}
kenvctrld_task = NULL;
}
static int riowd_open(struct inode *inode, struct file *filp)
{
- nonseekable_open(inode, filp);
return 0;
}
static ssize_t riowd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (count) {
riowd_pingtimer();
return 1;
void __init sun4_dvma_init(void)
{
struct sbus_dma *dma;
+ struct sbus_dma *dchain;
struct resource r;
if(sun4_dma_physaddr) {
u32 current_time_ms;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
- void __user *argp = (void __user *)arg;
/* Only let one of these through at a time */
if (down_interruptible(&tw_dev->ioctl_sem)) {
}
/* First copy down the driver command */
- if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
+ if (copy_from_user(&driver_command, (void *)arg, sizeof(TW_Ioctl_Driver_Command)))
goto out2;
/* Check data buffer size */
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, (void *)arg, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
goto out3;
/* See which ioctl we are doing */
twa_get_request_id(tw_dev, &request_id);
/* Flag internal command */
- tw_dev->srb[request_id] = NULL;
+ tw_dev->srb[request_id] = 0;
/* Flag chrdev ioctl */
tw_dev->chrdev_request_id = request_id;
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user((void *)arg, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
/* clear all the negotiated parameters */
__shost_for_each_device(SDp, host)
- SDp->hostdata = NULL;
+ SDp->hostdata = 0;
/* clear all the slots and their pending commands */
for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64)
+ depends on (PCI || ISA || MCA) && SCSI
---help---
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
config SCSI_EATA
tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
- depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+ depends on (ISA || EISA || PCI) && SCSI
---help---
This driver supports all EATA/DMA-compliant SCSI host adapters. DPT
ISA and all EISA I/O addresses are probed looking for the "EATA"
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
+ depends on (ISA || EISA || PCI) && SCSI
---help---
Formerly called GDT SCSI Disk Array Controller Support.
* and see if we can do an information transfer,
* with failures we will restart.
*/
- hostdata->selecting = NULL;
+ hostdata->selecting = 0;
/* RvC: have to preset this to indicate a new command is being performed */
if (!NCR5380_select(instance, tmp,
to go to sleep */
}
- hostdata->selecting = NULL;/* clear this pointer, because we passed the
+ hostdata->selecting = 0; /* clear this pointer, because we passed the
waiting period */
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
* only used for debugging.
*/
-#ifdef DBG
+#if DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
#define FIB_COUNTER_INCREMENT(counter)
#endif /* !PCMCIA */
static int registered_count=0;
-static struct Scsi_Host *aha152x_host[2];
+static struct Scsi_Host *aha152x_host[2] = {0, 0};
static Scsi_Host_Template aha152x_driver_template;
/*
void (*end)(struct Scsi_Host *);
int spio;
} states[] = {
- { "idle", NULL, NULL, NULL, 0},
- { "unknown", NULL, NULL, NULL, 0},
- { "seldo", NULL, seldo_run, NULL, 0},
- { "seldi", NULL, seldi_run, NULL, 0},
- { "selto", NULL, selto_run, NULL, 0},
- { "busfree", NULL, busfree_run, NULL, 0},
+ { "idle", 0, 0, 0, 0},
+ { "unknown", 0, 0, 0, 0},
+ { "seldo", 0, seldo_run, 0, 0},
+ { "seldi", 0, seldi_run, 0, 0},
+ { "selto", 0, selto_run, 0, 0},
+ { "busfree", 0, busfree_run, 0, 0},
{ "msgo", msgo_init, msgo_run, msgo_end, 1},
{ "cmd", cmd_init, cmd_run, cmd_end, 1},
- { "msgi", NULL, msgi_run, msgi_end, 1},
- { "status", NULL, status_run, NULL, 1},
+ { "msgi", 0, msgi_run, msgi_end, 1},
+ { "status", 0, status_run, 0, 1},
{ "datai", datai_init, datai_run, datai_end, 0},
{ "datao", datao_init, datao_run, datao_end, 0},
- { "parerr", NULL, parerr_run, NULL, 0},
- { "rsti", NULL, rsti_run, NULL, 0},
+ { "parerr", 0, parerr_run, 0, 0},
+ { "rsti", 0, rsti_run, 0, 0},
};
/* setup & interrupt */
if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
return aha152x_host[i];
- return NULL;
+ return 0;
}
static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
goto out_host_put;
}
- if( scsi_add_host(shpnt, NULL) ) {
+ if( scsi_add_host(shpnt, 0) ) {
free_irq(shpnt->irq, shpnt);
printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no);
goto out_host_put;
return shpnt;
out_host_put:
- aha152x_host[registered_count]=NULL;
+ aha152x_host[registered_count]=0;
scsi_host_put(shpnt);
- return NULL;
+ return 0;
}
void aha152x_release(struct Scsi_Host *shpnt)
}
}
- SCNEXT(SCpnt) = NULL;
+ SCNEXT(SCpnt) = 0;
SCSEM(SCpnt) = sem;
/* setup scratch area
}
#endif
- return aha152x_internal_queue(SCpnt, NULL, 0, done);
+ return aha152x_internal_queue(SCpnt, 0, 0, done);
}
DO_UNLOCK(flags);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=NULL;
+ SCpnt->host_scribble=0;
return SUCCESS;
}
SCpnt->cmd_len = 0;
SCpnt->use_sg = 0;
- SCpnt->request_buffer = NULL;
+ SCpnt->request_buffer = 0;
SCpnt->request_bufflen = 0;
init_timer(&timer);
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=NULL;
+ SCpnt->host_scribble=0;
ret = SUCCESS;
} else {
next = SCNEXT(ptr);
} else {
printk(DEBUG_LEAD "queue corrupted at %p\n", CMDINFO(ptr), ptr);
- next = NULL;
+ next = 0;
}
if (!ptr->device->soft_reset) {
remove_SC(SCs, ptr);
HOSTDATA(shpnt)->commands--;
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
}
ptr = next;
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See Documentation/scsi/aha152x.txt for details.\n");
+ " See /usr/src/linux/Documentation/scsi/aha152x.txt for details.\n");
}
} else {
info_array[0] = info[0];
printk(ERR_LEAD "there's already a completed command %p - will cause abort\n", CMDINFO(CURRENT_SC), DONE_SC);
DONE_SC = CURRENT_SC;
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
DONE_SC->result = error;
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
CURRENT_SC->SCp.phase |= 1 << 16;
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
} else {
done(shpnt, DID_ERROR << 16);
if(!(DONE_SC->SCp.Status & not_issued)) {
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=NULL;
+ DONE_SC=0;
#if 0
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
#endif
ptr->request_bufflen = sizeof(ptr->sense_buffer);
DO_UNLOCK(flags);
- aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
+ aha152x_internal_queue(ptr, 0, check_condition, ptr->scsi_done);
DO_LOCK(flags);
#if 0
} else {
int lun=DONE_SC->device->lun & 0x7;
#endif
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=NULL;
+ DONE_SC=0;
/* turn led off, when no commands are in the driver */
HOSTDATA(shpnt)->commands--;
if(ptr->scsi_done != reset_done) {
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
}
DO_UNLOCK(flags);
DO_LOCK(flags);
}
- DONE_SC=NULL;
+ DONE_SC=0;
#if defined(AHA152X_STAT)
} else {
HOSTDATA(shpnt)->busfree_without_done_command++;
append_SC(&ISSUE_SC, CURRENT_SC);
DO_UNLOCK(flags);
- CURRENT_SC = NULL;
+ CURRENT_SC = 0;
}
if(!DISCONNECTED_SC) {
remove_SC(&DISCONNECTED_SC, ptr);
kfree(ptr->host_scribble);
- ptr->host_scribble=NULL;
+ ptr->host_scribble=0;
ptr->result = DID_RESET << 16;
ptr->scsi_done(ptr);
printk(KERN_DEBUG "none\n");
printk(KERN_DEBUG "disconnected_SC:\n");
- for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL)
+ for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : 0)
show_command(ptr);
disp_ports(shpnt);
if(thislength<0) {
DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: output too short\n");
- *start = NULL;
+ *start = 0;
return 0;
}
aha152x_config conf;
#endif
#ifdef __ISAPNP__
- struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL};
+ struct pnp_dev *dev=0, *pnpdev[2] = {0, 0};
#endif
if ( setup_count ) {
#if defined(__ISAPNP__)
} else if( pnpdev[i] ) {
HOSTDATA(shpnt)->pnpdev=pnpdev[i];
- pnpdev[i]=NULL;
+ pnpdev[i]=0;
#endif
}
} else {
for(i=0; i<ARRAY_SIZE(setup); i++) {
aha152x_release(aha152x_host[i]);
- aha152x_host[i]=NULL;
+ aha152x_host[i]=0;
}
}
my_done = SCtmp->scsi_done;
if (SCtmp->host_scribble) {
kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = NULL;
+ SCtmp->host_scribble = 0;
}
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
struct ahd_devinfo *devinfo,
u_int lun, cam_status status,
char *message, int verbose_level);
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
static void ahd_setup_target_msgin(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo,
struct scb *scb);
ahd->msgin_index = 0;
}
}
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
else {
if (bus_phase == P_MESGOUT) {
ahd->msg_type =
tstate = ahd->enabled_targets[i];
if (tstate != NULL) {
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
int j;
for (j = 0; j < AHD_NUM_LUNS; j++) {
free(tstate, M_DEVBUF);
}
}
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
if (ahd->black_hole != NULL) {
xpt_free_path(ahd->black_hole->path);
free(ahd->black_hole, M_DEVBUF);
ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
-#ifdef NEEDS_MORE_TESTING
+#if NEEDS_MORE_TESTING
/*
* Always enable abort on incoming L_Qs if this feature is
* supported. We use this to catch invalid SCB references.
if (match != 0)
match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
if (match != 0) {
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
int group;
group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
/* Make sure the sequencer is in a safe location. */
ahd_clear_critical_section(ahd);
-#ifdef AHD_TARGET_MODE
+#if AHD_TARGET_MODE
if ((ahd->flags & AHD_TARGETROLE) != 0) {
ahd_run_tqinfifo(ahd, /*paused*/TRUE);
}
}
break;
-#ifdef AIC7XXX_NOT_YET
+#if AIC7XXX_NOT_YET
case TRACEPOINT2:
{
printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no,
printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
"Programmed I/O.\n");
iounmap((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
- temp_p->maddr = NULL;
+ temp_p->maddr = 0;
if(temp_p->base == 0)
{
printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
temp_p->pause = hcntrl | PAUSE | INTEN;
temp_p->base = base;
temp_p->mbase = 0;
- temp_p->maddr = NULL;
+ temp_p->maddr = 0;
temp_p->pci_bus = 0;
temp_p->pci_device_fn = slot;
aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
u32 reply_size = 0;
u32 __user *user_msg = arg;
u32 __user * user_reply = NULL;
- void *sg_list[pHba->sg_tablesize];
+ ulong sg_list[pHba->sg_tablesize];
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
u32 i = 0;
u32 rcode = 0;
- void *p = NULL;
+ ulong p = 0;
ulong flags = 0;
memset(&msg, 0, MAX_MESSAGE_SIZE*4);
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
- p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
- if(!p) {
+ p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
+ if(p == 0) {
printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
pHba->name,sg_size,i,sg_count);
rcode = -ENOMEM;
/* Copy in the user's SG buffer if necessary */
if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
// TODO 64bit fix
- if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
+ if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
rcode = -EFAULT;
goto cleanup;
}
}
//TODO 64bit fix
- sg[i].addr_bus = (u32)virt_to_bus(p);
+ sg[i].addr_bus = (u32)virt_to_bus((void*)p);
}
}
if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
- if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
+ if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
+ printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
rcode = -EFAULT;
goto cleanup;
}
while(sg_index) {
if(sg_list[--sg_index]) {
if (rcode != -ETIME && rcode != -EINTR)
- kfree(sg_list[sg_index]);
+ kfree((void*)(sg_list[sg_index]));
}
}
return rcode;
u32 base;
int i;
-#ifdef CHECKPAL
+#if CHECKPAL
u8 pal1, pal2, pal3;
#endif
if (EISAbases[i]) { /* Still a possibility ? */
base = 0x1c88 + (i * 0x1000);
-#ifdef CHECKPAL
+#if CHECKPAL
pal1 = inb((u16) base - 8);
pal2 = inb((u16) base - 7);
pal3 = inb((u16) base - 6);
}
/* Nothing found here so we take it from the list */
EISAbases[i] = 0;
-#ifdef CHECKPAL
+#if CHECKPAL
}
#endif
}
EISAbases[x] = 0;
}
}
-#ifdef CHECK_BLINK
+#if CHECK_BLINK
else if (check_blink_state(base)) {
printk("eata_pio: HBA is in BLINK state.\n" "Consult your HBAs manual to correct this.\n");
}
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
+static inline void dma_clear(struct NCR_ESP *esp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
* via PIO.
*/
-static inline void dma_clear(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
- dregs->ctrl_reg = ctrl_data;
-
- t = (unsigned long *)(esp->edev);
-
- dregs->clear_strobe = 0;
- *t = 0 ;
-}
-
/***************************************************************** Detection */
int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
{
dregs->ctrl_reg = ctrl_data;
}
+/*
+ * Reset the Fastlane DMA logic after a transfer.
+ *
+ * NOTE(review): 'ctrl_data' is not declared in this function, so it is
+ * presumably a file-scope shadow copy of the board control register -
+ * confirm against the rest of fastlane.c.
+ */
+static inline void dma_clear(struct NCR_ESP *esp)
+{
+	struct fastlane_dma_registers *dregs =
+		(struct fastlane_dma_registers *) (esp->dregs);
+	unsigned long *t;
+
+	/* Mask the shadow down to the bits in FASTLANE_DMA_MASK and
+	   write it back to the control register. */
+	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
+	dregs->ctrl_reg = ctrl_data;
+
+	/* esp->edev is treated as a board address whose write below
+	   appears to act as a clear strobe - assumption, verify. */
+	t = (unsigned long *)(esp->edev);
+
+	dregs->clear_strobe = 0;
+	*t = 0 ;
+}
+
static void dma_ints_off(struct NCR_ESP *esp)
{
static int fdomain_isa_detect( int *irq, int *iobase )
{
-#ifndef PCMCIA
int i, j;
int base = 0xdeadbeef;
int flag = 0;
*iobase = base;
return 1; /* success */
-#else
- return 0;
-#endif
}
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
if (!(overrides[current_override].NCR5380_map_name))
continue;
- ports = NULL;
+ ports = 0;
switch (overrides[current_override].board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA;
.drives = LIST_HEAD_INIT(idescsi_driver.drives),
};
-static int ide_scsi_warned;
-
static int idescsi_ide_open(struct inode *inode, struct file *filp)
{
ide_drive_t *drive = inode->i_bdev->bd_disk->private_data;
drive->usage++;
- if (!ide_scsi_warned++) {
- printk(KERN_WARNING "ide-scsi: Warning this device driver is only intended for specialist devices.\n");
- printk(KERN_WARNING "ide-scsi: Do not use for cd burning, use /dev/hdX directly instead.\n");
- }
return 0;
}
unsigned int cmd, unsigned long arg)
{
struct block_device *bdev = inode->i_bdev;
- return generic_ide_ioctl(file, bdev, cmd, arg);
+ return generic_ide_ioctl(bdev, cmd, arg);
}
static struct block_device_operations idescsi_ops = {
return -ENODEV;
}
-static int imm_adjust_queue(struct scsi_device *device)
-{
- blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
- return 0;
-}
-
static struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
- .slave_alloc = imm_adjust_queue,
};
/***************************************************************************
static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
/* filled in by 'insmod' */
-static char *setup_strings;
+static char *setup_strings = 0;
+#ifdef MODULE_PARM
MODULE_PARM(setup_strings, "s");
+#endif
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
*/
cmd = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
+ prev = 0;
while (cmd) {
if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
break;
*/
tmp = (Scsi_Cmnd *) hostdata->input_Q;
- prev = NULL;
+ prev = 0;
while (tmp) {
if (tmp == cmd) {
if (prev)
*/
if (!done_setup && setup_strings)
- in2000_setup(setup_strings, NULL);
+ in2000_setup(setup_strings, 0);
detect_count = 0;
for (bios = 0; bios_tab[bios]; bios++) {
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
- case ATA_PROT_ATAPI:
/* check status of DMA engine */
host_stat = ata_bmdma_status(ap);
VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
}
free_irq(host_set->irq, host_set);
- if (host_set->ops->host_stop)
- host_set->ops->host_stop(host_set);
if (host_set->mmio_base)
iounmap(host_set->mmio_base);
+ if (host_set->ops->host_stop)
+ host_set->ops->host_stop(host_set);
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
*/
if (cp == tp->nego_cp)
- tp->nego_cp = NULL;
+ tp->nego_cp = 0;
/*
** If auto-sense performed, change scsi status.
if (cp == lp->held_ccb) {
xpt_que_splice(&lp->skip_ccbq, &lp->wait_ccbq);
xpt_que_init(&lp->skip_ccbq);
- lp->held_ccb = NULL;
+ lp->held_ccb = 0;
}
}
} else {
script_ofs = dsp;
script_size = 0;
- script_base = NULL;
+ script_base = 0;
script_name = "mem";
}
if (!(cmd & 6)) {
cp = np->header.cp;
if (CCB_PHYS(cp, phys) != dsa)
- cp = NULL;
+ cp = 0;
} else {
cp = np->ccb;
while (cp && (CCB_PHYS (cp, phys) != dsa))
** try to find the interrupted script command,
** and the address at which to continue.
*/
- vdsp = NULL;
+ vdsp = 0;
nxtdsp = 0;
if (dsp > np->p_script &&
dsp <= np->p_script + sizeof(struct script)) {
u_char scntl3;
u_char chg, ofs, per, fak, wide;
u_char num = INB (nc_dsps);
- struct ccb *cp=NULL;
+ struct ccb *cp=0;
u_long dsa = INL (nc_dsa);
u_char target = INB (nc_sdid) & 0x0f;
struct tcb *tp = &np->target[target];
if (cp->magic) {
PRINT_LUN(np, tn, ln);
printk ("ccb free list corrupted (@%p)\n", cp);
- cp = NULL;
+ cp = 0;
}
else {
xpt_insque_tail(qp, &lp->wait_ccbq);
{
struct tcb *tp = &np->target[tn];
struct lcb *lp = tp->lp[ln];
- struct ccb *cp = NULL;
+ struct ccb *cp = 0;
/*
** Allocate memory for this CCB.
NCR_LOCK_NCB(np, flags);
ncr_exception(np);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
NCR_LOCK_NCB(np, flags);
ncr_timeout(np);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
if (done_list) {
sts = ncr_reset_bus(np, cmd, 1);
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
sts = ncr_abort_command(np, cmd);
out:
done_list = np->done_list;
- np->done_list = NULL;
+ np->done_list = 0;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
#endif
- cmd->next_wcmd = NULL;
+ cmd->next_wcmd = 0;
if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
else {
while ((wcmd->next_wcmd) != 0)
if (cmd == *pcmd) {
if (to_remove) {
*pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = NULL;
+ cmd->next_wcmd = 0;
}
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
}
pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
}
- return NULL;
+ return 0;
}
static void process_waiting_list(struct ncb *np, int sts)
struct scsi_cmnd *waiting_list, *wcmd;
waiting_list = np->waiting_list;
- np->waiting_list = NULL;
+ np->waiting_list = 0;
#ifdef DEBUG_WAITING_LIST
if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
#endif
while ((wcmd = waiting_list) != 0) {
waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
- wcmd->next_wcmd = NULL;
+ wcmd->next_wcmd = 0;
if (sts == DID_OK) {
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
int length, int func)
{
struct host_data *host_data;
- struct ncb *ncb = NULL;
+ struct ncb *ncb = 0;
int retv;
#ifdef DEBUG_PROC_INFO
**==========================================================
*/
#ifdef MODULE
-char *ncr53c8xx; /* command line passed by insmod */
+char *ncr53c8xx = 0; /* command line passed by insmod */
MODULE_PARM(ncr53c8xx, "s");
#endif
int unit, struct ncr_device *device)
{
struct host_data *host_data;
- struct ncb *np = NULL;
- struct Scsi_Host *instance = NULL;
+ struct ncb *np = 0;
+ struct Scsi_Host *instance = 0;
u_long flags = 0;
int i;
thislength = pos - (buffer + offset);
if(thislength < 0) {
- *start = NULL;
+ *start = 0;
return 0;
}
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
int dev = TAPE_NR(inode);
int mode = TAPE_MODE(inode);
- nonseekable_open(inode, filp);
write_lock(&os_scsi_tapes_lock);
if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
(STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
qla1280_req_pkt(struct scsi_qla_host *ha)
{
struct device_reg *reg = ha->iobase;
- request_t *pkt = NULL;
+ request_t *pkt = 0;
int cnt;
uint32_t timer;
{
struct device_reg *reg = ha->iobase;
struct response *pkt;
- struct srb *sp = NULL;
+ struct srb *sp = 0;
uint16_t mailbox[MAILBOX_REGISTER_COUNT];
uint16_t *wptr;
uint32_t index;
if (index < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[index];
else
- sp = NULL;
+ sp = 0;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = NULL;
+ ha->outstanding_cmds[index] = 0;
/* Save ISP completion status */
CMD_RESULT(sp->cmd) = 0;
}
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = NULL;
+ ha->outstanding_cmds[handle] = 0;
cmd = sp->cmd;
if (handle < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[handle];
else
- sp = NULL;
+ sp = 0;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = NULL;
+ ha->outstanding_cmds[handle] = 0;
/* Bad payload or header */
if (pkt->entry_status & (BIT_3 + BIT_2)) {
sp = ha->outstanding_cmds[index];
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = NULL;
+ ha->outstanding_cmds[index] = 0;
if (ha->actthreads)
ha->actthreads--;
/* Validate handle. */
if (pkt->handle < MAX_OUTSTANDING_COMMANDS) {
sp = ha->outstanding_cmds[pkt->handle];
- ha->outstanding_cmds[pkt->handle] = NULL;
+ ha->outstanding_cmds[pkt->handle] = 0;
} else
sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle] = NULL;
+ ha->outstanding_cmds[pkt->handle] = 0;
if (ha->actthreads)
ha->actthreads--;
sp->lun_queue->out_cnt--;
CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle1] = NULL;
+ ha->outstanding_cmds[pkt->handle1] = 0;
add_to_done_queue(ha, sp);
}
* If you do not delete the provisions above, a recipient may use your
* version of this file under either the OSL or the GPL.
*
- * 0.02
- * - Added support for CK804 SATA controller.
- *
- * 0.01
- * - Initial revision.
*/
#include <linux/config.h>
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "0.02"
+#define DRV_VERSION "0.01"
#define NV_PORTS 2
#define NV_PIO_MASK 0x1f
#define NV_PORT1_SCR_REG_OFFSET 0x40
#define NV_INT_STATUS 0x10
-#define NV_INT_STATUS_CK804 0x440
#define NV_INT_STATUS_PDEV_INT 0x01
#define NV_INT_STATUS_PDEV_PM 0x02
#define NV_INT_STATUS_PDEV_ADDED 0x04
NV_INT_STATUS_SDEV_HOTPLUG)
#define NV_INT_ENABLE 0x11
-#define NV_INT_ENABLE_CK804 0x441
#define NV_INT_ENABLE_PDEV_MASK 0x01
#define NV_INT_ENABLE_PDEV_PM 0x02
#define NV_INT_ENABLE_PDEV_ADDED 0x04
#define NV_INT_CONFIG 0x12
#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI
-// For PCI config register 20
-#define NV_MCP_SATA_CFG_20 0x50
-#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04
-
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_host_stop (struct ata_host_set *host_set);
-static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
-static void nv_disable_hotplug(struct ata_host_set *host_set);
-static void nv_check_hotplug(struct ata_host_set *host_set);
-static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
-static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
-
-enum nv_host_type
-{
- NFORCE2,
- NFORCE3,
- CK804
-};
static struct pci_device_id nv_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0, } /* terminate list */
};
-#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
-
-struct nv_host_desc
-{
- enum nv_host_type host_type;
- unsigned long host_flags;
- void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
- void (*disable_hotplug)(struct ata_host_set *host_set);
- void (*check_hotplug)(struct ata_host_set *host_set);
-
-};
-static struct nv_host_desc nv_device_tbl[] = {
- {
- .host_type = NFORCE2,
- .host_flags = 0x00000000,
- .enable_hotplug = nv_enable_hotplug,
- .disable_hotplug= nv_disable_hotplug,
- .check_hotplug = nv_check_hotplug,
- },
- {
- .host_type = NFORCE3,
- .host_flags = 0x00000000,
- .enable_hotplug = nv_enable_hotplug,
- .disable_hotplug= nv_disable_hotplug,
- .check_hotplug = nv_check_hotplug,
- },
- { .host_type = CK804,
- .host_flags = NV_HOST_FLAGS_SCR_MMIO,
- .enable_hotplug = nv_enable_hotplug_ck804,
- .disable_hotplug= nv_disable_hotplug_ck804,
- .check_hotplug = nv_check_hotplug_ck804,
- },
-};
-
-struct nv_host
-{
- struct nv_host_desc *host_desc;
-};
-
static struct pci_driver nv_pci_driver = {
.name = DRV_NAME,
.id_table = nv_pci_tbl,
irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
- struct nv_host *host = host_set->private_data;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
+ u8 intr_status;
+ u8 intr_enable;
spin_lock_irqsave(&host_set->lock, flags);
handled += ata_host_intr(ap, qc);
}
- }
+ intr_status = inb(ap->ioaddr.scr_addr + NV_INT_STATUS);
+ intr_enable = inb(ap->ioaddr.scr_addr + NV_INT_ENABLE);
+
+ // Clear interrupt status.
+ outb(0xff, ap->ioaddr.scr_addr + NV_INT_STATUS);
+
+ if (intr_status & NV_INT_STATUS_HOTPLUG) {
+ if (intr_status & NV_INT_STATUS_PDEV_ADDED) {
+ printk(KERN_WARNING "ata%u: "
+ "Primary device added\n", ap->id);
+ }
+
+ if (intr_status & NV_INT_STATUS_PDEV_REMOVED) {
+ printk(KERN_WARNING "ata%u: "
+ "Primary device removed\n", ap->id);
+ }
- if (host->host_desc->check_hotplug)
- host->host_desc->check_hotplug(host_set);
+ if (intr_status & NV_INT_STATUS_SDEV_ADDED) {
+ printk(KERN_WARNING "ata%u: "
+ "Secondary device added\n", ap->id);
+ }
+
+ if (intr_status & NV_INT_STATUS_SDEV_REMOVED) {
+ printk(KERN_WARNING "ata%u: "
+ "Secondary device removed\n", ap->id);
+ }
+ }
+ }
spin_unlock_irqrestore(&host_set->lock, flags);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
- struct ata_host_set *host_set = ap->host_set;
- struct nv_host *host = host_set->private_data;
-
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO)
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
- else
- return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
- struct ata_host_set *host_set = ap->host_set;
- struct nv_host *host = host_set->private_data;
-
if (sc_reg > SCR_CONTROL)
return;
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO)
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
- else
- outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_host_stop (struct ata_host_set *host_set)
{
- struct nv_host *host = host_set->private_data;
+ int i;
- // Disable hotplug event interrupts.
- if (host->host_desc->disable_hotplug)
- host->host_desc->disable_hotplug(host_set);
+ for (i=0; i<host_set->n_ports; i++) {
+ u8 intr_mask;
- kfree(host);
+ // Disable hotplug event interrupts.
+ intr_mask = inb(host_set->ports[i]->ioaddr.scr_addr +
+ NV_INT_ENABLE);
+ intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
+ outb(intr_mask, host_set->ports[i]->ioaddr.scr_addr +
+ NV_INT_ENABLE);
+ }
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version = 0;
- struct nv_host *host;
struct ata_probe_ent *probe_ent = NULL;
+ int i;
int rc;
if (!printed_version++)
goto err_out_regions;
}
- host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
- if (!host) {
- rc = -ENOMEM;
- goto err_out_free_ent;
- }
-
- host->host_desc = &nv_device_tbl[ent->driver_data];
-
memset(probe_ent, 0, sizeof(*probe_ent));
INIT_LIST_HEAD(&probe_ent->node);
ATA_FLAG_SATA_RESET |
ATA_FLAG_SRST |
ATA_FLAG_NO_LEGACY;
-
probe_ent->port_ops = &nv_ops;
probe_ent->n_ports = NV_PORTS;
probe_ent->irq = pdev->irq;
pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
probe_ent->port[0].bmdma_addr =
pci_resource_start(pdev, 4) | NV_PORT0_BMDMA_REG_OFFSET;
+ probe_ent->port[0].scr_addr =
+ pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
ata_std_ports(&probe_ent->port[1]);
pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
probe_ent->port[1].bmdma_addr =
pci_resource_start(pdev, 4) | NV_PORT1_BMDMA_REG_OFFSET;
+ probe_ent->port[1].scr_addr =
+ pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
- probe_ent->private_data = host;
-
- if (host->host_desc->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
- unsigned long base;
+ pci_set_master(pdev);
- probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
- pci_resource_len(pdev, 5));
- if (probe_ent->mmio_base == NULL)
- goto err_out_free_ent;
+ rc = ata_device_add(probe_ent);
+ if (rc != NV_PORTS)
+ goto err_out_regions;
- base = (unsigned long)probe_ent->mmio_base;
+ // Enable hotplug event interrupts.
+ for (i=0; i<probe_ent->n_ports; i++) {
+ u8 intr_mask;
- probe_ent->port[0].scr_addr =
- base + NV_PORT0_SCR_REG_OFFSET;
- probe_ent->port[1].scr_addr =
- base + NV_PORT1_SCR_REG_OFFSET;
- } else {
+ outb(NV_INT_STATUS_HOTPLUG, probe_ent->port[i].scr_addr +
+ NV_INT_STATUS);
- probe_ent->port[0].scr_addr =
- pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
- probe_ent->port[1].scr_addr =
- pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
+ intr_mask = inb(probe_ent->port[i].scr_addr + NV_INT_ENABLE);
+ intr_mask |= NV_INT_ENABLE_HOTPLUG;
+ outb(intr_mask, probe_ent->port[i].scr_addr + NV_INT_ENABLE);
}
- pci_set_master(pdev);
-
- // Enable hotplug event interrupts.
- if (host->host_desc->enable_hotplug)
- host->host_desc->enable_hotplug(probe_ent);
-
- rc = ata_device_add(probe_ent);
- if (rc != NV_PORTS)
- goto err_out_free_ent;
-
kfree(probe_ent);
return 0;
-err_out_free_ent:
- kfree(probe_ent);
-
err_out_regions:
pci_release_regions(pdev);
return rc;
}
-static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
-{
- u8 intr_mask;
-
- outb(NV_INT_STATUS_HOTPLUG,
- (unsigned long)probe_ent->mmio_base + NV_INT_STATUS);
-
- intr_mask = inb((unsigned long)probe_ent->mmio_base + NV_INT_ENABLE);
- intr_mask |= NV_INT_ENABLE_HOTPLUG;
-
- outb(intr_mask, (unsigned long)probe_ent->mmio_base + NV_INT_ENABLE);
-}
-
-static void nv_disable_hotplug(struct ata_host_set *host_set)
-{
- u8 intr_mask;
-
- intr_mask = inb((unsigned long)host_set->mmio_base + NV_INT_ENABLE);
-
- intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-
- outb(intr_mask, (unsigned long)host_set->mmio_base + NV_INT_ENABLE);
-}
-
-static void nv_check_hotplug(struct ata_host_set *host_set)
-{
- u8 intr_status;
-
- intr_status = inb((unsigned long)host_set->mmio_base + NV_INT_STATUS);
-
- // Clear interrupt status.
- outb(0xff, (unsigned long)host_set->mmio_base + NV_INT_STATUS);
-
- if (intr_status & NV_INT_STATUS_HOTPLUG) {
- if (intr_status & NV_INT_STATUS_PDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device added\n");
-
- if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device removed\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device added\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device removed\n");
- }
-}
-
-static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
-{
- u8 intr_mask;
- u8 regval;
-
- pci_read_config_byte(probe_ent->pdev, NV_MCP_SATA_CFG_20, ®val);
- regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
- pci_write_config_byte(probe_ent->pdev, NV_MCP_SATA_CFG_20, regval);
-
- writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
-
- intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
- intr_mask |= NV_INT_ENABLE_HOTPLUG;
-
- writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
-}
-
-static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
-{
- u8 intr_mask;
- u8 regval;
-
- intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
-
- intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
-
- writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
-
- pci_read_config_byte(host_set->pdev, NV_MCP_SATA_CFG_20, ®val);
- regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
- pci_write_config_byte(host_set->pdev, NV_MCP_SATA_CFG_20, regval);
-}
-
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
-{
- u8 intr_status;
-
- intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
-
- // Clear interrupt status.
- writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
-
- if (intr_status & NV_INT_STATUS_HOTPLUG) {
- if (intr_status & NV_INT_STATUS_PDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device added\n");
-
- if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Primary device removed\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_ADDED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device added\n");
-
- if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
- printk(KERN_WARNING "nv_sata: "
- "Secondary device removed\n");
- }
-}
-
static int __init nv_init(void)
{
return pci_module_init(&nv_pci_driver);
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdp, cmd, p);
default:
- error = scsi_cmd_ioctl(filp, disk, cmd, p);
+ error = scsi_cmd_ioctl(disk, cmd, p);
if (error != -ENOTTY)
return error;
}
int res;
int retval;
- nonseekable_open(inode, filp);
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
sdp = sg_get_dev(dev);
if ((!sdp) || (!sdp->device))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
sdp->disk->disk_name, (int) count));
+ if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_WRITE, buf, count)))
return k;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
+ if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_READ, buf, count)))
return k; /* protects following copy_from_user()s + get_user()s */
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdev, cmd, (void __user *)arg);
}
- return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+ return cdrom_ioctl(&cd->cdi, inode, cmd, arg);
}
static int sr_block_media_changed(struct gendisk *disk)
""
};
+ /* Set read only initially */
+ set_disk_ro(cd->disk, 1);
+
/* allocate a request for the TEST_UNIT_READY */
SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
if (!SRpnt) {
if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) !=
(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) {
cd->device->writeable = 1;
+ set_disk_ro(cd->disk, 0);
}
scsi_release_request(SRpnt);
int dev = TAPE_NR(inode);
char *name;
- nonseekable_open(inode, filp);
write_lock(&st_dev_arr_lock);
if (dev >= st_dev_max || scsi_tapes == NULL ||
((STp = scsi_tapes[dev]) == NULL)) {
}
\f
/* The checks common to both reading and writing */
-static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count)
+static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t *ppos)
{
ssize_t retval = 0;
goto out;
}
+ if (ppos != &filp->f_pos) {
+ /* "A request was outside the capabilities of the device." */
+ retval = (-ENXIO);
+ goto out;
+ }
+
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count);
+ retval = rw_checks(STp, filp, count, ppos);
if (retval || count == 0)
goto out;
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count);
+ retval = rw_checks(STp, filp, count, ppos);
if (retval || count == 0)
goto out;
case SCSI_IOCTL_GET_BUS_NUMBER:
break;
default:
- i = scsi_cmd_ioctl(file, STp->disk, cmd_in, p);
+ i = scsi_cmd_ioctl(STp->disk, cmd_in, p);
if (i != -ENOTTY)
return i;
break;
for (i = 0; i < st_dev_max; i++) {
tpnt = scsi_tapes[i];
if (tpnt != NULL && tpnt->device == SDp) {
- scsi_tapes[i] = NULL;
+ scsi_tapes[i] = 0;
st_nr_dev--;
write_unlock(&st_dev_arr_lock);
devfs_unregister_tape(tpnt->disk->number);
return &sym_fw1;
#endif
else
- return NULL;
+ return 0;
}
/*
}
/* Revert everything */
- SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+ SYM_UCMD_PTR(cmd)->eh_wait = 0;
cmd->scsi_done = ep->old_done;
/* Wake up the eh thread if it wants to sleep */
/* On error, restore everything and cross fingers :) */
if (sts) {
- SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+ SYM_UCMD_PTR(cmd)->eh_wait = 0;
cmd->scsi_done = ep->old_done;
to_do = SYM_EH_DO_IGNORE;
}
char **start, off_t offset, int length, int func)
{
struct host_data *host_data;
- struct sym_hcb *np = NULL;
+ struct sym_hcb *np = 0;
int retv;
host_data = (struct host_data *) host->hostdata;
static struct sym_driver_setup
sym_driver_safe_setup __initdata = SYM_LINUX_DRIVER_SAFE_SETUP;
#ifdef MODULE
-char *sym53c8xx; /* command line passed by insmod */
+char *sym53c8xx = 0; /* command line passed by insmod */
MODULE_PARM(sym53c8xx, "s");
#endif
static __inline m_addr_t sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
- void *vaddr = NULL;
+ void *vaddr = 0;
dma_addr_t baddr = 0;
vaddr = pci_alloc_consistent(mp->dev_dmat,SYM_MEM_CLUSTER_SIZE, &baddr);
} else {
script_ofs = dsp;
script_size = 0;
- script_base = NULL;
+ script_base = 0;
script_name = "mem";
}
return chip;
}
- return NULL;
+ return 0;
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2
* try to find the interrupted script command,
* and the address at which to continue.
*/
- vdsp = NULL;
+ vdsp = 0;
nxtdsp = 0;
if (dsp > np->scripta_ba &&
dsp <= np->scripta_ba + np->scripta_sz) {
* we are not in race.
*/
i = 0;
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_BUSY &&
* abort for this target.
*/
i = 0;
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_DISCONNECT)
else if (dp_scr == SCRIPTA_BA (np, pm1_data))
pm = &cp->phys.pm1;
else
- pm = NULL;
+ pm = 0;
if (pm) {
dp_scr = scr_to_cpu(pm->ret);
* used for negotiation, clear this info in the tcb.
*/
if (cp == tp->nego_cp)
- tp->nego_cp = NULL;
+ tp->nego_cp = 0;
#ifdef SYM_CONF_IARB_SUPPORT
/*
/*
* Make this CCB available.
*/
- cp->cam_ccb = NULL;
+ cp->cam_ccb = 0;
cp->host_status = HS_IDLE;
sym_remque(&cp->link_ccbq);
sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
*/
static ccb_p sym_alloc_ccb(hcb_p np)
{
- ccb_p cp = NULL;
+ ccb_p cp = 0;
int hcode;
/*
* queue to the controller.
*/
if (np->actccbs >= SYM_CONF_MAX_START)
- return NULL;
+ return 0;
/*
* Allocate memory for this CCB.
sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
sym_mfree_dma(cp, sizeof(*cp), "CCB");
}
- return NULL;
+ return 0;
}
/*
* allocation for not probed LUNs.
*/
if (!sym_is_bit(tp->lun_map, ln))
- return NULL;
+ return 0;
/*
* Initialize the target control block if not yet.
lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
if (!lp->cb_tags) {
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
- lp->itlq_tbl = NULL;
+ lp->itlq_tbl = 0;
goto fail;
}
/*
* Look up our CCB control block.
*/
- cp = NULL;
+ cp = 0;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp2->cam_ccb == ccb) {
* LUN(s) > 0.
*/
#if SYM_CONF_MAX_LUN <= 1
-#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : NULL
+#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
#else
#define sym_lp(np, tp, lun) \
- (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
+ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
#endif
/*
m_link_p h = mp->h;
if (size > SYM_MEM_CLUSTER_SIZE)
- return NULL;
+ return 0;
while (size > s) {
s <<= 1;
if (s == SYM_MEM_CLUSTER_SIZE) {
h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
if (h[j].next)
- h[j].next->next = NULL;
+ h[j].next->next = 0;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_p) (a+s);
- h[j].next->next = NULL;
+ h[j].next->next = 0;
}
}
#ifdef DEBUG
#ifdef SYM_MEM_FREE_UNUSED
static struct sym_m_pool mp0 =
- {NULL, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
+ {0, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
#else
static struct sym_m_pool mp0 =
- {NULL, ___mp0_get_mem_cluster};
+ {0, ___mp0_get_mem_cluster};
#endif
/*
/* Create a new memory DMAable pool (when fetch failed) */
static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
{
- m_pool_p mp = NULL;
+ m_pool_p mp = 0;
mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
if (mp) {
}
if (mp)
__sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
- return NULL;
+ return 0;
}
#ifdef SYM_MEM_FREE_UNUSED
void *__sym_calloc_dma_unlocked(m_pool_ident_t dev_dmat, int size, char *name)
{
m_pool_p mp;
- void *m = NULL;
+ void *m = 0;
mp = ___get_dma_pool(dev_dmat);
if (!mp)
{
m_pool_p mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_p vp = NULL;
+ m_vtob_p vp = 0;
m_addr_t a = ((m_addr_t) m) & ~SYM_MEM_CLUSTER_MASK;
mp = ___get_dma_pool(dev_dmat);
if (elem != head)
__sym_que_del(head, elem->flink);
else
- elem = NULL;
+ elem = 0;
return elem;
}
u_char *gpcntl)
{
OUTB (nc_gpcntl, *gpcntl & 0xfe);
- S24C16_do_bit(np, NULL, write_bit, gpreg);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
OUTB (nc_gpcntl, *gpcntl);
}
int x;
for (x = 0; x < 8; x++)
- S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}
if (elem != head)
__xpt_que_del(head, elem->flink);
else
- elem = NULL;
+ elem = 0;
return elem;
}
m_link_s *h = mp->h;
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
- return NULL;
+ return 0;
while (size > s) {
s <<= 1;
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
h[j].next = (m_link_s *) M_GETP();
if (h[j].next)
- h[j].next->next = NULL;
+ h[j].next->next = 0;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_s *) (a+s);
- h[j].next->next = NULL;
+ h[j].next->next = 0;
}
}
#ifdef DEBUG
--mp->nump;
}
-static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
/*
* DMAable pools.
{
u_long flags;
struct m_pool *mp;
- void *m = NULL;
+ void *m = 0;
NCR_LOCK_DRIVER(flags);
mp = ___get_dma_pool(bush);
u_long flags;
m_pool_s *mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_s *vp = NULL;
+ m_vtob_s *vp = 0;
m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
NCR_LOCK_DRIVER(flags);
pdev = pACB->pdev;
pci_read_config_word(pdev, PCI_STATUS, &pstat);
printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
- printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
+ printk ("DC390: In case of driver trouble read linux/Documentation/scsi/tmscsim.txt\n");
}
return SCSI_ABORT_NOT_RUNNING;
if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
- config.mscp[mscp_index].SCint = NULL;
+ config.mscp[mscp_index].SCint = 0;
done = config.mscp[mscp_index].done;
- config.mscp[mscp_index].done = NULL;
+ config.mscp[mscp_index].done = 0;
SCpnt->result = DID_ABORT << 16;
/* Take the host lock to guard against scsi layer re-entry */
{
config.mscp[i].SCint->result = DID_RESET << 16;
config.mscp[i].done(config.mscp[i].SCint);
- config.mscp[i].done = NULL;
+ config.mscp[i].done = 0;
}
- config.mscp[i].SCint = NULL;
+ config.mscp[i].SCint = 0;
}
#endif
if (icm_status == 3) {
void (*done)(Scsi_Cmnd *) = mscp->done;
if (done) {
- mscp->done = NULL;
+ mscp->done = 0;
mscp->SCint->result = DID_ABORT << 16;
done(mscp->SCint);
}
once we call done, we may get another command queued before this
interrupt service routine can return. */
done = mscp->done;
- mscp->done = NULL;
+ mscp->done = 0;
/* Let the higher levels know that we're done */
switch (mscp->adapter_status)
SCtmp->result = status | mscp->target_status;
- SCtmp->host_scribble = NULL;
+ SCtmp->host_scribble = 0;
/* Free up mscp block for next command */
#if ULTRASTOR_MAX_CMDS == 1
static int __devinit pci_xircom_init(struct pci_dev *dev)
{
- msleep(100);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/10);
return 0;
}
# The new 8250/16550 serial drivers
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
- depends on (BROKEN || !SPARC64)
select SERIAL_CORE
---help---
This selects whether you want to include the driver for the standard
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_MPSC
+ bool "Marvell MPSC serial port support"
+ depends on PPC32 && MV64X60
+ select SERIAL_CORE
+ help
+ Say Y here if you want to use the Marvell MPSC serial controller.
+
+config SERIAL_MPSC_CONSOLE
+ bool "Support for console on Marvell MPSC serial port"
+ depends on SERIAL_MPSC
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you want to support a serial console on a Marvell MPSC.
+
config SERIAL_PXA
bool "PXA serial port support"
depends on ARM && ARCH_PXA
help
Select the is option to use SMC2 as a serial port
-config SERIAL_SGI_L1_CONSOLE
- bool "SGI Altix L1 serial console support"
- depends on IA64_GENERIC || IA64_SGI_SN2
- select SERIAL_CORE
- help
- If you have an SGI Altix and you would like to use the system
- controller serial port as your console (you want this!),
- say Y. Otherwise, say N.
-
-config SERIAL_MPC52xx
- tristate "Freescale MPC52xx family PSC serial support"
- depends on PPC_MPC52xx
- select SERIAL_CORE
- help
- This drivers support the MPC52xx PSC serial ports. If you would
- like to use them, you must answer Y or M to this option. Not that
- for use as console, it must be included in kernel and not as a
- module.
-
-config SERIAL_MPC52xx_CONSOLE
- bool "Console on a Freescale MPC52xx family PSC serial port"
- depends on SERIAL_MPC52xx=y
- select SERIAL_CORE_CONSOLE
- help
- Select this options if you'd like to use one of the PSC serial port
- of the Freescale MPC52xx family as a console.
-
-config SERIAL_MPC52xx_CONSOLE_BAUD
- int "Freescale MPC52xx family PSC serial port baud"
- depends on SERIAL_MPC52xx_CONSOLE=y
- default "9600"
- help
- Select the MPC52xx console baud rate.
- This value is only used if the bootloader doesn't pass in the
- console baudrate
-
endmenu
+
# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
#
+obj-$(CONFIG_SERIAL_MPSC) += mpsc/
+
serial-8250-y :=
serial-8250-$(CONFIG_SERIAL_8250_ACPI) += 8250_acpi.o
serial-8250-$(CONFIG_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_BAST_SIO) += bast_sio.o
-obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
-obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
/**************************************************************/
static int cpm_uart_tx_pump(struct uart_port *port);
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo);
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo);
-static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval);
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int sbits, u16 sval);
/**************************************************************/
pr_debug("CPM uart[%d]:start tx\n", port->line);
+ /* if in the middle of discarding return */
+ if (IS_DISCARDING(pinfo))
+ return;
+
if (IS_SMC(pinfo)) {
if (smcp->smc_smcm & SMCM_TX)
return;
static int cpm_uart_startup(struct uart_port *port)
{
int retval;
- struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
pr_debug("CPM uart[%d]:startup\n", port->line);
if (retval)
return retval;
- /* Startup rx-int */
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm |= SMCM_RX;
- pinfo->smcp->smc_smcmr |= SMCMR_REN;
- } else {
- pinfo->sccp->scc_sccm |= UART_SCCM_RX;
- }
-
return 0;
}
}
/* Shut them really down and reinit buffer descriptors */
- cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
- cpm_uart_initbd(pinfo);
+ cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
}
}
{
int baud;
unsigned long flags;
- u16 cval, scval, prev_mode;
+ u16 cval, scval;
int bits, sbits;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
- volatile smc_t *smcp = pinfo->smcp;
- volatile scc_t *sccp = pinfo->sccp;
+ int line = pinfo - cpm_uart_ports;
+ volatile cbd_t *bdp;
pr_debug("CPM uart[%d]:set_termios\n", port->line);
+ spin_lock_irqsave(&port->lock, flags);
+ /* disable uart interrupts */
+ if (IS_SMC(pinfo))
+ pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
+ else
+ pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
+ pinfo->flags |= FLAG_DISCARDING;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* if previous configuration exists wait for tx to finish */
+ if (pinfo->baud != 0 && pinfo->bits != 0) {
+
+ /* point to the last txed bd */
+ bdp = pinfo->tx_cur;
+ if (bdp == pinfo->tx_bd_base)
+ bdp = pinfo->tx_bd_base + (pinfo->tx_nrfifos - 1);
+ else
+ bdp--;
+
+ /* wait for it to be transmitted */
+ while ((bdp->cbd_sc & BD_SC_READY) != 0)
+ schedule();
+
+ /* and delay for the hw fifo to drain */
+ udelay((3 * 1000000 * pinfo->bits) / pinfo->baud);
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Send the CPM an initialize command. */
+ cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
+
+ /* Stop uart */
+ if (IS_SMC(pinfo))
+ pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
+ else
+ pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* Send the CPM an initialize command. */
+ cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Character length programmed into the mode register is the
spin_lock_irqsave(&port->lock, flags);
+ cpm_set_brg(pinfo->brg - 1, baud);
+
/* Start bit has not been added (so don't, because we would just
* subtract it later), and we need to add one for the number of
* stops bits (there is always at least one).
*/
bits++;
- if (IS_SMC(pinfo)) {
- /* Set the mode register. We want to keep a copy of the
- * enables, because we want to put them back if they were
- * present.
- */
- prev_mode = smcp->smc_smcmr;
- smcp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
- smcp->smc_smcmr |= (prev_mode & (SMCMR_REN | SMCMR_TEN));
- } else {
- sccp->scc_psmr = (sbits << 12) | scval;
- }
- cpm_set_brg(pinfo->brg - 1, baud);
+ /* re-init */
+ if (IS_SMC(pinfo))
+ cpm_uart_init_smc(pinfo, bits, cval);
+ else
+ cpm_uart_init_scc(pinfo, sbits, scval);
+
+ pinfo->baud = baud;
+ pinfo->bits = bits;
+
+ pinfo->flags &= ~FLAG_DISCARDING;
spin_unlock_irqrestore(&port->lock, flags);
}
return 1;
}
-/*
- * init buffer descriptors
- */
-static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int bits, u16 scval)
{
- int i;
+ int line = pinfo - cpm_uart_ports;
+ volatile scc_t *scp;
+ volatile scc_uart_t *sup;
u8 *mem_addr;
volatile cbd_t *bdp;
+ int i;
- pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line);
+ pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
+
+ scp = pinfo->sccp;
+ sup = pinfo->sccup;
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
+ pinfo->rx_cur = pinfo->rx_bd_base;
mem_addr = pinfo->mem_addr;
- bdp = pinfo->rx_cur = pinfo->rx_bd_base;
- for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) {
+ for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT;
+ bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
mem_addr += pinfo->rx_fifosize;
}
-
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT;
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
- bdp = pinfo->tx_cur = pinfo->tx_bd_base;
- for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) {
+ pinfo->tx_cur = pinfo->tx_bd_base;
+ for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_INTRPT;
+ bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
 		mem_addr += pinfo->tx_fifosize;
 	}
-
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_WRAP | BD_SC_INTRPT;
-}
-
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo)
-{
- int line = pinfo - cpm_uart_ports;
- volatile scc_t *scp;
- volatile scc_uart_t *sup;
-
- pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
-
- scp = pinfo->sccp;
- sup = pinfo->sccup;
/* Store address */
pinfo->sccup->scc_genscc.scc_rbase = (unsigned char *)pinfo->rx_bd_base - DPRAM_BASE;
(SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
/* Enable rx interrupts and clear all pending events. */
- scp->scc_sccm = 0;
+ scp->scc_sccm = UART_SCCM_RX;
scp->scc_scce = 0xffff;
scp->scc_dsr = 0x7e7e;
- scp->scc_psmr = 0x3000;
+ scp->scc_psmr = (bits << 12) | scval;
scp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval)
{
int line = pinfo - cpm_uart_ports;
volatile smc_t *sp;
volatile smc_uart_t *up;
+ volatile u8 *mem_addr;
+ volatile cbd_t *bdp;
+ int i;
pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line);
sp = pinfo->smcp;
up = pinfo->smcup;
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr;
+ pinfo->rx_cur = pinfo->rx_bd_base;
+ for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ mem_addr += pinfo->rx_fifosize;
+ }
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
+ pinfo->tx_cur = pinfo->tx_bd_base;
+ for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ mem_addr += pinfo->tx_fifosize;
+ }
+
/* Store address */
pinfo->smcup->smc_rbase = (u_char *)pinfo->rx_bd_base - DPRAM_BASE;
pinfo->smcup->smc_tbase = (u_char *)pinfo->tx_bd_base - DPRAM_BASE;
cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
- /* Set UART mode, 8 bit, no parity, one stop.
- * Enable receive and transmit.
- */
- sp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
+ /* Set UART mode, according to the parameters */
+ sp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
/* Enable only rx interrupts clear all pending events. */
- sp->smc_smcm = 0;
+ sp->smc_smcm = SMCM_RX;
sp->smc_smce = 0xff;
sp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN);
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
- pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
- } else {
- pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
- pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
- }
-
ret = cpm_uart_allocbuf(pinfo, 0);
-
if (ret)
return ret;
- cpm_uart_initbd(pinfo);
-
return 0;
}
volatile cbd_t *bdp, *bdbase;
volatile unsigned char *cp;
+ if (IS_DISCARDING(pinfo))
+ return;
+
/* Get the address of the host memory buffer.
*/
bdp = pinfo->tx_cur;
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
- if (IS_SMC(pinfo)) {
- pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
- pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
- } else {
- pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
- pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
- }
-
ret = cpm_uart_allocbuf(pinfo, 1);
-
if (ret)
return ret;
- cpm_uart_initbd(pinfo);
-
- if (IS_SMC(pinfo))
- cpm_uart_init_smc(pinfo);
- else
- cpm_uart_init_scc(pinfo);
-
uart_set_options(port, co, baud, parity, bits, flow);
return 0;
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_offset;
+ uint dp_addr;
u8 *mem_addr;
- dma_addr_t dma_addr = 0;
+ dma_addr_t dma_addr;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_DPERR(dp_offset)) {
+ dp_mem = m8xx_cpm_dpalloc(dpmemsz);
+ if (dp_mem == NULL) {
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_mem = cpm_dpram_addr(dp_offset);
+ dp_addr = m8xx_cpm_dpram_offset(dp_mem);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
+ m8xx_cpm_dpfree(dp_mem);
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_offset;
+ pinfo->dp_addr = dp_addr;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- cpm_dpfree(pinfo->dp_addr);
+ m8xx_cpm_dpfree(m8xx_cpm_dpram_addr(pinfo->dp_addr));
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- cpm_setbrg(brg, baud);
+ m8xx_cpm_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_offset;
+ uint dp_addr;
u8 *mem_addr;
dma_addr_t dma_addr = 0;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_DPERR(dp_offset)) {
+ dp_mem = cpm2_dpalloc(dpmemsz, 8);
+ if (dp_mem == NULL) {
printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate buffer descriptors\n");
+ "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_mem = cpm_dpram_addr(dp_offset);
+ dp_addr = cpm2_dpram_offset(dp_mem);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
+ cpm2_dpfree(dp_mem);
printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate coherent memory\n");
+ "cpm_uart_cpm1.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_offset;
+ pinfo->dp_addr = dp_addr;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- cpm_dpfree(pinfo->dp_addr);
+ cpm2_dpfree(&pinfo->dp_addr);
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- cpm_setbrg(brg, baud);
+ cpm2_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
+++ /dev/null
-/*
- * drivers/serial/mpc52xx_uart.c
- *
- * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
- *
- * FIXME According to the usermanual the status bits in the status register
- * are only updated when the peripherals access the FIFO and not when the
- * CPU access them. So since we use this bits to know when we stop writing
- * and reading, they may not be updated in-time and a race condition may
- * exists. But I haven't be able to prove this and I don't care. But if
- * any problem arises, it might worth checking. The TX/RX FIFO Stats
- * registers should be used in addition.
- * Update: Actually, they seem updated ... At least the bits we use.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Some of the code has been inspired/copied from the 2.4 code written
- * by Dale Farnsworth <dfarnsworth@mvista.com>.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/* OCP Usage :
- *
- * This drivers uses the OCP model. To load the serial driver for one of the
- * PSCs, just add this to the core_ocp table :
- *
- * {
- * .vendor = OCP_VENDOR_FREESCALE,
- * .function = OCP_FUNC_PSC_UART,
- * .index = 0,
- * .paddr = MPC52xx_PSC1,
- * .irq = MPC52xx_PSC1_IRQ,
- * .pm = OCP_CPM_NA,
- * },
- *
- * This is for PSC1, replace the paddr and irq according to the PSC you want to
- * use. The driver all necessary registers to place the PSC in uart mode without
- * DCD. However, the pin multiplexing aren't changed and should be set either
- * by the bootloader or in the platform init code.
- * The index field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
- * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so
- * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for
- * the console code : without this 1:1 mapping, at early boot time, when we are
- * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be
- * mapped to because OCP stuff is not yet initialized.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/sysrq.h>
-#include <linux/console.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/ocp.h>
-
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-
-#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/serial_core.h>
-
-
-
-#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */
-
-
-static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
- /* Rem: - We use the read_status_mask as a shadow of
- * psc->mpc52xx_psc_imr
- * - It's important that is array is all zero on start as we
- * use it to know if it's initialized or not ! If it's not sure
- * it's cleared, then a memset(...,0,...) should be added to
- * the console_init
- */
-
-#define PSC(port) ((struct mpc52xx_psc *)((port)->membase))
-
-
-/* Forward declaration of the interruption handling routine */
-static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id,struct pt_regs *regs);
-
-
-/* Simple macro to test if a port is console or not. This one is taken
- * for serial_core.c and maybe should be moved to serial_core.h ? */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
-#else
-#define uart_console(port) (0)
-#endif
-
-
-/* ======================================================================== */
-/* UART operations */
-/* ======================================================================== */
-
-static unsigned int
-mpc52xx_uart_tx_empty(struct uart_port *port)
-{
- int status = in_be16(&PSC(port)->mpc52xx_psc_status);
- return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
-}
-
-static void
-mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- /* Not implemented */
-}
-
-static unsigned int
-mpc52xx_uart_get_mctrl(struct uart_port *port)
-{
- /* Not implemented */
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
-}
-
-static void
-mpc52xx_uart_stop_tx(struct uart_port *port, unsigned int tty_stop)
-{
- /* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_start_tx(struct uart_port *port, unsigned int tty_start)
-{
- /* port->lock taken by caller */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
-{
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
-
- port->x_char = ch;
- if (ch) {
- /* Make sure tx interrupts are on */
- /* Truly necessary ??? They should be anyway */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
- }
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void
-mpc52xx_uart_stop_rx(struct uart_port *port)
-{
- /* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_enable_ms(struct uart_port *port)
-{
- /* Not implemented */
-}
-
-static void
-mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
-
- if ( ctl == -1 )
- out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
- else
- out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int
-mpc52xx_uart_startup(struct uart_port *port)
-{
- struct mpc52xx_psc *psc = PSC(port);
-
- /* Reset/activate the port, clear and enable interrupts */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- out_be32(&psc->sicr,0); /* UART mode DCD ignored */
-
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-
- out_8(&psc->rfcntl, 0x00);
- out_be16(&psc->rfalarm, 0x1ff);
- out_8(&psc->tfcntl, 0x07);
- out_be16(&psc->tfalarm, 0x80);
-
- port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
- out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-
- out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-
- return 0;
-}
-
-static void
-mpc52xx_uart_shutdown(struct uart_port *port)
-{
- struct mpc52xx_psc *psc = PSC(port);
-
- /* Shut down the port, interrupt and all */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- port->read_status_mask = 0;
- out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-}
-
-static void
-mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
- struct termios *old)
-{
- struct mpc52xx_psc *psc = PSC(port);
- unsigned long flags;
- unsigned char mr1, mr2;
- unsigned short ctr;
- unsigned int j, baud, quot;
-
- /* Prepare what we're gonna write */
- mr1 = 0;
-
- switch (new->c_cflag & CSIZE) {
- case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS;
- break;
- case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS;
- break;
- case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS;
- break;
- case CS8:
- default: mr1 |= MPC52xx_PSC_MODE_8_BITS;
- }
-
- if (new->c_cflag & PARENB) {
- mr1 |= (new->c_cflag & PARODD) ?
- MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
- } else
- mr1 |= MPC52xx_PSC_MODE_PARNONE;
-
-
- mr2 = 0;
-
- if (new->c_cflag & CSTOPB)
- mr2 |= MPC52xx_PSC_MODE_TWO_STOP;
- else
- mr2 |= ((new->c_cflag & CSIZE) == CS5) ?
- MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
- MPC52xx_PSC_MODE_ONE_STOP;
-
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
- ctr = quot & 0xffff;
-
- /* Get the lock */
- spin_lock_irqsave(&port->lock, flags);
-
- /* Update the per-port timeout */
- uart_update_timeout(port, new->c_cflag, baud);
-
- /* Do our best to flush TX & RX, so we don't loose anything */
- /* But we don't wait indefinitly ! */
- j = 5000000; /* Maximum wait */
- /* FIXME Can't receive chars since set_termios might be called at early
- * boot for the console, all stuff is not yet ready to receive at that
- * time and that just makes the kernel oops */
- /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
- udelay(1);
-
- if (!j)
- printk( KERN_ERR "mpc52xx_uart.c: "
- "Unable to flush RX & TX fifos in-time in set_termios."
- "Some chars may have been lost.\n" );
-
- /* Reset the TX & RX */
- out_8(&psc->command,MPC52xx_PSC_RST_RX);
- out_8(&psc->command,MPC52xx_PSC_RST_TX);
-
- /* Send new mode settings */
- out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
- out_8(&psc->mode,mr1);
- out_8(&psc->mode,mr2);
- out_8(&psc->ctur,ctr >> 8);
- out_8(&psc->ctlr,ctr & 0xff);
-
- /* Reenable TX & RX */
- out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
- out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-
- /* We're all set, release the lock */
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *
-mpc52xx_uart_type(struct uart_port *port)
-{
- return port->type == PORT_MPC52xx ? "MPC52xx PSC" : NULL;
-}
-
-static void
-mpc52xx_uart_release_port(struct uart_port *port)
-{
- if (port->flags & UPF_IOREMAP) { /* remapped by us ? */
- iounmap(port->membase);
- port->membase = NULL;
- }
-}
-
-static int
-mpc52xx_uart_request_port(struct uart_port *port)
-{
- if (port->flags & UPF_IOREMAP) /* Need to remap ? */
- port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc));
-
- return port->membase != NULL ? 0 : -EBUSY;
-}
-
-static void
-mpc52xx_uart_config_port(struct uart_port *port, int flags)
-{
- if ( (flags & UART_CONFIG_TYPE) &&
- (mpc52xx_uart_request_port(port) == 0) )
- port->type = PORT_MPC52xx;
-}
-
-static int
-mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- if ( ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx )
- return -EINVAL;
-
- if ( (ser->irq != port->irq) ||
- (ser->io_type != SERIAL_IO_MEM) ||
- (ser->baud_base != port->uartclk) ||
- // FIXME Should check addresses/irq as well ?
- (ser->hub6 != 0 ) )
- return -EINVAL;
-
- return 0;
-}
-
-
-static struct uart_ops mpc52xx_uart_ops = {
- .tx_empty = mpc52xx_uart_tx_empty,
- .set_mctrl = mpc52xx_uart_set_mctrl,
- .get_mctrl = mpc52xx_uart_get_mctrl,
- .stop_tx = mpc52xx_uart_stop_tx,
- .start_tx = mpc52xx_uart_start_tx,
- .send_xchar = mpc52xx_uart_send_xchar,
- .stop_rx = mpc52xx_uart_stop_rx,
- .enable_ms = mpc52xx_uart_enable_ms,
- .break_ctl = mpc52xx_uart_break_ctl,
- .startup = mpc52xx_uart_startup,
- .shutdown = mpc52xx_uart_shutdown,
- .set_termios = mpc52xx_uart_set_termios,
-/* .pm = mpc52xx_uart_pm, Not supported yet */
-/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */
- .type = mpc52xx_uart_type,
- .release_port = mpc52xx_uart_release_port,
- .request_port = mpc52xx_uart_request_port,
- .config_port = mpc52xx_uart_config_port,
- .verify_port = mpc52xx_uart_verify_port
-};
-
-
-/* ======================================================================== */
-/* Interrupt handling */
-/* ======================================================================== */
-
-static inline int
-mpc52xx_uart_int_rx_chars(struct uart_port *port, struct pt_regs *regs)
-{
- struct tty_struct *tty = port->info->tty;
- unsigned char ch;
- unsigned short status;
-
- /* While we can read, do so ! */
- while ( (status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
- MPC52xx_PSC_SR_RXRDY) {
-
- /* If we are full, just stop reading */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
- /* Get the char */
- ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
-
- /* Handle sysreq char */
-#ifdef SUPPORT_SYSRQ
- if (uart_handle_sysrq_char(port, ch, regs)) {
- port->sysrq = 0;
- continue;
- }
-#endif
-
- /* Store it */
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = 0;
- port->icount.rx++;
-
- if ( status & (MPC52xx_PSC_SR_PE |
- MPC52xx_PSC_SR_FE |
- MPC52xx_PSC_SR_RB |
- MPC52xx_PSC_SR_OE) ) {
-
- if (status & MPC52xx_PSC_SR_RB) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
- uart_handle_break(port);
- } else if (status & MPC52xx_PSC_SR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
- else if (status & MPC52xx_PSC_SR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- if (status & MPC52xx_PSC_SR_OE) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < (TTY_FLIPBUF_SIZE-1)) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
-
- /* Clear error condition */
- out_8(&PSC(port)->command,MPC52xx_PSC_RST_ERR_STAT);
-
- }
-
- tty->flip.char_buf_ptr++;
- tty->flip.flag_buf_ptr++;
- tty->flip.count++;
-
- }
-
- tty_flip_buffer_push(tty);
-
- return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
-}
-
-static inline int
-mpc52xx_uart_int_tx_chars(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->info->xmit;
-
- /* Process out of band chars */
- if (port->x_char) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return 1;
- }
-
- /* Nothing to do ? */
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- mpc52xx_uart_stop_tx(port,0);
- return 0;
- }
-
- /* Send chars */
- while (in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- }
-
- /* Wake up */
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- /* Maybe we're done after all */
- if (uart_circ_empty(xmit)) {
- mpc52xx_uart_stop_tx(port,0);
- return 0;
- }
-
- return 1;
-}
-
-static irqreturn_t
-mpc52xx_uart_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct uart_port *port = (struct uart_port *) dev_id;
- unsigned long pass = ISR_PASS_LIMIT;
- unsigned int keepgoing;
- unsigned short status;
-
- if ( irq != port->irq ) {
- printk( KERN_WARNING
- "mpc52xx_uart_int : " \
- "Received wrong int %d. Waiting for %d\n",
- irq, port->irq);
- return IRQ_NONE;
- }
-
- spin_lock(&port->lock);
-
- /* While we have stuff to do, we continue */
- do {
- /* If we don't find anything to do, we stop */
- keepgoing = 0;
-
- /* Read status */
- status = in_be16(&PSC(port)->mpc52xx_psc_isr);
- status &= port->read_status_mask;
-
- /* Do we need to receive chars ? */
- /* For this RX interrupts must be on and some chars waiting */
- if ( status & MPC52xx_PSC_IMR_RXRDY )
- keepgoing |= mpc52xx_uart_int_rx_chars(port, regs);
-
- /* Do we need to send chars ? */
- /* For this, TX must be ready and TX interrupt enabled */
- if ( status & MPC52xx_PSC_IMR_TXRDY )
- keepgoing |= mpc52xx_uart_int_tx_chars(port);
-
- /* Limit number of iteration */
- if ( !(--pass) )
- keepgoing = 0;
-
- } while (keepgoing);
-
- spin_unlock(&port->lock);
-
- return IRQ_HANDLED;
-}
-
-
-/* ======================================================================== */
-/* Console ( if applicable ) */
-/* ======================================================================== */
-
-#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE
-
-static void __init
-mpc52xx_console_get_options(struct uart_port *port,
- int *baud, int *parity, int *bits, int *flow)
-{
- struct mpc52xx_psc *psc = PSC(port);
- unsigned char mr1;
-
- /* Read the mode registers */
- out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
- mr1 = in_8(&psc->mode);
-
- /* CT{U,L}R are write-only ! */
- *baud = __res.bi_baudrate ?
- __res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
-
- /* Parse them */
- switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
- case MPC52xx_PSC_MODE_5_BITS: *bits = 5; break;
- case MPC52xx_PSC_MODE_6_BITS: *bits = 6; break;
- case MPC52xx_PSC_MODE_7_BITS: *bits = 7; break;
- case MPC52xx_PSC_MODE_8_BITS:
- default: *bits = 8;
- }
-
- if (mr1 & MPC52xx_PSC_MODE_PARNONE)
- *parity = 'n';
- else
- *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
-}
-
-static void
-mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_port *port = &mpc52xx_uart_ports[co->index];
- struct mpc52xx_psc *psc = PSC(port);
- unsigned int i, j;
-
- /* Disable interrupts */
- out_be16(&psc->mpc52xx_psc_imr, 0);
-
- /* Wait the TX buffer to be empty */
- j = 5000000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
- udelay(1);
-
- /* Write all the chars */
- for ( i=0 ; i<count ; i++ ) {
-
- /* Send the char */
- out_8(&psc->mpc52xx_psc_buffer_8, *s);
-
- /* Line return handling */
- if ( *s++ == '\n' )
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
-
- /* Wait the TX buffer to be empty */
- j = 20000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXEMP) && --j)
- udelay(1);
- }
-
- /* Restore interrupt state */
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
-}
-
-static int __init
-mpc52xx_console_setup(struct console *co, char *options)
-{
- struct uart_port *port = &mpc52xx_uart_ports[co->index];
-
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM)
- return -EINVAL;
-
- /* Basic port init. Needed since we use some uart_??? func before
- * real init for early access */
- port->lock = SPIN_LOCK_UNLOCKED;
- port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
- port->ops = &mpc52xx_uart_ops;
- port->mapbase = MPC52xx_PSCx(co->index);
-
- /* We ioremap ourself */
- port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc));
- if (port->membase == NULL) {
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
- return -EBUSY;
- }
-
- /* Setup the port parameters accoding to options */
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-
-extern struct uart_driver mpc52xx_uart_driver;
-
-static struct console mpc52xx_console = {
- .name = "ttyS",
- .write = mpc52xx_console_write,
- .device = uart_console_device,
- .setup = mpc52xx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */
- .data = &mpc52xx_uart_driver,
-};
-
-
-static int __init
-mpc52xx_console_init(void)
-{
- register_console(&mpc52xx_console);
- return 0;
-}
-
-console_initcall(mpc52xx_console_init);
-
-#define MPC52xx_PSC_CONSOLE &mpc52xx_console
-#else
-#define MPC52xx_PSC_CONSOLE NULL
-#endif
-
-
-/* ======================================================================== */
-/* UART Driver */
-/* ======================================================================== */
-
-static struct uart_driver mpc52xx_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "mpc52xx_psc_uart",
- .dev_name = "ttyS",
- .devfs_name = "ttyS",
- .major = TTY_MAJOR,
- .minor = 64,
- .nr = MPC52xx_PSC_MAXNUM,
- .cons = MPC52xx_PSC_CONSOLE,
-};
-
-
-/* ======================================================================== */
-/* OCP Driver */
-/* ======================================================================== */
-
-static int __devinit
-mpc52xx_uart_probe(struct ocp_device *ocp)
-{
- struct uart_port *port = NULL;
- int idx, ret;
-
- /* Get the corresponding port struct */
- idx = ocp->def->index;
- if (idx < 0 || idx >= MPC52xx_PSC_MAXNUM)
- return -EINVAL;
-
- port = &mpc52xx_uart_ports[idx];
-
- /* Init the port structure */
- port->lock = SPIN_LOCK_UNLOCKED;
- port->mapbase = ocp->def->paddr;
- port->irq = ocp->def->irq;
- port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
- port->fifosize = 255; /* Should be 512 ! But it can't be */
- /* stored in a unsigned char */
- port->iotype = UPIO_MEM;
- port->flags = UPF_BOOT_AUTOCONF |
- ( uart_console(port) ? 0 : UPF_IOREMAP );
- port->line = idx;
- port->ops = &mpc52xx_uart_ops;
- port->read_status_mask = 0;
-
- /* Requests the mem & irqs */
- /* Unlike other serial drivers, we reserve the resources here, so we
- * can detect early if multiple drivers uses the same PSC. Special
- * care must be taken with the console PSC
- */
- ret = request_irq(
- port->irq, mpc52xx_uart_int,
- SA_INTERRUPT | SA_SAMPLE_RANDOM, "mpc52xx_psc_uart", port);
- if (ret)
- goto error;
-
- ret = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
- "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
- if (ret)
- goto free_irq;
-
- /* Add the port to the uart sub-system */
- ret = uart_add_one_port(&mpc52xx_uart_driver, port);
- if (ret)
- goto release_mem;
-
- ocp_set_drvdata(ocp, (void*)port);
-
- return 0;
-
-
-free_irq:
- free_irq(port->irq, mpc52xx_uart_int);
-
-release_mem:
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
-
-error:
- if (uart_console(port))
- printk( "mpc52xx_uart.c: Error during resource alloction for "
- "the console port !!! Check that the console PSC is "
- "not used by another OCP driver !!!\n" );
-
- return ret;
-}
-
-static void
-mpc52xx_uart_remove(struct ocp_device *ocp)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- ocp_set_drvdata(ocp, NULL);
-
- if (port) {
- uart_remove_one_port(&mpc52xx_uart_driver, port);
- release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
- free_irq(port->irq, mpc52xx_uart_int);
- }
-}
-
-#ifdef CONFIG_PM
-static int
-mpc52xx_uart_suspend(struct ocp_device *ocp, u32 state)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- uart_suspend_port(&mpc52xx_uart_driver, port);
-
- return 0;
-}
-
-static int
-mpc52xx_uart_resume(struct ocp_device *ocp)
-{
- struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp);
-
- uart_resume_port(&mpc52xx_uart_driver, port);
-
- return 0;
-}
-#endif
-
-static struct ocp_device_id mpc52xx_uart_ids[] __devinitdata = {
- { .vendor = OCP_VENDOR_FREESCALE, .function = OCP_FUNC_PSC_UART },
- { .vendor = OCP_VENDOR_INVALID /* Terminating entry */ }
-};
-
-MODULE_DEVICE_TABLE(ocp, mpc52xx_uart_ids);
-
-static struct ocp_driver mpc52xx_uart_ocp_driver = {
- .name = "mpc52xx_psc_uart",
- .id_table = mpc52xx_uart_ids,
- .probe = mpc52xx_uart_probe,
- .remove = mpc52xx_uart_remove,
-#ifdef CONFIG_PM
- .suspend = mpc52xx_uart_suspend,
- .resume = mpc52xx_uart_resume,
-#endif
-};
-
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_uart_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Serial: MPC52xx PSC driver\n");
-
- ret = uart_register_driver(&mpc52xx_uart_driver);
- if (ret)
- return ret;
-
- ret = ocp_register_driver(&mpc52xx_uart_ocp_driver);
-
- return ret;
-}
-
-static void __exit
-mpc52xx_uart_exit(void)
-{
- ocp_unregister_driver(&mpc52xx_uart_ocp_driver);
- uart_unregister_driver(&mpc52xx_uart_driver);
-}
-
-
-module_init(mpc52xx_uart_init);
-module_exit(mpc52xx_uart_exit);
-
-MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
-MODULE_DESCRIPTION("Freescale MPC52xx PSC UART");
-MODULE_LICENSE("GPL");
--- /dev/null
+#
+# Make file for the Marvell MPSC driver.
+#
+
+obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_PPC32) += mpsc_ppc32.o
--- /dev/null
+/*
+ * drivers/serial/mpsc/mpsc.c
+ *
+ * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
+ * GT64260, MV64340, MV64360, GT96100, ... ).
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Based on an old MPSC driver that was in the linuxppc tree. It appears to
+ * have been created by Chris Zankel (formerly of MontaVista) but there
+ * is no proper Copyright so I'm not sure. Parts were, apparently, also
+ * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
+ * by Russell King.
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+/*
+ * The MPSC interface is much like a typical network controller's interface.
+ * That is, you set up separate rings of descriptors for transmitting and
+ * receiving data. There is also a pool of buffers (one buffer per
+ * descriptor) that incoming data are dma'd into or outgoing data are dma'd
+ * out of.
+ *
+ * The MPSC requires two other controllers to be able to work. The Baud Rate
+ * Generator (BRG) provides a clock at programmable frequencies which determine
+ * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
+ * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
+ * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
+ * transmit and receive "engines" going (i.e., indicate data has been
+ * transmitted or received).
+ *
+ * NOTES:
+ *
+ * 1) Some chips have an erratum where several regs cannot be
+ * read. To work around that, we keep a local copy of those regs in
+ * 'mpsc_port_info_t' and use the *_M macros when accessing those regs.
+ *
+ * 2) Some chips have an erratum where the chip will hang when the SDMA ctlr
+ * accesses system mem in a cache coherent region. This *should* be a
+ * show-stopper when coherency is turned on but it seems to work okay as
+ * long as there are no snoop hits. Therefore, there are explicit cache
+ * management macros in addition to the dma_* calls--the dma_* calls don't
+ * do cache mgmt on coherent systems--to manage the cache ensuring there
+ * are no snoop hits.
+ *
+ * 3) AFAICT, hardware flow control isn't supported by the controller --MAG.
+ */
+
+#include "mpsc.h"
+
+/*
+ * Define how this driver is known to the outside (we've been assigned a
+ * range on the "Low-density serial ports" major).
+ */
+#define MPSC_MAJOR 204
+#define MPSC_MINOR_START 5 /* XXXX */
+#define MPSC_DRIVER_NAME "MPSC"
+#define MPSC_DEVFS_NAME "ttym/"
+#define MPSC_DEV_NAME "ttyM"
+#define MPSC_VERSION "1.00"
+
+static mpsc_port_info_t mpsc_ports[MPSC_NUM_CTLRS];
+
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ ******************************************************************************
+ *
+ * Baud Rate Generator Routines (BRG)
+ *
+ ******************************************************************************
+ */
+static void
+mpsc_brg_init(mpsc_port_info_t *pi, u32 clk_src)
+{
+ if (pi->brg_can_tune) {
+ MPSC_MOD_FIELD_M(pi, brg, BRG_BCR, 1, 25, 0);
+ }
+
+ MPSC_MOD_FIELD_M(pi, brg, BRG_BCR, 4, 18, clk_src);
+ MPSC_MOD_FIELD(pi, brg, BRG_BTR, 16, 0, 0);
+ return;
+}
+
+static void
+mpsc_brg_enable(mpsc_port_info_t *pi)
+{
+ MPSC_MOD_FIELD_M(pi, brg, BRG_BCR, 1, 16, 1);
+ return;
+}
+
+static void
+mpsc_brg_disable(mpsc_port_info_t *pi)
+{
+ MPSC_MOD_FIELD_M(pi, brg, BRG_BCR, 1, 16, 0);
+ return;
+}
+
+static inline void
+mpsc_set_baudrate(mpsc_port_info_t *pi, u32 baud)
+{
+ /*
+ * To set the baud, we adjust the CDV field in the BRG_BCR reg.
+ * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
+ * However, the input clock is divided by 16 in the MPSC b/c of how
+ * 'MPSC_MMCRH' was set up so we have to divide 'clk' used in our
+ * calculation by 16 to account for that. So the real calculation
+ * that accounts for the way the mpsc is set up is:
+ * CDV = (clk / (baud*32)) - 1 ==> CDV = (clk / (baud << 5)) -1.
+ */
+ u32 cdv = (pi->port.uartclk/(baud << 5)) - 1;
+
+ mpsc_brg_disable(pi);
+ MPSC_MOD_FIELD_M(pi, brg, BRG_BCR, 16, 0, cdv);
+ mpsc_brg_enable(pi);
+
+ return;
+}
+
+/*
+ ******************************************************************************
+ *
+ * Serial DMA Routines (SDMA)
+ *
+ ******************************************************************************
+ */
+
+static void
+mpsc_sdma_burstsize(mpsc_port_info_t *pi, u32 burst_size)
+{
+ u32 v;
+
+ DBG("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
+ pi->port.line, burst_size);
+
+ burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
+
+ if (burst_size < 2) v = 0x0; /* 1 64-bit word */
+ else if (burst_size < 4) v = 0x1; /* 2 64-bit words */
+ else if (burst_size < 8) v = 0x2; /* 4 64-bit words */
+ else v = 0x3; /* 8 64-bit words */
+
+ MPSC_MOD_FIELD(pi, sdma, SDMA_SDC, 2, 12, v);
+ return;
+}
+
+static void
+mpsc_sdma_init(mpsc_port_info_t *pi, u32 burst_size)
+{
+ DBG("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line, burst_size);
+
+ MPSC_MOD_FIELD(pi, sdma, SDMA_SDC, 10, 0, 0x03f);
+ mpsc_sdma_burstsize(pi, burst_size);
+ return;
+}
+
+/*
+ * Disable (mask) the SDMA interrupts given in 'mask' for this port.
+ * Returns the previous 4-bit mask state for this port so callers can
+ * restore it later.
+ */
+static inline u32
+mpsc_sdma_intr_mask(mpsc_port_info_t *pi, u32 mask)
+{
+	u32 prev, cur;
+
+	DBG("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
+
+	prev = cur = MPSC_READ_M(pi, sdma_intr, SDMA_INTR_MASK);
+
+	mask &= 0xf;
+	if (pi->port.line)
+		mask <<= 8;	/* ctlr 1's bits live in the next byte up */
+	cur &= ~mask;
+	MPSC_WRITE_M(pi, sdma_intr, SDMA_INTR_MASK, cur);
+
+	if (pi->port.line)
+		prev >>= 8;
+	return prev & 0xf;
+}
+
+/* Enable (unmask) the SDMA interrupts given in 'mask' for this port. */
+static inline void
+mpsc_sdma_intr_unmask(mpsc_port_info_t *pi, u32 mask)
+{
+	u32 cur;
+
+	DBG("mpsc_sdma_intr_unmask[%d]: clk_src: 0x%x\n", pi->port.line, mask);
+
+	mask &= 0xf;
+	if (pi->port.line)
+		mask <<= 8;	/* ctlr 1's bits live in the next byte up */
+
+	cur = MPSC_READ_M(pi, sdma_intr, SDMA_INTR_MASK);
+	MPSC_WRITE_M(pi, sdma_intr, SDMA_INTR_MASK, cur | mask);
+}
+
+/* Acknowledge all pending SDMA interrupts by clearing the cause register. */
+static inline void
+mpsc_sdma_intr_ack(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
+	MPSC_WRITE(pi, sdma_intr, SDMA_INTR_CAUSE, 0);
+}
+
+/* Point the SDMA current-Rx-descriptor register at the given ring entry. */
+static inline void
+mpsc_sdma_set_rx_ring(mpsc_port_info_t *pi, mpsc_rx_desc_t *rxre_p)
+{
+	DBG("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
+		pi->port.line, (uint)rxre_p);
+
+	MPSC_WRITE(pi, sdma, SDMA_SCRDP, (u32)rxre_p);
+}
+
+/*
+ * Point both the first- and current-Tx-descriptor registers at the given
+ * ring entry.  Uses (u32) casts for consistency with the Rx counterpart:
+ * the original cast to (int) passed a signed value as a register address.
+ */
+static inline void
+mpsc_sdma_set_tx_ring(mpsc_port_info_t *pi, volatile mpsc_tx_desc_t *txre_p)
+{
+	MPSC_WRITE(pi, sdma, SDMA_SFTDP, (u32)txre_p);
+	MPSC_WRITE(pi, sdma, SDMA_SCTDP, (u32)txre_p);
+	return;
+}
+
+/*
+ * Issue an SDMA command.  A non-zero 'val' ORs command bits into the
+ * command register; val == 0 clears the register entirely.
+ */
+static inline void
+mpsc_sdma_cmd(mpsc_port_info_t *pi, u32 val)
+{
+	u32 sdcm;
+
+	/* Read unconditionally (keeps the register access pattern intact) */
+	sdcm = MPSC_READ(pi, sdma, SDMA_SDCM);
+	sdcm = val ? (sdcm | val) : 0;
+	MPSC_WRITE(pi, sdma, SDMA_SDCM, sdcm);
+}
+
+/* Aim the Tx ring pointers at 'txre_p' and kick off the Tx DMA engine. */
+static inline void
+mpsc_sdma_start_tx(mpsc_port_info_t *pi, volatile mpsc_tx_desc_t *txre_p)
+{
+	mpsc_sdma_set_tx_ring(pi, txre_p);
+	mpsc_sdma_cmd(pi, SDMA_SDCM_TXD);
+}
+
+/*
+ * Bring the SDMA engine to a full stop: abort any in-flight transfers,
+ * clear the descriptor pointers, and mask + ack its interrupts.
+ * The order (abort before clearing pointers, delay after ack) follows
+ * the hardware's expectations -- do not reorder.
+ */
+static inline void
+mpsc_sdma_stop(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
+
+	/* Abort any SDMA transfers */
+	mpsc_sdma_cmd(pi, 0);
+	mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
+
+	/* Clear the SDMA current and first TX and RX pointers */
+	mpsc_sdma_set_tx_ring(pi, 0);
+	mpsc_sdma_set_rx_ring(pi, 0);
+	/* udelay(100); XXXX was in original gt64260 driver */
+
+	/* Disable interrupts */
+	mpsc_sdma_intr_mask(pi, 0xf);
+	mpsc_sdma_intr_ack(pi);
+	/* Give the aborts time to take effect before callers proceed */
+	udelay(1000);
+
+	return;
+}
+
+/*
+ ******************************************************************************
+ *
+ * Multi-Protocol Serial Controller Routines (MPSC)
+ *
+ ******************************************************************************
+ */
+
+/*
+ * One-time MPSC hardware setup: route the clocks, select UART mode,
+ * and zero the character/control registers.  The magic register values
+ * come from the Marvell documentation for this controller.
+ */
+static void
+mpsc_hw_init(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
+
+	/* Set up clock routing */
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_MRR, 3, 0, 0);
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_MRR, 3, 6, 0);
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_RCRR, 4, 0, 0);
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_RCRR, 4, 8, 1);
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_TCRR, 4, 0, 0);
+	MPSC_MOD_FIELD_M(pi, mpsc_routing, MPSC_TCRR, 4, 8, 1);
+
+	/* Put MPSC in UART mode & enable Tx/Rx engines */
+	MPSC_WRITE(pi, mpsc, MPSC_MMCRL, 0x000004c4);
+
+	/* No preamble, 16x divider, low-latency, */
+	MPSC_WRITE(pi, mpsc, MPSC_MMCRH, 0x04400400);
+
+	/* Zero out all character/control registers; CHR_3 gets max idle */
+	MPSC_WRITE_M(pi, mpsc, MPSC_CHR_1, 0);
+	MPSC_WRITE_M(pi, mpsc, MPSC_CHR_2, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_3, pi->mpsc_max_idle);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_4, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_5, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_6, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_7, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_8, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_9, 0);
+	MPSC_WRITE(pi, mpsc, MPSC_CHR_10, 0);
+
+	return;
+}
+
+/*
+ * Put the receiver into "hunt" mode (search for the start of a frame).
+ * When register mirroring is in use the EH bit cannot be polled back,
+ * so we simply wait a fixed time; otherwise spin until the hardware
+ * clears the enter-hunt bit.
+ */
+static inline void
+mpsc_enter_hunt(mpsc_port_info_t *pi)
+{
+	u32 v;
+
+	DBG("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
+
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_CHR_2, 1, 31, 1);
+
+	if (pi->mirror_regs) {
+		/* Can't read the real register back; give hw time instead */
+		udelay(100);
+	}
+	else
+		do {
+			v = MPSC_READ_M(pi, mpsc, MPSC_CHR_2);
+		} while (v & MPSC_CHR_2_EH);
+
+	return;
+}
+
+/* Pause the transmitter by setting the freeze bit in MPCR. */
+static void
+mpsc_freeze(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_freeze[%d]: Freezing\n", pi->port.line);
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_MPCR, 1, 9, 1);
+}
+
+/* Resume the transmitter by clearing the freeze bit in MPCR. */
+static inline void
+mpsc_unfreeze(mpsc_port_info_t *pi)
+{
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_MPCR, 1, 9, 0);
+	DBG("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
+}
+
+/* Set the character length field (MPCR bits 13:12); 'len' is MPSC_MPCR_CL_*. */
+static inline void
+mpsc_set_char_length(mpsc_port_info_t *pi, u32 len)
+{
+	DBG("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line, len);
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_MPCR, 2, 12, len);
+}
+
+/* Set the stop-bit length field (MPCR bit 14); 'len' is MPSC_MPCR_SBL_*. */
+static inline void
+mpsc_set_stop_bit_length(mpsc_port_info_t *pi, u32 len)
+{
+	DBG("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",pi->port.line,len);
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_MPCR, 1, 14, len);
+}
+
+/* Program the same parity mode into both Tx (TPM) and Rx (RPM) fields. */
+static inline void
+mpsc_set_parity(mpsc_port_info_t *pi, u32 p)
+{
+	DBG("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
+
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_CHR_2, 2, 2, p);	/* TPM */
+	MPSC_MOD_FIELD_M(pi, mpsc, MPSC_CHR_2, 2, 18, p);	/* RPM */
+}
+
+/*
+ ******************************************************************************
+ *
+ * Driver Init Routines
+ *
+ ******************************************************************************
+ */
+
+/*
+ * Full hardware bring-up for one port: BRG, SDMA (quiesced), then the
+ * MPSC block itself.
+ */
+static void
+mpsc_init_hw(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
+
+	mpsc_brg_init(pi, pi->brg_clk_src);
+	mpsc_brg_enable(pi);
+	/* Burst a cacheline at a time */
+	mpsc_sdma_init(pi, dma_get_cache_alignment());
+	mpsc_sdma_stop(pi);
+	mpsc_hw_init(pi);
+}
+
+/*
+ * Allocate the descriptor region (DMA-coherent) and the data-buffer
+ * region (kmalloc) for this port's Tx/Rx rings.  Idempotent: does
+ * nothing if the descriptor region already exists.
+ *
+ * Returns 0 on success, -ENXIO if 32-bit DMA isn't available, or
+ * -ENOMEM on allocation failure (any partial allocation is freed).
+ */
+static int
+mpsc_alloc_ring_mem(mpsc_port_info_t *pi)
+{
+	int rc = 0;
+	/* Forward decl: mpsc_free_ring_mem() is defined below this function */
+	static void mpsc_free_ring_mem(mpsc_port_info_t *pi);
+
+	DBG("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n", pi->port.line);
+
+	/* Extra alignment slop so rings/bufs can be cacheline-aligned */
+	pi->desc_region_size = MPSC_TXR_SIZE + MPSC_RXR_SIZE +
+		(2 * MPSC_DESC_ALIGN);
+	pi->buf_region_size = MPSC_TXB_SIZE + MPSC_RXB_SIZE +
+		(2 * MPSC_BUF_ALIGN);
+
+	if (!pi->desc_region) {
+		if (!dma_supported(pi->port.dev, 0xffffffff)) {
+			printk(KERN_ERR "MPSC: inadequate DMA support\n");
+			rc = -ENXIO;
+		}
+		else if ((pi->desc_region = dma_alloc_coherent(pi->port.dev,
+			pi->desc_region_size, &pi->desc_region_p,
+			GFP_KERNEL)) == NULL) {
+
+			printk(KERN_ERR "MPSC: can't alloc Desc region\n");
+			rc = -ENOMEM;
+		}
+		else if ((pi->buf_region = kmalloc(pi->buf_region_size,
+			GFP_KERNEL)) == NULL) {
+
+			printk(KERN_ERR "MPSC: can't alloc bufs\n");
+			/* Undo the desc_region allocation above */
+			mpsc_free_ring_mem(pi);
+			rc = -ENOMEM;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Release the descriptor and buffer regions allocated by
+ * mpsc_alloc_ring_mem().  Safe to call with either region already
+ * freed/NULL.  Caches are invalidated first so no dirty lines are
+ * written back into freed memory.
+ */
+static void
+mpsc_free_ring_mem(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
+
+	if (pi->desc_region) {
+		MPSC_CACHE_INVALIDATE(pi, pi->desc_region,
+			pi->desc_region + pi->desc_region_size);
+		dma_free_coherent(pi->port.dev, pi->desc_region_size,
+			pi->desc_region, pi->desc_region_p);
+		pi->desc_region = NULL;
+		pi->desc_region_p = 0;	/* dma_addr_t is an integer handle, not a pointer */
+	}
+
+	if (pi->buf_region) {
+		MPSC_CACHE_INVALIDATE(pi, pi->buf_region,
+			pi->buf_region + pi->buf_region_size);
+		kfree(pi->buf_region);
+		pi->buf_region = NULL;
+	}
+
+	return;
+}
+
+/*
+ * Carve the Rx and Tx descriptor rings and their data buffers out of the
+ * pre-allocated regions, link each ring circularly, and map the buffers
+ * for DMA.  The cache flush/invalidate calls keep descriptor memory
+ * coherent with the SDMA engine on non-coherent hardware -- their
+ * placement relative to the writes is deliberate.
+ */
+static void
+mpsc_init_rings(mpsc_port_info_t *pi)
+{
+	mpsc_rx_desc_t *rxre, *rxre_p;
+	mpsc_tx_desc_t *txre, *txre_p;
+	u32 bp_p, save_first, i;
+	u8 *bp;
+
+	DBG("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
+
+	BUG_ON((pi->desc_region == NULL) || (pi->buf_region == NULL));
+
+	memset(pi->desc_region, 0, pi->desc_region_size);
+	memset(pi->buf_region, 0, pi->buf_region_size);
+
+	/* Rx ring + buffers come first in each region, cacheline-aligned */
+	pi->rxr = (mpsc_rx_desc_t *)ALIGN((u32)pi->desc_region,
+		(u32)MPSC_DESC_ALIGN);
+	pi->rxr_p = (mpsc_rx_desc_t *)ALIGN((u32)pi->desc_region_p,
+		(u32)MPSC_DESC_ALIGN);
+	pi->rxb = (u8 *)ALIGN((u32)pi->buf_region, (u32)MPSC_BUF_ALIGN);
+	pi->rxb_p = __pa(pi->rxb);
+
+	rxre = pi->rxr;
+	rxre_p = pi->rxr_p;
+	save_first = (u32)rxre_p;	/* phys addr of entry 0, for the wrap link */
+	bp = pi->rxb;
+	bp_p = pi->rxb_p;
+	for (i=0; i<MPSC_RXR_ENTRIES; i++,rxre++,rxre_p++) {
+		rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
+		rxre->bytecnt = cpu_to_be16(0);
+		/* Owned by SDMA, interrupt on completion, single-frame */
+		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+			SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F |
+			SDMA_DESC_CMDSTAT_L);
+		rxre->link = cpu_to_be32(rxre_p + 1);
+		rxre->buf_ptr = cpu_to_be32(bp_p);
+		MPSC_CACHE_FLUSH(pi, rxre, rxre + 1);
+		dma_map_single(pi->port.dev, bp, MPSC_RXBE_SIZE,
+			DMA_FROM_DEVICE);
+		MPSC_CACHE_INVALIDATE(pi, bp, bp + MPSC_RXBE_SIZE);
+		bp += MPSC_RXBE_SIZE;
+		bp_p += MPSC_RXBE_SIZE;
+	}
+	(--rxre)->link = cpu_to_be32(save_first); /* Wrap last back to first */
+	MPSC_CACHE_FLUSH(pi, rxre, rxre + 1);
+
+	/* Tx ring + buffers follow the Rx ones in the same regions */
+	pi->txr = (mpsc_tx_desc_t *)ALIGN((u32)&pi->rxr[MPSC_RXR_ENTRIES],
+		(u32)MPSC_DESC_ALIGN);
+	pi->txr_p = (mpsc_tx_desc_t *)ALIGN((u32)&pi->rxr_p[MPSC_RXR_ENTRIES],
+		(u32)MPSC_DESC_ALIGN);
+	pi->txb = (u8 *)ALIGN((u32)(pi->rxb + MPSC_RXB_SIZE),
+		(u32)MPSC_BUF_ALIGN);
+	pi->txb_p = __pa(pi->txb);
+
+	txre = pi->txr;
+	txre_p = pi->txr_p;
+	save_first = (u32)txre_p;
+	bp = pi->txb;
+	bp_p = pi->txb_p;
+	for (i=0; i<MPSC_TXR_ENTRIES; i++,txre++,txre_p++) {
+		/* Tx cmdstat is filled in per-transfer by mpsc_send_tx_data() */
+		txre->link = cpu_to_be32(txre_p + 1);
+		txre->buf_ptr = cpu_to_be32(bp_p);
+		MPSC_CACHE_FLUSH(pi, txre, txre + 1);
+		dma_map_single(pi->port.dev, bp, MPSC_TXBE_SIZE, DMA_TO_DEVICE);
+		bp += MPSC_TXBE_SIZE;
+		bp_p += MPSC_TXBE_SIZE;
+	}
+	(--txre)->link = cpu_to_be32(save_first); /* Wrap last back to first */
+	MPSC_CACHE_FLUSH(pi, txre, txre + 1);
+
+	return;
+}
+
+/*
+ * Tear down the Rx and Tx rings: unmap every DMA buffer and reset the
+ * ring bookkeeping in the port structure.  The underlying regions are
+ * released separately by mpsc_free_ring_mem().
+ */
+static void
+mpsc_uninit_rings(mpsc_port_info_t *pi)
+{
+	u32 buf_p, n;
+
+	DBG("mpsc_uninit_rings[%d]: Uninitializing rings\n", pi->port.line);
+
+	BUG_ON((pi->desc_region == NULL) || (pi->buf_region == NULL));
+
+	for (n = 0, buf_p = pi->rxb_p; n < MPSC_RXR_ENTRIES;
+			n++, buf_p += MPSC_RXBE_SIZE)
+		dma_unmap_single(pi->port.dev, buf_p, MPSC_RXBE_SIZE,
+			DMA_FROM_DEVICE);
+
+	pi->rxr = NULL;
+	pi->rxr_p = NULL;
+	pi->rxr_posn = 0;
+	pi->rxb = NULL;
+	pi->rxb_p = 0;
+
+	for (n = 0, buf_p = pi->txb_p; n < MPSC_TXR_ENTRIES;
+			n++, buf_p += MPSC_TXBE_SIZE)
+		dma_unmap_single(pi->port.dev, buf_p, MPSC_TXBE_SIZE,
+			DMA_TO_DEVICE);
+
+	pi->txr = NULL;
+	pi->txr_p = NULL;
+	pi->txr_posn = 0;
+	pi->txb = NULL;
+	pi->txb_p = 0;
+}
+
+/*
+ * Lazily bring a controller to a usable state: hardware init, ring
+ * memory allocation, ring setup.  No-op once 'ready' is set.
+ * Returns 0 on success or the ring-allocation error code.
+ */
+static int
+mpsc_make_ready(mpsc_port_info_t *pi)
+{
+	int err;
+
+	DBG("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
+
+	if (pi->ready)
+		return 0;
+
+	mpsc_init_hw(pi);
+	err = mpsc_alloc_ring_mem(pi);
+	if (err)
+		return err;
+
+	mpsc_init_rings(pi);
+	pi->ready = 1;
+	return 0;
+}
+
+/*
+ ******************************************************************************
+ *
+ * Interrupt Handling Routines
+ *
+ ******************************************************************************
+ */
+
+/*
+ * Rx interrupt worker: walk the Rx ring from the current position,
+ * pushing completed frames into the tty flip buffer, translating
+ * break/framing/overrun/parity errors, then handing each descriptor
+ * back to the SDMA engine.  Restarts the Rx engine if it stopped.
+ *
+ * Fixes vs. the previous version:
+ *  - bp_p was computed from pi->txb_p (the Tx buffer base) instead of
+ *    pi->rxb_p, so dma_sync_single_for_*() targeted the wrong region.
+ *  - a parity error is now flagged TTY_PARITY, not TTY_FRAME.
+ */
+static inline void
+mpsc_rx_intr(mpsc_port_info_t *pi, struct pt_regs *regs)
+{
+	volatile mpsc_rx_desc_t *rxre = &pi->rxr[pi->rxr_posn];
+	struct tty_struct *tty = pi->port.info->tty;
+	u32 cmdstat, bytes_in;
+	u8 *bp;
+	dma_addr_t bp_p;
+	static void mpsc_start_rx(mpsc_port_info_t *pi);
+
+	DBG("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
+
+	/*
+	 * Loop through Rx descriptors handling ones that have been completed.
+	 */
+	MPSC_CACHE_INVALIDATE(pi, rxre, rxre + 1);
+
+	while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) & SDMA_DESC_CMDSTAT_O)){
+		bytes_in = be16_to_cpu(rxre->bytecnt);
+
+		/* If the flip buffer is nearly full, drain it first */
+		if (unlikely((tty->flip.count + bytes_in) >= TTY_FLIPBUF_SIZE)){
+			tty->flip.work.func((void *)tty);
+
+			if ((tty->flip.count + bytes_in) >= TTY_FLIPBUF_SIZE) {
+				/* Take what we can, throw away the rest */
+				bytes_in = TTY_FLIPBUF_SIZE - tty->flip.count;
+				cmdstat &= ~SDMA_DESC_CMDSTAT_PE;
+			}
+		}
+
+		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+		bp_p = pi->rxb_p + (pi->rxr_posn * MPSC_RXBE_SIZE);
+
+		dma_sync_single_for_cpu(pi->port.dev, bp_p, MPSC_RXBE_SIZE,
+			DMA_FROM_DEVICE);
+		MPSC_CACHE_INVALIDATE(pi, bp, bp + MPSC_RXBE_SIZE);
+
+		/*
+		 * Other than for parity error, the manual provides little
+		 * info on what data will be in a frame flagged by any of
+		 * these errors.  For parity error, it is the last byte in
+		 * the buffer that had the error.  As for the rest, I guess
+		 * we'll assume there is no data in the buffer.
+		 * If there is...it gets lost.
+		 */
+		if (cmdstat & (SDMA_DESC_CMDSTAT_BR | SDMA_DESC_CMDSTAT_FR |
+			SDMA_DESC_CMDSTAT_OR)) {
+
+			pi->port.icount.rx++;
+
+			if (cmdstat & SDMA_DESC_CMDSTAT_BR) {	/* Break */
+				pi->port.icount.brk++;
+
+				if (uart_handle_break(&pi->port))
+					goto next_frame;
+			}
+			else if (cmdstat & SDMA_DESC_CMDSTAT_FR) /* Framing */
+				pi->port.icount.frame++;
+			else if (cmdstat & SDMA_DESC_CMDSTAT_OR) /* Overrun */
+				pi->port.icount.overrun++;
+
+			cmdstat &= pi->port.read_status_mask;
+
+			if (!(cmdstat & pi->port.ignore_status_mask)) {
+				if (cmdstat & SDMA_DESC_CMDSTAT_BR)
+					*tty->flip.flag_buf_ptr = TTY_BREAK;
+				else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
+					*tty->flip.flag_buf_ptr = TTY_FRAME;
+				else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
+					*tty->flip.flag_buf_ptr = TTY_OVERRUN;
+
+				tty->flip.flag_buf_ptr++;
+				*tty->flip.char_buf_ptr = '\0';
+				tty->flip.char_buf_ptr++;
+				tty->flip.count++;
+			}
+		}
+		else {
+			if (uart_handle_sysrq_char(&pi->port, *bp, regs)) {
+				bp++;
+				bytes_in--;
+			}
+
+			memcpy(tty->flip.char_buf_ptr, bp, bytes_in);
+			memset(tty->flip.flag_buf_ptr, TTY_NORMAL, bytes_in);
+
+			tty->flip.char_buf_ptr += bytes_in;
+			tty->flip.flag_buf_ptr += bytes_in;
+			tty->flip.count += bytes_in;
+			pi->port.icount.rx += bytes_in;
+
+			cmdstat &= SDMA_DESC_CMDSTAT_PE;
+
+			if (cmdstat) {	/* Parity (last byte in buffer) */
+				pi->port.icount.parity++;
+
+				/* NOTE(review): mask test is inverted w.r.t.
+				 * the usual serial_core idiom -- confirm */
+				if (!(cmdstat & pi->port.read_status_mask))
+					*(tty->flip.flag_buf_ptr-1) = TTY_PARITY;
+			}
+		}
+
+next_frame:
+		/* Hand the descriptor back to the SDMA engine */
+		dma_sync_single_for_device(pi->port.dev, bp_p,
+			MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+		rxre->bytecnt = cpu_to_be16(0);
+		wmb();	/* ensure other writes done before cmdstat update */
+		rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+			SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F |
+			SDMA_DESC_CMDSTAT_L);
+		MPSC_CACHE_FLUSH(pi, rxre, rxre + 1);
+
+		/* Advance to next descriptor */
+		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
+		rxre = &pi->rxr[pi->rxr_posn];
+		MPSC_CACHE_INVALIDATE(pi, rxre, rxre + 1);
+	}
+
+	/* Restart rx engine, if its stopped */
+	if ((MPSC_READ(pi, sdma, SDMA_SDCM) & SDMA_SDCM_ERD) == 0) {
+		mpsc_start_rx(pi);
+	}
+
+	tty_flip_buffer_push(tty);
+	return;
+}
+
+/*
+ * Hand one filled Tx buffer ('count' bytes at 'bp') to the SDMA engine
+ * via descriptor 'txre' (phys addr 'txre_p').  'intr' selects whether
+ * this descriptor raises an interrupt on completion.  The ownership bit
+ * is written last (after a wmb) so the engine never sees a half-built
+ * descriptor.  Kicks the Tx engine if it isn't already running.
+ */
+static inline void
+mpsc_send_tx_data(mpsc_port_info_t *pi, volatile mpsc_tx_desc_t *txre,
+	volatile mpsc_tx_desc_t *txre_p, void *bp, u32 count, u32 intr)
+{
+	dma_sync_single_for_device(pi->port.dev, be32_to_cpu(txre->buf_ptr),
+		MPSC_TXBE_SIZE, DMA_TO_DEVICE);
+	MPSC_CACHE_FLUSH(pi, bp, bp + MPSC_TXBE_SIZE);
+
+	txre->bytecnt = cpu_to_be16(count);
+	txre->shadow = txre->bytecnt;
+	wmb();	/* ensure cmdstat is last field updated */
+	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F |
+		SDMA_DESC_CMDSTAT_L | ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
+	MPSC_CACHE_FLUSH(pi, txre, txre + 1);
+
+	/* Start Tx engine, if its stopped */
+	if ((MPSC_READ(pi, sdma, SDMA_SDCM) & SDMA_SDCM_TXD) == 0) {
+		mpsc_sdma_start_tx(pi, txre_p);
+	}
+
+	return;
+}
+
+/*
+ * Tx interrupt worker: for every Tx descriptor the SDMA engine has
+ * released, refill it -- first with any pending x_char, otherwise with
+ * the next run of bytes from the circular xmit buffer -- and hand it
+ * back via mpsc_send_tx_data().  Stops when the ring entry is still
+ * owned by hardware or there is nothing left to send.
+ */
+static inline void
+mpsc_tx_intr(mpsc_port_info_t *pi)
+{
+	volatile mpsc_tx_desc_t *txre = &pi->txr[pi->txr_posn];
+	volatile mpsc_tx_desc_t *txre_p = &pi->txr_p[pi->txr_posn];
+	struct circ_buf *xmit = &pi->port.info->xmit;
+	u8 *bp;
+	u32 i;
+
+	MPSC_CACHE_INVALIDATE(pi, txre, txre + 1);
+
+	while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
+		bp = &pi->txb[pi->txr_posn * MPSC_TXBE_SIZE];
+
+		dma_sync_single_for_cpu(pi->port.dev,be32_to_cpu(txre->buf_ptr),
+			MPSC_TXBE_SIZE, DMA_TO_DEVICE);
+
+		if (pi->port.x_char) {
+			/*
+			 * Ideally, we should use the TCS field in CHR_1 to
+			 * put the x_char out immediately but errata prevents
+			 * us from being able to read CHR_2 to know that its
+			 * safe to write to CHR_1. Instead, just put it
+			 * in-band with all the other Tx data.
+			 */
+			*bp = pi->port.x_char;
+			pi->port.x_char = 0;
+			i = 1;
+		}
+		else if (!uart_circ_empty(xmit) && !uart_tx_stopped(&pi->port)){
+			/* Copy at most one buffer's worth, without wrapping */
+			i = MIN(MPSC_TXBE_SIZE, uart_circ_chars_pending(xmit));
+			i = MIN(i, CIRC_CNT_TO_END(xmit->head, xmit->tail,
+				UART_XMIT_SIZE));
+			memcpy(bp, &xmit->buf[xmit->tail], i);
+			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
+		}
+		else { /* No more data to transmit or tx engine is stopped */
+			MPSC_CACHE_INVALIDATE(pi, txre, txre + 1);
+			return;
+		}
+
+		mpsc_send_tx_data(pi, txre, txre_p, bp, i, 1);
+		pi->port.icount.tx += i;
+
+		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+			uart_write_wakeup(&pi->port);
+
+		/* Advance to next descriptor */
+		pi->txr_posn = (pi->txr_posn + 1) & (MPSC_TXR_ENTRIES - 1);
+		txre = &pi->txr[pi->txr_posn];
+		txre_p = &pi->txr_p[pi->txr_posn];
+		MPSC_CACHE_INVALIDATE(pi, txre, txre + 1);
+	}
+
+	return;
+}
+
+/*
+ * This is the driver's interrupt handler. To avoid a race, we first clear
+ * the interrupt, then handle any completed Rx/Tx descriptors. When done
+ * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
+ */
+/*
+ * Top-level SDMA interrupt handler.  Ack first (so a new event re-raises
+ * the IRQ rather than being lost), then service Rx and Tx rings under
+ * the port lock.
+ */
+static irqreturn_t
+mpsc_sdma_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	mpsc_port_info_t *pi = dev_id;
+	ulong flags;
+
+	DBG("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n", pi->port.line);
+
+	spin_lock_irqsave(&pi->port.lock, flags);
+	mpsc_sdma_intr_ack(pi);
+	mpsc_rx_intr(pi, regs);
+	mpsc_tx_intr(pi);
+	spin_unlock_irqrestore(&pi->port.lock, flags);
+
+	DBG("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
+	return IRQ_HANDLED;
+}
+
+/*
+ ******************************************************************************
+ *
+ * serial_core.c Interface routines
+ *
+ ******************************************************************************
+ */
+
+/* Lockless core of tx_empty: TEMT iff the SDMA Tx demand bit is clear. */
+static uint
+_mpsc_tx_empty(mpsc_port_info_t *pi)
+{
+	u32 sdcm = MPSC_READ(pi, sdma, SDMA_SDCM);
+
+	return (sdcm & SDMA_SDCM_TXD) ? 0 : TIOCSER_TEMT;
+}
+
+/* serial_core tx_empty hook: _mpsc_tx_empty() under the port lock. */
+static uint
+mpsc_tx_empty(struct uart_port *port)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	uint empty;
+	ulong flags;
+
+	spin_lock_irqsave(&pi->port.lock, flags);
+	empty = _mpsc_tx_empty(pi);
+	spin_unlock_irqrestore(&pi->port.lock, flags);
+
+	return empty;
+}
+
+/* serial_core set_mctrl hook: no way to set modem control lines AFAICT. */
+static void
+mpsc_set_mctrl(struct uart_port *port, uint mctrl)
+{
+}
+
+/*
+ * serial_core get_mctrl hook: read CTS/CD pin state from CHR_10.
+ * DSR is always reported asserted since there is no way to read it.
+ */
+static uint
+mpsc_get_mctrl(struct uart_port *port)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	u32 status;
+	u32 mflags = TIOCM_DSR;	/* No way to tell if DSR asserted */
+	ulong flags;
+
+	spin_lock_irqsave(&pi->port.lock, flags);
+	status = MPSC_READ_M(pi, mpsc, MPSC_CHR_10);
+	spin_unlock_irqrestore(&pi->port.lock, flags);
+
+	if (status & 0x1)	/* CTS */
+		mflags |= TIOCM_CTS;
+	if (status & 0x2)	/* CD */
+		mflags |= TIOCM_CAR;
+
+	return mflags;
+}
+
+/* serial_core stop_tx hook: pause transmission by freezing the MPSC. */
+static void
+mpsc_stop_tx(struct uart_port *port, uint tty_start)
+{
+	DBG("mpsc_stop_tx[%d]: tty_start: %d\n", port->line, tty_start);
+	mpsc_freeze((mpsc_port_info_t *)port);
+}
+
+/* serial_core start_tx hook: unfreeze and prime the Tx ring. */
+static void
+mpsc_start_tx(struct uart_port *port, uint tty_start)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+
+	mpsc_unfreeze(pi);
+	mpsc_tx_intr(pi);	/* Load Tx data into Tx ring bufs & go */
+
+	DBG("mpsc_start_tx[%d]: tty_start: %d\n", port->line, tty_start);
+}
+
+/*
+ * (Re)start the Rx engine -- only if reception is enabled (rcv_data),
+ * i.e. CREAD was set in the last termios.
+ */
+static void
+mpsc_start_rx(mpsc_port_info_t *pi)
+{
+	DBG("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
+
+	if (!pi->rcv_data)
+		return;
+
+	mb();	/* settle prior ring/register writes before enabling */
+	mpsc_enter_hunt(pi);
+	mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
+}
+
+/* serial_core stop_rx hook: abort reception on the SDMA engine. */
+static void
+mpsc_stop_rx(struct uart_port *port)
+{
+	DBG("mpsc_stop_rx[%d]: Stopping...\n", port->line);
+	mpsc_sdma_cmd((mpsc_port_info_t *)port, SDMA_SDCM_AR);
+}
+
+/* serial_core enable_ms hook: modem-status interrupts not supported. */
+static void
+mpsc_enable_ms(struct uart_port *port)
+{
+}
+
+/*
+ * serial_core break_ctl hook.  CHR_1 = 0x00ff0000 makes the MPSC send
+ * as many BRK characters as it can; writing 0 stops them.
+ */
+static void
+mpsc_break_ctl(struct uart_port *port, int ctl)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	u32 brk = ctl ? 0x00ff0000 : 0;
+	ulong flags;
+
+	spin_lock_irqsave(&pi->port.lock, flags);
+	MPSC_WRITE_M(pi, mpsc, MPSC_CHR_1, brk);
+	spin_unlock_irqrestore(&pi->port.lock, flags);
+}
+
+/*
+ * serial_core startup hook: bring the controller up, install the SDMA
+ * IRQ handler, and start reception.
+ *
+ * Fixes vs. the previous version: the request_irq() failure path used
+ * to print two malformed messages (the first without a newline) and
+ * then start the port anyway with no handler installed.  Now we print
+ * once, re-mask the interrupts we just unmasked, and fail the open.
+ *
+ * Returns 0 on success, mpsc_make_ready()'s error, or -EBUSY if the
+ * IRQ can't be obtained.
+ */
+static int
+mpsc_startup(struct uart_port *port)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	int rc;
+
+	DBG("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
+		port->line, pi->port.irq);
+
+	if ((rc = mpsc_make_ready(pi)) == 0) {
+		/* Setup IRQ handler */
+		mpsc_sdma_intr_ack(pi);
+		mpsc_sdma_intr_unmask(pi, 0xf);
+
+		if (request_irq(pi->port.irq, mpsc_sdma_intr, 0, "MPSC/SDMA",
+				pi)) {
+			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
+				pi->port.irq);
+			mpsc_sdma_intr_mask(pi, 0xf);	/* undo unmask above */
+			return -EBUSY;
+		}
+
+		mpsc_sdma_set_rx_ring(pi, &pi->rxr_p[pi->rxr_posn]);
+		mpsc_start_rx(pi);
+	}
+
+	return rc;
+}
+
+/* serial_core shutdown hook: quiesce the SDMA engine and release the IRQ. */
+static void
+mpsc_shutdown(struct uart_port *port)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+
+	DBG("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
+
+	mpsc_sdma_stop(pi);
+	free_irq(pi->port.irq, pi);
+}
+
+/*
+ * serial_core set_termios hook: translate termios settings into MPSC
+ * character length, stop bits, parity, and baud rate, and rebuild the
+ * read/ignore status masks.
+ *
+ * Fixes vs. the previous version:
+ *  - 'par' was read uninitialized when PARENB was clear (undefined
+ *    behavior); it now defaults to even parity, which is harmless since
+ *    parity checking is off in that case.
+ *  - removed the unused 'quot' / uart_get_divisor() computation.
+ */
+static void
+mpsc_set_termios(struct uart_port *port, struct termios *termios,
+		 struct termios *old)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	u32 baud;
+	ulong flags;
+	u32 chr_bits, stop_bits, par;
+
+	pi->c_iflag = termios->c_iflag;
+	pi->c_cflag = termios->c_cflag;
+
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		chr_bits = MPSC_MPCR_CL_5;
+		break;
+	case CS6:
+		chr_bits = MPSC_MPCR_CL_6;
+		break;
+	case CS7:
+		chr_bits = MPSC_MPCR_CL_7;
+		break;
+	default:
+	case CS8:
+		chr_bits = MPSC_MPCR_CL_8;
+		break;
+	}
+
+	if (termios->c_cflag & CSTOPB)
+		stop_bits = MPSC_MPCR_SBL_2;
+	else
+		stop_bits = MPSC_MPCR_SBL_1;
+
+	/* Default so 'par' is never used uninitialized when !PARENB */
+	par = MPSC_CHR_2_PAR_EVEN;
+	if (termios->c_cflag & PARENB) {
+		if (termios->c_cflag & PARODD)
+			par = MPSC_CHR_2_PAR_ODD;
+#ifdef	CMSPAR
+		if (termios->c_cflag & CMSPAR) {
+			if (termios->c_cflag & PARODD)
+				par = MPSC_CHR_2_PAR_MARK;
+			else
+				par = MPSC_CHR_2_PAR_SPACE;
+		}
+#endif
+	}
+
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
+
+	spin_lock_irqsave(&pi->port.lock, flags);
+
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	mpsc_set_char_length(pi, chr_bits);
+	mpsc_set_stop_bit_length(pi, stop_bits);
+	mpsc_set_parity(pi, par);
+	mpsc_set_baudrate(pi, baud);
+
+	/* Characters/events to read */
+	pi->rcv_data = 1;
+	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
+
+	if (termios->c_iflag & INPCK)
+		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE |
+			SDMA_DESC_CMDSTAT_FR;
+
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
+
+	/* Characters/events to ignore */
+	pi->port.ignore_status_mask = 0;
+
+	if (termios->c_iflag & IGNPAR)
+		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE |
+			SDMA_DESC_CMDSTAT_FR;
+
+	if (termios->c_iflag & IGNBRK) {
+		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
+
+		if (termios->c_iflag & IGNPAR)
+			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
+	}
+
+	/* Ignore all chars if CREAD not set */
+	if (!(termios->c_cflag & CREAD))
+		pi->rcv_data = 0;
+
+	spin_unlock_irqrestore(&pi->port.lock, flags);
+	return;
+}
+
+/* serial_core type hook: human-readable port type string. */
+static const char *
+mpsc_type(struct uart_port *port)
+{
+	DBG("mpsc_type[%d]: port type: %s\n", port->line, MPSC_DRIVER_NAME);
+	return MPSC_DRIVER_NAME;
+}
+
+/*
+ * serial_core request_port hook.  Nothing to claim here; a chip- or
+ * platform-specific resource request would belong in this hook.
+ */
+static int
+mpsc_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+/* serial_core release_port hook: tear down rings and mark port not ready. */
+static void
+mpsc_release_port(struct uart_port *port)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+
+	mpsc_uninit_rings(pi);
+	mpsc_free_ring_mem(pi);
+	pi->ready = 0;
+}
+
+/* serial_core config_port hook: nothing to auto-configure. */
+static void
+mpsc_config_port(struct uart_port *port, int flags)
+{
+}
+
+/*
+ * serial_core verify_port hook: validate user-supplied serial settings
+ * against what this port actually is.  Returns 0 if acceptable,
+ * -EINVAL otherwise.
+ */
+static int
+mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	mpsc_port_info_t *pi = (mpsc_port_info_t *)port;
+	int rc = 0;
+
+	DBG("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
+
+	if ((ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC) ||
+	    (pi->port.irq != ser->irq) ||
+	    (ser->io_type != SERIAL_IO_MEM) ||
+	    (pi->port.uartclk / 16 != ser->baud_base) || /* XXXX Not sure */
+	    ((void *)pi->port.mapbase != ser->iomem_base) ||
+	    (pi->port.iobase != ser->port) ||
+	    (ser->hub6 != 0))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+/* serial_core operations vector for the MPSC driver */
+static struct uart_ops mpsc_pops = {
+	.tx_empty     = mpsc_tx_empty,
+	.set_mctrl    = mpsc_set_mctrl,
+	.get_mctrl    = mpsc_get_mctrl,
+	.stop_tx      = mpsc_stop_tx,
+	.start_tx     = mpsc_start_tx,
+	.stop_rx      = mpsc_stop_rx,
+	.enable_ms    = mpsc_enable_ms,
+	.break_ctl    = mpsc_break_ctl,
+	.startup      = mpsc_startup,
+	.shutdown     = mpsc_shutdown,
+	.set_termios  = mpsc_set_termios,
+	.type         = mpsc_type,
+	.release_port = mpsc_release_port,
+	.request_port = mpsc_request_port,
+	.config_port  = mpsc_config_port,
+	.verify_port  = mpsc_verify_port,
+};
+
+/*
+ ******************************************************************************
+ *
+ * Console Interface Routines
+ *
+ ******************************************************************************
+ */
+
+#ifdef CONFIG_SERIAL_MPSC_CONSOLE
+/*
+ * Console write: push 's' (count bytes) out through the Tx ring,
+ * one ring buffer at a time, translating '\n' to '\n\r' on the fly.
+ * Busy-waits for the transmitter to drain before reusing each entry
+ * and again before returning, so console output is fully synchronous.
+ */
+static void
+mpsc_console_write(struct console *co, const char *s, uint count)
+{
+	mpsc_port_info_t *pi = &mpsc_ports[co->index];
+	volatile mpsc_tx_desc_t *txre = &pi->txr[pi->txr_posn];
+	volatile mpsc_tx_desc_t *txre_p = &pi->txr_p[pi->txr_posn];
+	u8 *bp, *dp, add_cr = 0;
+	int i;
+
+	/*
+	 * Step thru tx ring one entry at a time, filling up its buf, sending
+	 * the data out and moving to the next ring entry until its all out.
+	 */
+	MPSC_CACHE_INVALIDATE(pi, txre, txre + 1);
+
+	while (count > 0) {
+		/* Wait until the hardware has fully drained the Tx side */
+		while (_mpsc_tx_empty(pi) != TIOCSER_TEMT);
+
+		BUG_ON(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O);
+
+		bp = dp = &pi->txb[pi->txr_posn * MPSC_TXBE_SIZE];
+
+		dma_sync_single_for_cpu(pi->port.dev,be32_to_cpu(txre->buf_ptr),
+			MPSC_TXBE_SIZE, DMA_TO_DEVICE);
+
+		for (i=0; i<MPSC_TXBE_SIZE; i++) {
+			if (count == 0)
+				break;
+
+			if (add_cr) {
+				*(dp++) = '\r';
+				add_cr = 0;
+			}
+			else {
+				*(dp++) = *s;
+
+				if (*(s++) == '\n') { /* add '\r' after '\n' */
+					add_cr = 1;
+					/* account for the extra output byte */
+					count++;
+				}
+			}
+
+			count--;
+		}
+
+		/* No completion interrupt wanted for console output */
+		mpsc_send_tx_data(pi, txre, txre_p, bp, i, 0);
+
+		/* Advance to next descriptor */
+		pi->txr_posn = (pi->txr_posn + 1) & (MPSC_TXR_ENTRIES - 1);
+		txre = &pi->txr[pi->txr_posn];
+		txre_p = &pi->txr_p[pi->txr_posn];
+		MPSC_CACHE_INVALIDATE(pi, txre, txre + 1);
+	}
+
+	while (_mpsc_tx_empty(pi) != TIOCSER_TEMT);
+	return;
+}
+
+/*
+ * Console setup: pick the port, apply platform defaults, then let any
+ * "baud,parity,bits,flow" console= options override them.
+ */
+static int __init
+mpsc_console_setup(struct console *co, char *options)
+{
+	mpsc_port_info_t *pi;
+	int baud, bits, parity, flow;
+
+	DBG("mpsc_console_setup[%d]: options: %s\n", co->index, options);
+
+	if (co->index >= MPSC_NUM_CTLRS)
+		co->index = 0;
+	pi = &mpsc_ports[co->index];
+
+	if (!pi->port.ops)
+		return -ENODEV;
+
+	spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
+
+	baud   = pi->default_baud;
+	bits   = pi->default_bits;
+	parity = pi->default_parity;
+	flow   = pi->default_flow;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(&pi->port, co, baud, parity, bits, flow);
+}
+
+/* Console descriptor; .data links back to the uart_driver defined below */
+extern struct uart_driver mpsc_reg;
+static struct console mpsc_console = {
+	.name   = MPSC_DEV_NAME,
+	.write  = mpsc_console_write,
+	.device = uart_console_device,
+	.setup  = mpsc_console_setup,
+	.flags  = CON_PRINTBUFFER,
+	.index  = -1,	/* first console= match picks the index */
+	.data   = &mpsc_reg,
+};
+
+/* Early console registration (before driver init). */
+static int __init
+mpsc_console_init(void)
+{
+	DBG("mpsc_console_init: Enter\n");
+	register_console(&mpsc_console);
+	return 0;
+}
+console_initcall(mpsc_console_init);
+
+/* Late retry: register the console if the early attempt didn't enable it. */
+static int __init
+mpsc_late_console_init(void)
+{
+	DBG("mpsc_late_console_init: Enter\n");
+
+	if (!(mpsc_console.flags & CON_ENABLED))
+		register_console(&mpsc_console);
+	return 0;
+}
+late_initcall(mpsc_late_console_init);
+
+#define MPSC_CONSOLE &mpsc_console
+#else
+#define MPSC_CONSOLE NULL
+#endif
+
+/*
+ ******************************************************************************
+ *
+ * Driver Interface Routines
+ *
+ ******************************************************************************
+ */
+
+/*
+ * ioremap every register block this port touches.  The physical bases
+ * are supplied by the platform code before mpsc_init() runs.
+ */
+static void
+mpsc_map_regs(mpsc_port_info_t *pi)
+{
+	pi->mpsc_base = (u32)ioremap(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
+	pi->mpsc_routing_base = (u32)ioremap(pi->mpsc_routing_base_p,
+		MPSC_ROUTING_REG_BLOCK_SIZE);
+	pi->sdma_base = (u32)ioremap(pi->sdma_base_p, SDMA_REG_BLOCK_SIZE);
+	pi->sdma_intr_base = (u32)ioremap(pi->sdma_intr_base_p,
+		SDMA_INTR_REG_BLOCK_SIZE);
+	pi->brg_base = (u32)ioremap(pi->brg_base_p, BRG_REG_BLOCK_SIZE);
+}
+
+/* Undo mpsc_map_regs(): iounmap each block and zero the cached bases. */
+static void
+mpsc_unmap_regs(mpsc_port_info_t *pi)
+{
+	iounmap((void *)pi->mpsc_base);
+	iounmap((void *)pi->mpsc_routing_base);
+	iounmap((void *)pi->sdma_base);
+	iounmap((void *)pi->sdma_intr_base);
+	iounmap((void *)pi->brg_base);
+
+	pi->mpsc_base = 0;
+	pi->mpsc_routing_base = 0;
+	pi->sdma_base = 0;
+	pi->sdma_intr_base = 0;
+	pi->brg_base = 0;
+}
+
+/* Called from platform specific device probe routine */
+mpsc_port_info_t *
+mpsc_device_probe(int index)
+{
+	/* Hand back the port-info slot for a valid controller index */
+	if ((index < 0) || (index >= MPSC_NUM_CTLRS))
+		return NULL;
+
+	return &mpsc_ports[index];
+}
+
+/* Called from platform specific device remove routine */
+mpsc_port_info_t *
+mpsc_device_remove(int index)
+{
+	/* Hand back the port-info slot for a valid controller index */
+	if ((index < 0) || (index >= MPSC_NUM_CTLRS))
+		return NULL;
+
+	return &mpsc_ports[index];
+}
+
+/* uart_driver descriptor registered with serial_core in mpsc_init() */
+static struct uart_driver mpsc_reg = {
+	.owner       = THIS_MODULE,
+	.driver_name = MPSC_DRIVER_NAME,
+	.devfs_name  = MPSC_DEVFS_NAME,
+	.dev_name    = MPSC_DEV_NAME,
+	.major       = MPSC_MAJOR,
+	.minor       = MPSC_MINOR_START,
+	.nr          = MPSC_NUM_CTLRS,
+	.cons        = MPSC_CONSOLE,
+};
+
+/*
+ * Module init: register the platform driver and the uart driver, then
+ * map registers and add each controller's port.  On any per-port
+ * failure, everything done so far is unwound.
+ *
+ * Fix vs. the previous version: when mpsc_make_ready() failed for port
+ * 'i', that port's just-mapped registers were never unmapped (the
+ * cleanup loop only covered ports 0..i-1), leaking the ioremap()s.
+ */
+static int __init
+mpsc_init(void)
+{
+	mpsc_port_info_t *pi;
+	int i, j, rc;
+
+	printk(KERN_INFO "Serial: MPSC driver $Revision: 1.00 $\n");
+
+	if ((rc = mpsc_platform_register_driver()) >= 0) {
+		if ((rc = uart_register_driver(&mpsc_reg)) < 0) {
+			mpsc_platform_unregister_driver();
+		}
+		else {
+			for (i=0; i<MPSC_NUM_CTLRS; i++) {
+				pi = &mpsc_ports[i];
+
+				pi->port.line = i;
+				pi->port.type = PORT_MPSC;
+				pi->port.fifosize = MPSC_TXBE_SIZE;
+				pi->port.membase = (char *)pi->mpsc_base;
+				pi->port.mapbase = (ulong)pi->mpsc_base;
+				pi->port.ops = &mpsc_pops;
+
+				mpsc_map_regs(pi);
+
+				if ((rc = mpsc_make_ready(pi)) >= 0) {
+					uart_add_one_port(&mpsc_reg, &pi->port);
+				}
+				else { /* on failure, undo everything */
+					/* current port was mapped but never added */
+					mpsc_unmap_regs(pi);
+
+					for (j=0; j<i; j++) {
+						mpsc_unmap_regs(&mpsc_ports[j]);
+						uart_remove_one_port(&mpsc_reg,
+							&mpsc_ports[j].port);
+					}
+
+					uart_unregister_driver(&mpsc_reg);
+					mpsc_platform_unregister_driver();
+					break;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
+
+/* Module exit: tear down every port, then unregister both drivers. */
+static void __exit
+mpsc_exit(void)
+{
+	int i;
+
+	DBG("mpsc_exit: Exiting\n");
+
+	for (i = 0; i < MPSC_NUM_CTLRS; i++) {
+		mpsc_unmap_regs(&mpsc_ports[i]);
+		uart_remove_one_port(&mpsc_reg, &mpsc_ports[i].port);
+	}
+
+	uart_unregister_driver(&mpsc_reg);
+	mpsc_platform_unregister_driver();
+}
+
+/* Legacy register_serial() compatibility shim over serial_core. */
+int
+register_serial(struct serial_struct *req)
+{
+	return uart_register_port(&mpsc_reg, &mpsc_ports[req->line].port);
+}
+
+/* Legacy unregister_serial() compatibility shim over serial_core. */
+void
+unregister_serial(int line)
+{
+	uart_unregister_port(&mpsc_reg, line);
+}
+
+module_init(mpsc_init);
+module_exit(mpsc_exit);
+
+EXPORT_SYMBOL(register_serial);
+EXPORT_SYMBOL(unregister_serial);
+
+MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
+MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver $Revision: 1.00 $");
+MODULE_VERSION(MPSC_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
--- /dev/null
+/*
+ * drivers/serial/mpsc/mpsc.h
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __MPSC_H__
+#define __MPSC_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/serial.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+#include "mpsc_defs.h"
+
+
+/*
+ * Descriptors and buffers must be cache line aligned.
+ * Buffer lengths must be multiples of cache line size.
+ * Number of Tx & Rx descriptors must be power of 2.
+ */
+#define MPSC_DESC_ALIGN dma_get_cache_alignment()
+#define MPSC_BUF_ALIGN dma_get_cache_alignment()
+
+#define MPSC_RXR_ENTRIES 32
+#define MPSC_RXRE_SIZE sizeof(mpsc_rx_desc_t)
+#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
+#define MPSC_RXBE_SIZE dma_get_cache_alignment()
+#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
+
+#define MPSC_TXR_ENTRIES 32
+#define MPSC_TXRE_SIZE sizeof(mpsc_tx_desc_t)
+#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
+#define MPSC_TXBE_SIZE dma_get_cache_alignment()
+#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
+
+typedef struct {
+ u16 bufsize;
+ u16 bytecnt;
+ u32 cmdstat;
+ u32 link;
+ u32 buf_ptr;
+} mpsc_rx_desc_t __attribute((packed));
+
+/* Tx and Rx Ring entry descriptors */
+typedef struct {
+ u16 bytecnt;
+ u16 shadow;
+ u32 cmdstat;
+ u32 link;
+ u32 buf_ptr;
+} mpsc_tx_desc_t __attribute((packed));
+
+/* The main driver data structure */
+typedef struct {
+ struct uart_port port; /* Overlay uart_port structure */
+
+ /* Internal driver state for this ctlr */
+ u8 ready;
+ u8 rcv_data;
+ tcflag_t c_iflag; /* save termios->c_iflag */
+ tcflag_t c_cflag; /* save termios->c_cflag */
+
+ /* Info passed in from platform */
+ u8 mirror_regs; /* Need to mirror regs? */
+ u8 cache_mgmt; /* Need manual cache mgmt? */
+ u8 brg_can_tune; /* BRG has baud tuning? */
+ u32 brg_clk_src;
+ u16 mpsc_max_idle;
+ int default_baud;
+ int default_bits;
+ int default_parity;
+ int default_flow;
+
+ /* Physical addresses of various blocks of registers (from platform) */
+ u32 mpsc_base_p;
+ u32 mpsc_routing_base_p;
+ u32 sdma_base_p;
+ u32 sdma_intr_base_p;
+ u32 brg_base_p;
+
+ /* Virtual addresses of various blocks of registers (from platform) */
+ u32 mpsc_base;
+ u32 mpsc_routing_base;
+ u32 sdma_base;
+ u32 sdma_intr_base;
+ u32 brg_base;
+
+ /* Descriptor ring and buffer allocations */
+ void *desc_region; /* Region for desc rings */
+ dma_addr_t desc_region_p;
+ u32 desc_region_size;
+
+ void *buf_region; /* kmalloc region for bufs */
+ u32 buf_region_size;
+
+ mpsc_rx_desc_t *rxr; /* Rx descriptor ring */
+ mpsc_rx_desc_t *rxr_p; /* Phys addr of rxr */
+ u32 rxr_posn; /* First desc w/ Rx data */
+ u8 *rxb; /* Rx Ring I/O buf */
+ dma_addr_t rxb_p; /* Phys addr of rxb */
+
+ mpsc_tx_desc_t *txr; /* Tx descriptor ring */
+ mpsc_tx_desc_t *txr_p; /* Phys addr of txr */
+ u32 txr_posn; /* First unused desc */
+ u8 *txb; /* Tx Ring I/O buf */
+ dma_addr_t txb_p; /* Phys addr of txb */
+
+ /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
+ u32 MPSC_CHR_1_m;
+ u32 MPSC_CHR_2_m;
+ u32 MPSC_CHR_10_m;
+ u32 MPSC_MPCR_m;
+ u32 MPSC_MRR_m;
+ u32 MPSC_RCRR_m;
+ u32 MPSC_TCRR_m;
+ u32 SDMA_INTR_MASK_m;
+ u32 BRG_BCR_m;
+} mpsc_port_info_t;
+
+/*
+ * Some MPSC ctlrs have an erratum where they aren't supposed to access
+ * cache coherent memory regions. From practical experience, the erratum
+ * is not triggered as long as there isn't a snoop hit. Therefore, if
+ * the MPSC in use has this erratum and coherency is enabled on the platform,
+ * we must manually manage the cache for ring descriptors and the I/O buffers.
+ */
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+#define MPSC_CACHE_FLUSH(pi, s, e) { \
+ if (pi->cache_mgmt) { \
+ /* 64x60 erratum: can't use dcbst/clean_dcache_range() */ \
+ flush_dcache_range((ulong)s, (ulong)e); \
+ mb(); \
+ } \
+}
+
+#define MPSC_CACHE_INVALIDATE(pi, s, e) { \
+ if (pi->cache_mgmt) { \
+ invalidate_dcache_range((ulong)s, (ulong)e); \
+ mb(); \
+ } \
+}
+
+#define MPSC_CACHE_FLUSH_INVALIDATE(pi, s, e) { \
+ if (pi->cache_mgmt) { \
+ flush_dcache_range((ulong)s, (ulong)e); \
+ mb(); \
+ } \
+}
+#else
+#define MPSC_CACHE_FLUSH(pi, s, e)
+#define MPSC_CACHE_INVALIDATE(pi, s, e)
+#define MPSC_CACHE_FLUSH_INVALIDATE(pi, s, e)
+#endif
+
+/*
+ * 'MASK_INSERT' takes the low-order 'n' bits of 'i', shifts it 'b' bits to
+ * the left, and inserts it into the target 't'. The corresponding bits in
+ * 't' will have been cleared before the bits in 'i' are inserted.
+ */
+#ifdef CONFIG_PPC32
+#define MASK_INSERT(t, i, n, b) ({ \
+ u32 rval = (t); \
+ __asm__ __volatile__( \
+ "rlwimi %0,%2,%4,32-(%3+%4),31-%4\n" \
+ : "=r" (rval) \
+ : "0" (rval), "r" (i), "i" (n), "i" (b)); \
+ rval; \
+})
+#else
+/* These macros are really just examples. Feel free to change them --MAG */
+#define GEN_MASK(n, b) \
+({ \
+ u32 m, sl, sr; \
+ sl = 32 - (n); \
+ sr = sl - (b); \
+ m = (0xffffffff << sl) >> sr; \
+})
+
+#define MASK_INSERT(t, i, n, b) \
+({ \
+ u32 m, rval = (t); \
+ m = GEN_MASK((n), (b)); \
+ rval &= ~m; \
+ rval |= (((i) << (b)) & m); \
+})
+#endif
+
+/* I/O macros for regs that you can read */
+#define MPSC_READ(pi, unit, offset) readl((pi)->unit##_base + (offset))
+#define MPSC_WRITE(pi, unit, offset, v) writel(v, (pi)->unit##_base + (offset))
+#define MPSC_MOD_FIELD(pi, unit, offset, num_bits, shift, val) \
+{ \
+ u32 v; \
+ v = readl((pi)->unit##_base + (offset)); \
+ writel(MASK_INSERT(v,val,num_bits,shift), (pi)->unit##_base+(offset));\
+}
+
+#define MPSC_READ_M(pi, unit, offset) \
+({ \
+ u32 v; \
+ if ((pi)->mirror_regs) v = (pi)->offset##_m; \
+ else v = readl((pi)->unit##_base + (offset)); \
+ v; \
+})
+
+#define MPSC_WRITE_M(pi, unit, offset, v) \
+({ \
+ if ((pi)->mirror_regs) (pi)->offset##_m = v; \
+ writel(v, (pi)->unit##_base + (offset)); \
+})
+
+#define MPSC_MOD_FIELD_M(pi, unit, offset, num_bits, shift, val) \
+({ \
+ u32 v; \
+ if ((pi)->mirror_regs) v = (pi)->offset##_m; \
+ else v = readl((pi)->unit##_base + (offset)); \
+ v = MASK_INSERT(v, val, num_bits, shift); \
+ if ((pi)->mirror_regs) (pi)->offset##_m = v; \
+ writel(v, (pi)->unit##_base + (offset)); \
+})
+
+#if !defined(MIN)
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Hooks to platform-specific code */
+int mpsc_platform_register_driver(void);
+void mpsc_platform_unregister_driver(void);
+
+/* Hooks back in to mpsc common to be called by platform-specific code */
+mpsc_port_info_t *mpsc_device_probe(int index);
+mpsc_port_info_t *mpsc_device_remove(int index);
+
+#endif /* __MPSC_H__ */
--- /dev/null
+/*
+ * drivers/serial/mpsc/mpsc_defs.h
+ *
+ * Register definitions for the Marvell Multi-Protocol Serial Controller (MPSC),
+ * Serial DMA Controller (SDMA), and Baud Rate Generator (BRG).
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __MPSC_DEFS_H__
+#define __MPSC_DEFS_H__
+
+#define MPSC_NUM_CTLRS 2
+
+
+/*
+ *****************************************************************************
+ *
+ * Multi-Protocol Serial Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+/* Main Configuration Register Offsets */
+#define MPSC_MMCRL 0x0000
+#define MPSC_MMCRH 0x0004
+#define MPSC_MPCR 0x0008
+#define MPSC_CHR_1 0x000c
+#define MPSC_CHR_2 0x0010
+#define MPSC_CHR_3 0x0014
+#define MPSC_CHR_4 0x0018
+#define MPSC_CHR_5 0x001c
+#define MPSC_CHR_6 0x0020
+#define MPSC_CHR_7 0x0024
+#define MPSC_CHR_8 0x0028
+#define MPSC_CHR_9 0x002c
+#define MPSC_CHR_10 0x0030
+#define MPSC_CHR_11 0x0034
+#define MPSC_REG_BLOCK_SIZE 0x0038
+
+
+#define MPSC_MPCR_CL_5 0
+#define MPSC_MPCR_CL_6 1
+#define MPSC_MPCR_CL_7 2
+#define MPSC_MPCR_CL_8 3
+#define MPSC_MPCR_SBL_1 0
+#define MPSC_MPCR_SBL_2 3
+
+#define MPSC_CHR_2_TEV (1<<1)
+#define MPSC_CHR_2_TA (1<<7)
+#define MPSC_CHR_2_TTCS (1<<9)
+#define MPSC_CHR_2_REV (1<<17)
+#define MPSC_CHR_2_RA (1<<23)
+#define MPSC_CHR_2_CRD (1<<25)
+#define MPSC_CHR_2_EH (1<<31)
+#define MPSC_CHR_2_PAR_ODD 0
+#define MPSC_CHR_2_PAR_SPACE 1
+#define MPSC_CHR_2_PAR_EVEN 2
+#define MPSC_CHR_2_PAR_MARK 3
+
+/* MPSC Signal Routing */
+#define MPSC_MRR 0x0000
+#define MPSC_RCRR 0x0004
+#define MPSC_TCRR 0x0008
+#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c
+
+/*
+ *****************************************************************************
+ *
+ * Serial DMA Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define SDMA_SDC 0x0000
+#define SDMA_SDCM 0x0008
+#define SDMA_RX_DESC 0x0800
+#define SDMA_RX_BUF_PTR 0x0808
+#define SDMA_SCRDP 0x0810
+#define SDMA_TX_DESC 0x0c00
+#define SDMA_SCTDP 0x0c10
+#define SDMA_SFTDP 0x0c14
+#define SDMA_REG_BLOCK_SIZE 0x0c18
+
+#define SDMA_DESC_CMDSTAT_PE (1<<0)
+#define SDMA_DESC_CMDSTAT_CDL (1<<1)
+#define SDMA_DESC_CMDSTAT_FR (1<<3)
+#define SDMA_DESC_CMDSTAT_OR (1<<6)
+#define SDMA_DESC_CMDSTAT_BR (1<<9)
+#define SDMA_DESC_CMDSTAT_MI (1<<10)
+#define SDMA_DESC_CMDSTAT_A (1<<11)
+#define SDMA_DESC_CMDSTAT_AM (1<<12)
+#define SDMA_DESC_CMDSTAT_CT (1<<13)
+#define SDMA_DESC_CMDSTAT_C (1<<14)
+#define SDMA_DESC_CMDSTAT_ES (1<<15)
+#define SDMA_DESC_CMDSTAT_L (1<<16)
+#define SDMA_DESC_CMDSTAT_F (1<<17)
+#define SDMA_DESC_CMDSTAT_P (1<<18)
+#define SDMA_DESC_CMDSTAT_EI (1<<23)
+#define SDMA_DESC_CMDSTAT_O (1<<31)
+
+#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O | \
+ SDMA_DESC_CMDSTAT_EI)
+
+#define SDMA_SDC_RFT (1<<0)
+#define SDMA_SDC_SFM (1<<1)
+#define SDMA_SDC_BLMR (1<<6)
+#define SDMA_SDC_BLMT (1<<7)
+#define SDMA_SDC_POVR (1<<8)
+#define SDMA_SDC_RIFB (1<<9)
+
+#define SDMA_SDCM_ERD (1<<7)
+#define SDMA_SDCM_AR (1<<15)
+#define SDMA_SDCM_STD (1<<16)
+#define SDMA_SDCM_TXD (1<<23)
+#define SDMA_SDCM_AT (1<<31)
+
+#define SDMA_0_CAUSE_RXBUF (1<<0)
+#define SDMA_0_CAUSE_RXERR (1<<1)
+#define SDMA_0_CAUSE_TXBUF (1<<2)
+#define SDMA_0_CAUSE_TXEND (1<<3)
+#define SDMA_1_CAUSE_RXBUF (1<<8)
+#define SDMA_1_CAUSE_RXERR (1<<9)
+#define SDMA_1_CAUSE_TXBUF (1<<10)
+#define SDMA_1_CAUSE_TXEND (1<<11)
+
+#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR | \
+ SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
+#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND | \
+ SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
+
+/* SDMA Interrupt registers */
+#define SDMA_INTR_CAUSE 0x0000
+#define SDMA_INTR_MASK 0x0080
+#define SDMA_INTR_REG_BLOCK_SIZE 0x0084
+
+/*
+ *****************************************************************************
+ *
+ * Baud Rate Generator Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define BRG_BCR 0x0000
+#define BRG_BTR 0x0004
+#define BRG_REG_BLOCK_SIZE 0x0008
+
+#endif /*__MPSC_DEFS_H__ */
--- /dev/null
+/*
+ * drivers/serial/mpsc/mpsc_ppc32.c
+ *
+ * Middle layer that sucks data from the ppc32 OCP--that is, chip &
+ * platform-specific data--and puts it into the mpsc_port_info_t structure
+ * for the mpsc driver to use.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include "mpsc.h"
+#include <asm/ocp.h>
+#include <asm/mv64x60.h>
+
+static void mpsc_ocp_remove(struct ocp_device *ocpdev);
+
+static int
+mpsc_ocp_probe(struct ocp_device *ocpdev)
+{
+ mpsc_port_info_t *pi;
+ mv64x60_ocp_mpsc_data_t *dp;
+ u32 base;
+ int rc = -ENODEV;
+
+ if ((pi = mpsc_device_probe(ocpdev->def->index)) != NULL) {
+ dp = (mv64x60_ocp_mpsc_data_t *)ocpdev->def->additions;
+
+ pi->mpsc_base_p = ocpdev->def->paddr;
+
+ if (ocpdev->def->index == 0) {
+ base = pi->mpsc_base_p - MV64x60_MPSC_0_OFFSET;
+ pi->sdma_base_p = base + MV64x60_SDMA_0_OFFSET;
+ pi->brg_base_p = base + MV64x60_BRG_0_OFFSET;
+ }
+ else { /* Must be 1 */
+ base = pi->mpsc_base_p - MV64x60_MPSC_1_OFFSET;
+ pi->sdma_base_p = base + MV64x60_SDMA_1_OFFSET;
+ pi->brg_base_p = base + MV64x60_BRG_1_OFFSET;
+ }
+
+ pi->mpsc_routing_base_p = base + MV64x60_MPSC_ROUTING_OFFSET;
+ pi->sdma_intr_base_p = base + MV64x60_SDMA_INTR_OFFSET;
+
+ pi->port.irq = dp->sdma_irq;
+ pi->port.uartclk = dp->brg_clk_freq;
+
+ pi->mirror_regs = dp->mirror_regs;
+ pi->cache_mgmt = dp->cache_mgmt;
+ pi->brg_can_tune = dp->brg_can_tune;
+ pi->brg_clk_src = dp->brg_clk_src;
+ pi->mpsc_max_idle = dp->max_idle;
+ pi->default_baud = dp->default_baud;
+ pi->default_bits = dp->default_bits;
+ pi->default_parity = dp->default_parity;
+ pi->default_flow = dp->default_flow;
+
+ /* Initial values of mirrored regs */
+ pi->MPSC_CHR_1_m = dp->chr_1_val;
+ pi->MPSC_CHR_2_m = dp->chr_2_val;
+ pi->MPSC_CHR_10_m = dp->chr_10_val;
+ pi->MPSC_MPCR_m = dp->mpcr_val;
+ pi->MPSC_MRR_m = dp->mrr_val;
+ pi->MPSC_RCRR_m = dp->rcrr_val;
+ pi->MPSC_TCRR_m = dp->tcrr_val;
+ pi->SDMA_INTR_MASK_m = dp->intr_mask_val;
+ pi->BRG_BCR_m = dp->bcr_val;
+
+ pi->port.iotype = UPIO_MEM;
+
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void
+mpsc_ocp_remove(struct ocp_device *ocpdev)
+{
+ (void)mpsc_device_remove(ocpdev->def->index);
+ return;
+}
+
+static struct ocp_device_id mpsc_ocp_ids[] = {
+ {.vendor = OCP_VENDOR_MARVELL, .function = OCP_FUNC_MPSC},
+ {.vendor = OCP_VENDOR_INVALID}
+};
+
+static struct ocp_driver mpsc_ocp_driver = {
+ .name = "mpsc",
+ .id_table = mpsc_ocp_ids,
+ .probe = mpsc_ocp_probe,
+ .remove = mpsc_ocp_remove,
+};
+
+int
+mpsc_platform_register_driver(void)
+{
+ return ocp_register_driver(&mpsc_ocp_driver);
+}
+
+void
+mpsc_platform_unregister_driver(void)
+{
+ ocp_unregister_driver(&mpsc_ocp_driver);
+ return;
+}
if (tty->flip.count >= TTY_FLIPBUF_SIZE)
drop = 1;
if (ZS_IS_ASLEEP(uap))
- return NULL;
+ return 0;
if (!ZS_IS_OPEN(uap))
goto retry;
}
ioremap(np->addrs[np->n_addrs - 1].address, 0x1000);
if (uap->rx_dma_regs == NULL) {
iounmap((void *)uap->tx_dma_regs);
- uap->tx_dma_regs = NULL;
uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
goto no_dma;
}
uap->port.ops = &pmz_pops;
uap->port.type = PORT_PMAC_ZILOG;
uap->port.flags = 0;
+ spin_lock_init(&uap->port.lock);
/* Setup some valid baud rate information in the register
* shadows so we don't write crap there before baud rate is
{
struct device_node *np;
- np = uap->node;
- iounmap((void *)uap->rx_dma_regs);
- iounmap((void *)uap->tx_dma_regs);
iounmap((void *)uap->control_reg);
+ np = uap->node;
uap->node = NULL;
of_node_put(np);
- memset(uap, 0, sizeof(struct uart_pmac_port));
}
/*
* Register this driver with the serial core
*/
rc = uart_register_driver(&pmz_uart_reg);
- if (rc)
+ if (rc != 0)
return rc;
/*
struct uart_pmac_port *uport = &pmz_ports[i];
/* NULL node may happen on wallstreet */
if (uport->node != NULL)
- rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
- if (rc)
- goto err_out;
+ uart_add_one_port(&pmz_uart_reg, &uport->port);
}
return 0;
-err_out:
- while (i-- > 0) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
- }
- uart_unregister_driver(&pmz_uart_reg);
- return rc;
}
static struct of_match pmz_match[] =
static int __init init_pmz(void)
{
- int rc, i;
printk(KERN_INFO "%s\n", version);
/*
/*
* Now we register with the serial layer
*/
- rc = pmz_register();
- if (rc) {
- printk(KERN_ERR
- "pmac_zilog: Error registering serial device, disabling pmac_zilog.\n"
- "pmac_zilog: Did another serial driver already claim the minors?\n");
- /* effectively "pmz_unprobe()" */
- for (i=0; i < pmz_ports_count; i++)
- pmz_dispose_port(&pmz_ports[i]);
- return rc;
- }
+ pmz_register();
/*
* Then we register the macio driver itself
+++ /dev/null
-/*
- * C-Brick Serial Port (and console) driver for SGI Altix machines.
- *
- * This driver is NOT suitable for talking to the l1-controller for
- * anything other than 'console activities' --- please use the l1
- * driver for that.
- *
- *
- * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/sysrq.h>
-#include <linux/circ_buf.h>
-#include <linux/serial_reg.h>
-#include <linux/delay.h> /* for mdelay */
-#include <linux/miscdevice.h>
-#include <linux/serial_core.h>
-
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn2/sn_private.h>
-#include <asm/sn/sn_sal.h>
-
-/* number of characters we can transmit to the SAL console at a time */
-#define SN_SAL_MAX_CHARS 120
-
-/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
- * avoid losing chars, (always has to be a power of 2) */
-#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
-
-#define SN_SAL_UART_FIFO_DEPTH 16
-#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
-
-/* sn_transmit_chars() calling args */
-#define TRANSMIT_BUFFERED 0
-#define TRANSMIT_RAW 1
-
-/* To use dynamic numbers only and not use the assigned major and minor,
- * define the following.. */
-/* #define USE_DYNAMIC_MINOR 1 */ /* use dynamic minor number */
-#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
-
-/* Device name we're using */
-#define DEVICE_NAME "ttySG"
-#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
-/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
-#define DEVICE_MAJOR 204
-#define DEVICE_MINOR 40
-
-/*
- * Port definition - this kinda drives it all
- */
-struct sn_cons_port {
- struct timer_list sc_timer;
- struct uart_port sc_port;
- struct sn_sal_ops {
- int (*sal_puts_raw) (const char *s, int len);
- int (*sal_puts) (const char *s, int len);
- int (*sal_getc) (void);
- int (*sal_input_pending) (void);
- void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
- } *sc_ops;
- unsigned long sc_interrupt_timeout;
- int sc_is_asynch;
-};
-
-static struct sn_cons_port sal_console_port;
-
-/* Only used if USE_DYNAMIC_MINOR is set to 1 */
-static struct miscdevice misc; /* used with misc_register for dynamic */
-
-extern u64 master_node_bedrock_address;
-extern void early_sn_setup(void);
-
-#undef DEBUG
-#ifdef DEBUG
-static int sn_debug_printf(const char *fmt, ...);
-#define DPRINTF(x...) sn_debug_printf(x)
-#else
-#define DPRINTF(x...) do { } while (0)
-#endif
-
-/* Prototypes */
-static int snt_hw_puts_raw(const char *, int);
-static int snt_hw_puts_buffered(const char *, int);
-static int snt_poll_getc(void);
-static int snt_poll_input_pending(void);
-static int snt_sim_puts(const char *, int);
-static int snt_sim_getc(void);
-static int snt_sim_input_pending(void);
-static int snt_intr_getc(void);
-static int snt_intr_input_pending(void);
-static void sn_transmit_chars(struct sn_cons_port *, int);
-
-/* A table for polling:
- */
-static struct sn_sal_ops poll_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_raw,
- .sal_getc = snt_poll_getc,
- .sal_input_pending = snt_poll_input_pending
-};
-
-/* A table for the simulator */
-static struct sn_sal_ops sim_ops = {
- .sal_puts_raw = snt_sim_puts,
- .sal_puts = snt_sim_puts,
- .sal_getc = snt_sim_getc,
- .sal_input_pending = snt_sim_input_pending
-};
-
-/* A table for interrupts enabled */
-static struct sn_sal_ops intr_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_buffered,
- .sal_getc = snt_intr_getc,
- .sal_input_pending = snt_intr_input_pending,
- .sal_wakeup_transmit = sn_transmit_chars
-};
-
-/* the console does output in two distinctly different ways:
- * synchronous (raw) and asynchronous (buffered). initally, early_printk
- * does synchronous output. any data written goes directly to the SAL
- * to be output (incidentally, it is internally buffered by the SAL)
- * after interrupts and timers are initialized and available for use,
- * the console init code switches to asynchronous output. this is
- * also the earliest opportunity to begin polling for console input.
- * after console initialization, console output and tty (serial port)
- * output is buffered and sent to the SAL asynchronously (either by
- * timer callback or by UART interrupt) */
-
-
-/* routines for running the console in polling mode */
-
-/**
- * snt_poll_getc - Get a character from the console in polling mode
- *
- */
-static int
-snt_poll_getc(void)
-{
- int ch;
-
- ia64_sn_console_getc(&ch);
- return ch;
-}
-
-/**
- * snt_poll_input_pending - Check if any input is waiting - polling mode.
- *
- */
-static int
-snt_poll_input_pending(void)
-{
- int status, input;
-
- status = ia64_sn_console_check(&input);
- return !status && input;
-}
-
-/* routines for running the console on the simulator */
-
-/**
- * snt_sim_puts - send to the console, used in simulator mode
- * @str: String to send
- * @count: length of string
- *
- */
-static int
-snt_sim_puts(const char *str, int count)
-{
- int counter = count;
-
-#ifdef FLAG_DIRECT_CONSOLE_WRITES
- /* This is an easy way to pre-pend the output to know whether the output
- * was done via sal or directly */
- writeb('[', master_node_bedrock_address + (UART_TX << 3));
- writeb('+', master_node_bedrock_address + (UART_TX << 3));
- writeb(']', master_node_bedrock_address + (UART_TX << 3));
- writeb(' ', master_node_bedrock_address + (UART_TX << 3));
-#endif /* FLAG_DIRECT_CONSOLE_WRITES */
- while (counter > 0) {
- writeb(*str, master_node_bedrock_address + (UART_TX << 3));
- counter--;
- str++;
- }
- return count;
-}
-
-/**
- * snt_sim_getc - Get character from console in simulator mode
- *
- */
-static int
-snt_sim_getc(void)
-{
- return readb(master_node_bedrock_address + (UART_RX << 3));
-}
-
-/**
- * snt_sim_input_pending - Check if there is input pending in simulator mode
- *
- */
-static int
-snt_sim_input_pending(void)
-{
- return readb(master_node_bedrock_address +
- (UART_LSR << 3)) & UART_LSR_DR;
-}
-
-/* routines for an interrupt driven console (normal) */
-
-/**
- * snt_intr_getc - Get a character from the console, interrupt mode
- *
- */
-static int
-snt_intr_getc(void)
-{
- return ia64_sn_console_readc();
-}
-
-/**
- * snt_intr_input_pending - Check if input is pending, interrupt mode
- *
- */
-static int
-snt_intr_input_pending(void)
-{
- return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
-}
-
-/* these functions are polled and interrupt */
-
-/**
- * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int
-snt_hw_puts_raw(const char *s, int len)
-{
- /* this will call the PROM and not return until this is done */
- return ia64_sn_console_putb(s, len);
-}
-
-/**
- * snt_hw_puts_buffered - Send string to console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int
-snt_hw_puts_buffered(const char *s, int len)
-{
- /* queue data to the PROM */
- return ia64_sn_console_xmit_chars((char *)s, len);
-}
-
-/* uart interface structs
- * These functions are associated with the uart_port that the serial core
- * infrastructure calls.
- *
- * Note: Due to how the console works, many routines are no-ops.
- */
-
-/**
- * snp_type - What type of console are we?
- * @port: Port to operate with (we ignore since we only have one port)
- *
- */
-static const char *
-snp_type(struct uart_port *port)
-{
- return ("SGI SN L1");
-}
-
-/**
- * snp_tx_empty - Is the transmitter empty? We pretend we're always empty
- * @port: Port to operate on (we ignore since we only have one port)
- *
- */
-static unsigned int
-snp_tx_empty(struct uart_port *port)
-{
- return 1;
-}
-
-/**
- * snp_stop_tx - stop the transmitter - no-op for us
- * @port: Port to operat eon - we ignore - no-op function
- * @tty_stop: Set to 1 if called via uart_stop
- *
- */
-static void
-snp_stop_tx(struct uart_port *port, unsigned int tty_stop)
-{
-}
-
-/**
- * snp_release_port - Free i/o and resources for port - no-op for us
- * @port: Port to operate on - we ignore - no-op function
- *
- */
-static void
-snp_release_port(struct uart_port *port)
-{
-}
-
-/**
- * snp_enable_ms - Force modem status interrupts on - no-op for us
- * @port: Port to operate on - we ignore - no-op function
- *
- */
-static void
-snp_enable_ms(struct uart_port *port)
-{
-}
-
-/**
- * snp_shutdown - shut down the port - free irq and disable - no-op for us
- * @port: Port to shut down - we ignore
- *
- */
-static void
-snp_shutdown(struct uart_port *port)
-{
-}
-
-/**
- * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
- * @port: Port to operate on - we ignore
- * @mctrl: Lines to set/unset - we ignore
- *
- */
-static void
-snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-}
-
-/**
- * snp_get_mctrl - get contorl line info, we just return a static value
- * @port: port to operate on - we only have one port so we ignore this
- *
- */
-static unsigned int
-snp_get_mctrl(struct uart_port *port)
-{
- return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
-}
-
-/**
- * snp_stop_rx - Stop the receiver - we ignor ethis
- * @port: Port to operate on - we ignore
- *
- */
-static void
-snp_stop_rx(struct uart_port *port)
-{
-}
-
-/**
- * snp_start_tx - Start transmitter
- * @port: Port to operate on
- * @tty_stop: Set to 1 if called via uart_start
- *
- */
-static void
-snp_start_tx(struct uart_port *port, unsigned int tty_stop)
-{
- if (sal_console_port.sc_ops->sal_wakeup_transmit)
- sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, TRANSMIT_BUFFERED);
-
-}
-
-/**
- * snp_break_ctl - handle breaks - ignored by us
- * @port: Port to operate on
- * @break_state: Break state
- *
- */
-static void
-snp_break_ctl(struct uart_port *port, int break_state)
-{
-}
-
-/**
- * snp_startup - Start up the serial port - always return 0 (We're always on)
- * @port: Port to operate on
- *
- */
-static int
-snp_startup(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_set_termios - set termios stuff - we ignore these
- * @port: port to operate on
- * @termios: New settings
- * @termios: Old
- *
- */
-static void
-snp_set_termios(struct uart_port *port, struct termios *termios,
- struct termios *old)
-{
-}
-
-/**
- * snp_request_port - allocate resources for port - ignored by us
- * @port: port to operate on
- *
- */
-static int
-snp_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_config_port - allocate resources, set up - we ignore, we're always on
- * @port: Port to operate on
- * @flags: flags used for port setup
- *
- */
-static void
-snp_config_port(struct uart_port *port, int flags)
-{
-}
-
-/* Associate the uart functions above - given to serial core */
-
-static struct uart_ops sn_console_ops = {
- .tx_empty = snp_tx_empty,
- .set_mctrl = snp_set_mctrl,
- .get_mctrl = snp_get_mctrl,
- .stop_tx = snp_stop_tx,
- .start_tx = snp_start_tx,
- .stop_rx = snp_stop_rx,
- .enable_ms = snp_enable_ms,
- .break_ctl = snp_break_ctl,
- .startup = snp_startup,
- .shutdown = snp_shutdown,
- .set_termios = snp_set_termios,
- .pm = NULL,
- .type = snp_type,
- .release_port = snp_release_port,
- .request_port = snp_request_port,
- .config_port = snp_config_port,
- .verify_port = NULL,
-};
-
-/* End of uart struct functions and defines */
-
-#ifdef DEBUG
-
-/**
- * sn_debug_printf - close to hardware debugging printf
- * @fmt: printf format
- *
- * This is as "close to the metal" as we can get, used when the driver
- * itself may be broken.
- *
- */
-static int
-sn_debug_printf(const char *fmt, ...)
-{
- static char printk_buf[1024];
- int printed_len;
- va_list args;
-
- va_start(args, fmt);
- printed_len = vsnprintf(printk_buf, sizeof (printk_buf), fmt, args);
-
- if (!sal_console_port.sc_ops) {
- if (IS_RUNNING_ON_SIMULATOR())
- sal_console_port.sc_ops = &sim_ops;
- else
- sal_console_port.sc_ops = &poll_ops;
-
- early_sn_setup();
- }
- sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);
-
- va_end(args);
- return printed_len;
-}
-#endif /* DEBUG */
-
-/*
- * Interrupt handling routines.
- */
-
-
-/**
- * sn_receive_chars - Grab characters, pass them to tty layer
- * @port: Port to operate on
- * @regs: Saved registers (needed by uart_handle_sysrq_char)
- *
- * Note: If we're not registered with the serial core infrastructure yet,
- * we don't try to send characters to it...
- *
- */
-static void
-sn_receive_chars(struct sn_cons_port *port, struct pt_regs *regs)
-{
- int ch;
- struct tty_struct *tty;
-
- if (!port) {
- printk(KERN_ERR "sn_receive_chars - port NULL so can't receieve\n");
- return;
- }
-
- if (!port->sc_ops) {
- printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receieve\n");
- return;
- }
-
- if (port->sc_port.info) {
- /* The serial_core stuffs are initilized, use them */
- tty = port->sc_port.info->tty;
- }
- else {
- /* Not registered yet - can't pass to tty layer. */
- tty = NULL;
- }
-
- while (port->sc_ops->sal_input_pending()) {
- ch = port->sc_ops->sal_getc();
- if (ch < 0) {
- printk(KERN_ERR "sn_console: An error occured while "
- "obtaining data from the console (0x%0x)\n", ch);
- break;
- }
-#if defined(CONFIG_SERIAL_SGI_L1_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
- if (uart_handle_sysrq_char(&port->sc_port, ch, regs))
- continue;
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE && CONFIG_MAGIC_SYSRQ */
-
- /* record the character to pass up to the tty layer */
- if (tty) {
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- if (tty->flip.count == TTY_FLIPBUF_SIZE)
- break;
- }
- else {
- }
- port->sc_port.icount.rx++;
- }
-
- if (tty)
- tty_flip_buffer_push(tty);
-}
-
-/**
- * sn_transmit_chars - grab characters from serial core, send off
- * @port: Port to operate on
- * @raw: Transmit raw or buffered
- *
- * Note: If we're early, before we're registered with serial core, the
- * writes are going through sn_sal_console_write because that's how
- * register_console has been set up. We currently could have asynch
- * polls calling this function due to sn_sal_switch_to_asynch but we can
- * ignore them until we register with the serial core stuffs.
- *
- */
-static void
-sn_transmit_chars(struct sn_cons_port *port, int raw)
-{
- int xmit_count, tail, head, loops, ii;
- int result;
- char *start;
- struct circ_buf *xmit;
-
- if (!port)
- return;
-
- BUG_ON(!port->sc_is_asynch);
-
- if (port->sc_port.info) {
- /* We're initilized, using serial core infrastructure */
- xmit = &port->sc_port.info->xmit;
- }
- else {
- /* Probably sn_sal_switch_to_asynch has been run but serial core isn't
- * initilized yet. Just return. Writes are going through
- * sn_sal_console_write (due to register_console) at this time.
- */
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
- /* Nothing to do. */
- return;
- }
-
- head = xmit->head;
- tail = xmit->tail;
- start = &xmit->buf[tail];
-
- /* twice around gets the tail to the end of the buffer and
- * then to the head, if needed */
- loops = (head < tail) ? 2 : 1;
-
- for (ii = 0; ii < loops; ii++) {
- xmit_count = (head < tail) ?
- (UART_XMIT_SIZE - tail) : (head - tail);
-
- if (xmit_count > 0) {
- if (raw == TRANSMIT_RAW)
- result =
- port->sc_ops->sal_puts_raw(start,
- xmit_count);
- else
- result =
- port->sc_ops->sal_puts(start, xmit_count);
-#ifdef DEBUG
- if (!result)
- DPRINTF("`");
-#endif
- if (result > 0) {
- xmit_count -= result;
- port->sc_port.icount.tx += result;
- tail += result;
- tail &= UART_XMIT_SIZE - 1;
- xmit->tail = tail;
- start = &xmit->buf[tail];
- }
- }
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&port->sc_port);
-
- if (uart_circ_empty(xmit))
- snp_stop_tx(&port->sc_port, 0); /* no-op for us */
-}
-
-/**
- * sn_sal_interrupt - Handle console interrupts
- * @irq: irq #, useful for debug statements
- * @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
- * @regs: Saved registers, used by sn_receive_chars for uart_handle_sysrq_char
- *
- */
-static irqreturn_t
-sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct sn_cons_port *port = (struct sn_cons_port *) dev_id;
- unsigned long flags;
- int status = ia64_sn_console_intr_status();
-
- if (!port)
- return IRQ_NONE;
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
- if (status & SAL_CONSOLE_INTR_RECV) {
- sn_receive_chars(port, regs);
- }
- if (status & SAL_CONSOLE_INTR_XMIT) {
- sn_transmit_chars(port, TRANSMIT_BUFFERED);
- }
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- return IRQ_HANDLED;
-}
-
-/**
- * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * returns the console irq if interrupt is successfully registered, else 0
- *
- */
-static int
-sn_sal_connect_interrupt(struct sn_cons_port *port)
-{
- if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt, SA_INTERRUPT,
- "SAL console driver", port) >= 0) {
- return SGI_UART_VECTOR;
- }
-
- printk(KERN_INFO "sn_console: console proceeding in polled mode\n");
- return 0;
-}
-
-/**
- * sn_sal_timer_poll - this function handles polled console mode
- * @data: A pointer to our sn_cons_port (which contains the uart port)
- *
- * data is the pointer that init_timer will store for us. This function is
- * associated with init_timer to see if there is any console traffic.
- * Obviously not used in interrupt mode
- *
- */
-static void
-sn_sal_timer_poll(unsigned long data)
-{
- struct sn_cons_port *port = (struct sn_cons_port *) data;
- unsigned long flags;
-
- if (!port)
- return;
-
- if (!port->sc_port.irq) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_receive_chars(port, NULL);
- sn_transmit_chars(port, TRANSMIT_RAW);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- mod_timer(&port->sc_timer,
- jiffies + port->sc_interrupt_timeout);
- }
-}
-
-/*
- * Boot-time initialization code
- */
-
-/**
- * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * So this is used by sn_sal_serial_console_init (early on, before we're
- * registered with serial core). It's also used by sn_sal_module_init
- * right after we've registered with serial core. The later only happens
- * if we didn't already come through here via sn_sal_serial_console_init.
- *
- */
-static void __init
-sn_sal_switch_to_asynch(struct sn_cons_port *port)
-{
- unsigned long flags;
-
- if (!port)
- return;
-
- DPRINTF("sn_console: about to switch to asynchronous console\n");
-
- /* without early_printk, we may be invoked late enough to race
- * with other cpus doing console IO at this point, however
- * console interrupts will never be enabled */
- spin_lock_irqsave(&port->sc_port.lock, flags);
-
- /* early_printk invocation may have done this for us */
- if (!port->sc_ops) {
- if (IS_RUNNING_ON_SIMULATOR())
- port->sc_ops = &sim_ops;
- else
- port->sc_ops = &poll_ops;
- }
-
- /* we can't turn on the console interrupt (as request_irq
- * calls kmalloc, which isn't set up yet), so we rely on a
- * timer to poll for input and push data from the console
- * buffer.
- */
- init_timer(&port->sc_timer);
- port->sc_timer.function = sn_sal_timer_poll;
- port->sc_timer.data = (unsigned long) port;
-
- if (IS_RUNNING_ON_SIMULATOR())
- port->sc_interrupt_timeout = 6;
- else {
- /* 960cps / 16 char FIFO = 60HZ
- * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
- port->sc_interrupt_timeout =
- HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
- }
- mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);
-
- port->sc_is_asynch = 1;
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-}
-
-/**
- * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * In sn_sal_module_init, after we're registered with serial core and
- * the port is added, this function is called to switch us to interrupt
- * mode. We were previously in asynch/polling mode (using init_timer).
- *
- * We attempt to switch to interrupt mode here by calling
- * sn_sal_connect_interrupt. If that works out, we enable receive interrupts.
- */
-static void __init
-sn_sal_switch_to_interrupts(struct sn_cons_port *port)
-{
- int irq;
- unsigned long flags;
-
- if (!port)
- return;
-
- DPRINTF("sn_console: switching to interrupt driven console\n");
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
-
- irq = sn_sal_connect_interrupt(port);
-
- if (irq) {
- port->sc_port.irq = irq;
- port->sc_ops = &intr_ops;
-
- /* turn on receive interrupts */
- ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
- }
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-}
-
-/*
- * Kernel console definitions
- */
-
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
-static void sn_sal_console_write(struct console *, const char *, unsigned);
-static int __init sn_sal_console_setup(struct console *, char *);
-extern struct uart_driver sal_console_uart;
-extern struct tty_driver *uart_console_device(struct console *, int *);
-
-static struct console sal_console = {
- .name = DEVICE_NAME,
- .write = sn_sal_console_write,
- .device = uart_console_device,
- .setup = sn_sal_console_setup,
- .index = -1, /* unspecified */
- .data = &sal_console_uart,
-};
-
-#define SAL_CONSOLE &sal_console
-#else
-#define SAL_CONSOLE 0
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */
-
-static struct uart_driver sal_console_uart = {
- .owner = THIS_MODULE,
- .driver_name = "sn_console",
- .dev_name = DEVICE_NAME,
- .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
- .minor = 0,
- .nr = 1, /* one port */
- .cons = SAL_CONSOLE,
-};
-
-/**
- * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
- *
- * Before this is called, we've been printing kernel messages in a special
- * early mode not making use of the serial core infrastructure. When our
- * driver is loaded for real, we register the driver and port with serial
- * core and try to enable interrupt driven mode.
- *
- */
-static int __init
-sn_sal_module_init(void)
-{
- int retval;
-
- printk(KERN_INFO "sn_console: Console driver init\n");
-
- if (!ia64_platform_is("sn2"))
- return -ENODEV;
-
- if (USE_DYNAMIC_MINOR == 1) {
- misc.minor = MISC_DYNAMIC_MINOR;
- misc.name = DEVICE_NAME_DYNAMIC;
- retval = misc_register(&misc);
- if (retval != 0) {
- printk("Failed to register console device using misc_register.\n");
- return -ENODEV;
- }
- sal_console_uart.major = MISC_MAJOR;
- sal_console_uart.minor = misc.minor;
- }
- else {
- sal_console_uart.major = DEVICE_MAJOR;
- sal_console_uart.minor = DEVICE_MINOR;
- }
-
- /* We register the driver and the port before switching to interrupts
- * or async above so the proper uart structures are populated */
-
- if (uart_register_driver(&sal_console_uart) < 0) {
- printk("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
- __LINE__);
- return -ENODEV;
- }
-
- sal_console_port.sc_port.lock = SPIN_LOCK_UNLOCKED;
-
- /* Setup the port struct with the minimum needed */
- sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */
- sal_console_port.sc_port.type = PORT_16550A;
- sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
- sal_console_port.sc_port.ops = &sn_console_ops;
- sal_console_port.sc_port.line = 0;
-
- if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
- /* error - not sure what I'd do - so I'll do nothing */
- printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__);
- }
-
- /* when this driver is compiled in, the console initialization
- * will have already switched us into asynchronous operation
- * before we get here through the module initcalls */
- if (!sal_console_port.sc_is_asynch) {
- sn_sal_switch_to_asynch(&sal_console_port);
- }
-
- /* at this point (module_init) we can try to turn on interrupts */
- if (!IS_RUNNING_ON_SIMULATOR()) {
- sn_sal_switch_to_interrupts(&sal_console_port);
- }
- return 0;
-}
-
-/**
- * sn_sal_module_exit - When we're unloaded, remove the driver/port
- *
- */
-static void __exit
-sn_sal_module_exit(void)
-{
- del_timer_sync(&sal_console_port.sc_timer);
- uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
- uart_unregister_driver(&sal_console_uart);
- misc_deregister(&misc);
-}
-
-module_init(sn_sal_module_init);
-module_exit(sn_sal_module_exit);
-
-#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
-
-/**
- * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
- * @puts_raw : puts function to do the writing
- * @s: input string
- * @count: length
- *
- * We need a \r ahead of every \n for direct writes through
- * ia64_sn_console_putb (what sal_puts_raw below actually does).
- *
- */
-
-static void puts_raw_fixed(int (*puts_raw) (const char *s, int len), const char *s, int count)
-{
- const char *s1;
-
- /* Output '\r' before each '\n' */
- while ((s1 = memchr(s, '\n', count)) != NULL) {
- puts_raw(s, s1 - s);
- puts_raw("\r\n", 2);
- count -= s1 + 1 - s;
- s = s1 + 1;
- }
- puts_raw(s, count);
-}
-
-/**
- * sn_sal_console_write - Print statements before serial core available
- * @console: Console to operate on - we ignore since we have just one
- * @s: String to send
- * @count: length
- *
- * This is referenced in the console struct. It is used for early
- * console printing before we register with serial core and for things
- * such as kdb. The console_lock must be held when we get here.
- *
- * This function has some code for trying to print output even if the lock
- * is held. We try to cover the case where a lock holder could have died.
- * We don't use this special case code if we're not registered with serial
- * core yet. After we're registered with serial core, the only time this
- * function would be used is for high level kernel output like magic sys req,
- * kdb, and printk's.
- */
-static void
-sn_sal_console_write(struct console *co, const char *s, unsigned count)
-{
- unsigned long flags = 0;
- struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- static int stole_lock = 0;
-#endif
-
- BUG_ON(!port->sc_is_asynch);
-
- /* We can't look at the xmit buffer if we're not registered with serial core
- * yet. So only do the fancy recovery after registering
- */
- if (port->sc_port.info) {
-
- /* somebody really wants this output, might be an
- * oops, kdb, panic, etc. make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- if (spin_is_locked(&port->sc_port.lock)) {
- int lhead = port->sc_port.info->xmit.head;
- int ltail = port->sc_port.info->xmit.tail;
- int counter, got_lock = 0;
-
- /*
- * We attempt to determine if someone has died with the
- * lock. We wait ~20 secs after the head and tail ptrs
- * stop moving and assume the lock holder is not functional
- * and plow ahead. If the lock is freed within the time out
- * period we re-get the lock and go ahead normally. We also
- * remember if we have plowed ahead so that we don't have
- * to wait out the time out period again - the asumption
- * is that we will time out again.
- */
-
- for (counter = 0; counter < 150; mdelay(125), counter++) {
- if (!spin_is_locked(&port->sc_port.lock) || stole_lock) {
- if (!stole_lock) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- got_lock = 1;
- }
- break;
- }
- else {
- /* still locked */
- if ((lhead != port->sc_port.info->xmit.head) || (ltail != port->sc_port.info->xmit.tail)) {
- lhead = port->sc_port.info->xmit.head;
- ltail = port->sc_port.info->xmit.tail;
- counter = 0;
- }
- }
- }
- /* flush anything in the serial core xmit buffer, raw */
- sn_transmit_chars(port, 1);
- if (got_lock) {
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- stole_lock = 0;
- }
- else {
- /* fell thru */
- stole_lock = 1;
- }
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
- else {
- stole_lock = 0;
-#endif
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_transmit_chars(port, 1);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
- }
- else {
- /* Not yet registered with serial core - simple case */
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
-}
-
-
-/**
- * sn_sal_console_setup - Set up console for early printing
- * @co: Console to work with
- * @options: Options to set
- *
- * Altix console doesn't do anything with baud rates, etc, anyway.
- *
- * This isn't required since not providing the setup function in the
- * console struct is ok. However, other patches like KDB plop something
- * here so providing it is easier.
- *
- */
-static int __init
-sn_sal_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-/**
- * sn_sal_console_write_early - simple early output routine
- * @co - console struct
- * @s - string to print
- * @count - count
- *
- * Simple function to provide early output, before even
- * sn_sal_serial_console_init is called. Referenced in the
- * console struct registerd in sn_serial_console_early_setup.
- *
- */
-static void __init
-sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
-{
- puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
-}
-
-/* Used for very early console printing - again, before
- * sn_sal_serial_console_init is run */
-static struct console sal_console_early __initdata = {
- .name = "sn_sal",
- .write = sn_sal_console_write_early,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/**
- * sn_serial_console_early_setup - Sets up early console output support
- *
- * Register a console early on... This is for output before even
- * sn_sal_serial_cosnole_init is called. This function is called from
- * setup.c. This allows us to do really early polled writes. When
- * sn_sal_serial_console_init is called, this console is unregistered
- * and a new one registered.
- */
-int __init
-sn_serial_console_early_setup(void)
-{
- if (!ia64_platform_is("sn2"))
- return -1;
-
- if (IS_RUNNING_ON_SIMULATOR())
- sal_console_port.sc_ops = &sim_ops;
- else
- sal_console_port.sc_ops = &poll_ops;
-
- early_sn_setup(); /* Find SAL entry points */
- register_console(&sal_console_early);
-
- return 0;
-}
-
-
-/**
- * sn_sal_serial_console_init - Early console output - set up for register
- *
- * This function is called when regular console init happens. Because we
- * support even earlier console output with sn_serial_console_early_setup
- * (called from setup.c directly), this function unregisters the really
- * early console.
- *
- * Note: Even if setup.c doesn't register sal_console_early, unregistering
- * it here doesn't hurt anything.
- *
- */
-static int __init
-sn_sal_serial_console_init(void)
-{
- if (ia64_platform_is("sn2")) {
- sn_sal_switch_to_asynch(&sal_console_port);
- DPRINTF ("sn_sal_serial_console_init : register console\n");
- register_console(&sal_console);
- unregister_console(&sal_console_early);
- }
- return 0;
-}
-
-console_initcall(sn_sal_serial_console_init);
-
-#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
- baud = simple_strtoul(s, NULL, 0);
+ baud = simple_strtoul(s, 0, 0);
s = strchr(s, ',');
- bits = simple_strtoul(++s, NULL, 0);
+ bits = simple_strtoul(++s, 0, 0);
s = strchr(s, ',');
parity = *(++s);
s = strchr(s, ',');
- stop = simple_strtoul(++s, NULL, 0);
+ stop = simple_strtoul(++s, 0, 0);
s = strchr(s, ',');
/* XXX handshake is not handled here. */
{
unsigned char status1, status2, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
- struct linux_ebus_device *dev = NULL;
+ struct linux_ebus_device *dev = 0;
struct linux_ebus *ebus;
#ifdef CONFIG_SPARC64
struct sparc_isa_bridge *isa_br;
s->count++;
up(&open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_audio_release_mixdev(struct inode *inode, struct file *file)
unsigned int ptr;
int cnt, err;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (as->usbin.dma.mapped)
return -ENXIO;
if (!as->usbin.dma.ready && (ret = prog_dmabuf_in(as)))
unsigned int start_thr;
int cnt, err;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (as->usbout.dma.mapped)
return -ENXIO;
if (!as->usbout.dma.ready && (ret = prog_dmabuf_out(as)))
as->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
s->count++;
up(&open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int usb_audio_release(struct inode *inode, struct file *file)
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
+ if ( ppos != &file->f_pos ) {
+ return -ESPIPE;
+ }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
ssize_t ret;
unsigned long int flags;
+ if ( ppos != &file->f_pos ) {
+ return -ESPIPE;
+ }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
printk(KERN_INFO "usb-midi: Open Succeeded. minor= %d.\n", minor);
#endif
- return nonseekable_open(inode, file); /** Success. **/
+ return 0; /** Success. **/
}
return 0;
}
-#define hub_suspend NULL
-#define hub_resume NULL
+#define hub_suspend 0
+#define hub_resume 0
#define remote_wakeup(x) 0
#endif /* CONFIG_USB_SUSPEND */
int rndis_proc_write (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- rndis_params *p = data;
u32 speed = 0;
int i, fl_speed = 0;
for (i = 0; i < count; i++) {
- char c;
- if (get_user(c, buffer))
- return -EFAULT;
- switch (c) {
+ switch (*buffer) {
case '0':
case '1':
case '2':
case '8':
case '9':
fl_speed = 1;
- speed = speed*10 + c - '0';
+ speed = speed*10 + *buffer - '0';
break;
case 'C':
case 'c':
- rndis_signal_connect (p->confignr);
+ rndis_signal_connect (((rndis_params *) data)
+ ->confignr);
break;
case 'D':
case 'd':
- rndis_signal_disconnect(p->confignr);
+ rndis_signal_disconnect (((rndis_params *) data)
+ ->confignr);
break;
default:
- if (fl_speed) p->speed = speed;
- else DEBUG ("%c is not valid\n", c);
+ if (fl_speed) ((rndis_params *) data)->speed = speed;
+ else DEBUG ("%c is not valid\n", *buffer);
break;
}
size_t nbytes, loff_t *ppos)
{
struct uhci_proc *up = file->private_data;
- return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
+ unsigned int pos;
+ unsigned int size;
+
+ pos = *ppos;
+ size = up->size;
+ if (pos >= size)
+ return 0;
+ if (nbytes > size - pos)
+ nbytes = size - pos;
+
+ if (copy_to_user(buf, up->data + pos, nbytes))
+ return -EFAULT;
+
+ *ppos += nbytes;
+
+ return nbytes;
}
static int uhci_proc_release(struct inode *inode, struct file *file)
file->f_pos = 0;
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int dabusb_release (struct inode *inode, struct file *file)
* Based on the Linux CPiA driver written by Peter Pregler,
* Scott J. Bertin and Johannes Erdfelt.
*
- * Please see the file: Documentation/usb/ov511.txt
+ * Please see the file: linux/Documentation/usb/ov511.txt
* and the website at: http://alpha.dyndns.org/ov511
* for more info.
*
DECLARE_WAITQUEUE(wait, current);
int bytes_to_read;
- Trace(TRACE_READ, "video_read(0x%p, %p, %zd) called.\n", vdev, buf, count);
+ Trace(TRACE_READ, "video_read(0x%p, %p, %d) called.\n", vdev, buf, count);
if (vdev == NULL)
return -EFAULT;
pdev = vdev->priv;
/* file IO stuff */
file->f_pos = 0;
file->private_data = ccp;
- return nonseekable_open(inode, file);
+ return 0;
/* Error exit */
ofail: up (&cp->mutex);
dbg(2, "%s: enter", __FUNCTION__);
- nonseekable_open(inode, file);
subminor = iminor(inode);
down (&disconnect_sem);
*
* Based on dabusb.c, printer.c & scanner.c
*
- * Please see the file: Documentation/usb/silverlink.txt
+ * Please see the file: linux/Documentation/usb/SilverLink.txt
* and the website at: http://lpg.ticalc.org/prj_usb/
* for more info.
*
filp->f_pos = 0;
filp->private_data = s;
- return nonseekable_open(inode, filp);
+ return 0;
}
static int
config FB_CIRRUS
tristate "Cirrus Logic support"
- depends on FB && (ZORRO || PCI)
+ depends on FB && (AMIGA || PCI)
---help---
This enables support for Cirrus Logic GD542x/543x based boards on
Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
- depends on FB && PCI && (BROKEN || !SPARC64)
+ depends on FB && PCI
help
This enables support for the Integraphics CyberPro 20x0 and 5000
VGA chips used in the Rebel.com Netwinder and other machines.
config FB_S3TRIO
bool "S3 Trio display support"
- depends on FB && PPC && BROKEN
+ depends on FB && PPC
help
If you have a S3 Trio say Y. Say N for S3 Virge.
independently validate video mode parameters, you should say Y
here.
-config FB_RIVA_DEBUG
- bool "Lots of debug output from Riva(nVidia) driver"
- depends on FB_RIVA
- default n
- help
- Say Y here if you want the Riva driver to output all sorts
- of debugging informations to provide to the maintainer when
- something goes wrong.
-
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on FB && AGP && AGP_INTEL && EXPERIMENTAL && PCI
info->fix.ypanstep = 0;
} else {
info->fix.ywrapstep = 0;
- if (par->vmode & FB_VMODE_SMOOTH_XPAN)
+ if (par->vmode &= FB_VMODE_SMOOTH_XPAN)
info->fix.xpanstep = 1;
else
info->fix.xpanstep = 16<<maxfmode;
*/
{
- u_long tmp = DIVUL(200000000000ULL, amiga_eclock);
+ u_long tmp = DIVUL(200E9, amiga_eclock);
pixclock[TAG_SHRES] = (tmp + 4) / 8; /* SHRES: 35 ns / 28 MHz */
pixclock[TAG_HIRES] = (tmp + 2) / 4; /* HIRES: 70 ns / 14 MHz */
}
-#ifdef CONFIG_X86
+#ifdef __i386__
static void * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
{
/* I simplified this code as we used to miss the signatures in
}
return rom_base;
}
-#endif
+#endif /* __i386__ */
#endif /* ndef(__sparc__) */
/* fill in known card constants if pll_block is not available */
#ifndef __sparc__
bios = aty128_map_ROM(par, pdev);
-#ifdef CONFIG_X86
+#ifdef __i386__
if (bios == NULL)
bios = aty128_find_mem_vbios(par);
#endif
case FBIO_ATY128_SET_MIRROR:
if (par->chip_gen != rage_M3)
return -EINVAL;
- rc = get_user(value, (__u32 __user *)arg);
+ rc = get_user(value, (__u32*)arg);
if (rc)
return rc;
par->lcd_on = (value & 0x01) != 0;
if (par->chip_gen != rage_M3)
return -EINVAL;
value = (par->crt_on << 1) | par->lcd_on;
- return put_user(value, (__u32 __user *)arg);
+ return put_user(value, (__u32*)arg);
}
#endif
return -EINVAL;
wait_for_idle(par);
aty128fb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
/* Refresh */
fb_set_suspend(info, 0);
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
if (copy_to_user
- ((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp)))
+ ((struct fbtype *) arg, &fbtyp, sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
case PBOOK_SLEEP_REJECT:
if (par->save_framebuffer) {
vfree(par->save_framebuffer);
- par->save_framebuffer = NULL;
+ par->save_framebuffer = 0;
}
break;
case PBOOK_SLEEP_NOW:
memcpy_toio((void *) info->screen_base,
par->save_framebuffer, nb);
vfree(par->save_framebuffer);
- par->save_framebuffer = NULL;
+ par->save_framebuffer = 0;
}
/* Restore display */
atyfb_set_par(info);
for (m = MIN_M; m <= MAX_M; m++) {
for (n = MIN_N; n <= MAX_N; n++) {
- tempA = 938356; /* 14.31818 * 65536 */
+ tempA = (14.31818 * 65536);
tempA *= (n + 8); /* 43..256 */
tempB = twoToKth * 256;
tempB *= (m + 2); /* 4..32 */
return -ENXIO;
}
-#ifdef CONFIG_X86
+#ifdef __i386__
static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
{
/* I simplified this code as we used to miss the signatures in
return 0;
}
-#endif
+#endif /* __i386__ */
#ifdef CONFIG_PPC_OF
/*
printk(KERN_WARNING "radeonfb: Cannot match card to OF node !\n");
return -ENODEV;
}
- val = (u32 *) get_property(dp, "ATY,RefCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,RefCLK", 0);
if (!val || !*val) {
printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
return -EINVAL;
rinfo->pll.ref_clk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,SCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,SCLK", 0);
if (val && *val)
rinfo->pll.sclk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,MCLK", NULL);
+ val = (u32 *) get_property(dp, "ATY,MCLK", 0);
if (val && *val)
rinfo->pll.mclk = (*val) / 10;
/*
* On x86, the primary display on laptop may have it's BIOS
* ROM elsewhere, try to locate it at the legacy memory hole.
- * We probably need to make sure this is the primary display,
+ * We probably need to make sure this is the primary display,
* but that is difficult without some arch support.
*/
-#ifdef CONFIG_X86
+#ifdef __i386__
if (rinfo->bios_seg == NULL)
radeon_find_mem_vbios(rinfo);
-#endif
+#endif /* __i386__ */
/* If both above failed, try the BIOS ROM again for mobility
* chips
/* Restore display & engine */
radeonfb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
/* Refresh */
fb_set_suspend(info, 0);
{
struct cg14_par *par = (struct cg14_par *) info->par;
struct cg14_regs *regs = par->regs;
- struct mdi_cfginfo kmdi, __user *mdii;
+ struct mdi_cfginfo kmdi, *mdii;
unsigned long flags;
int cur_mode, mode, ret = 0;
kmdi.mdi_size = par->ramsize;
spin_unlock_irqrestore(&par->lock, flags);
- mdii = (struct mdi_cfginfo __user *) arg;
+ mdii = (struct mdi_cfginfo *) arg;
if (copy_to_user(mdii, &kmdi, sizeof(kmdi)))
ret = -EFAULT;
break;
case MDI_SET_PIXELMODE:
- if (get_user(mode, (int __user *) arg)) {
+ if (get_user(mode, (int *) arg)) {
ret = -EFAULT;
break;
}
case PBOOK_SLEEP_REJECT:
if (save_framebuffer) {
vfree(save_framebuffer);
- save_framebuffer = NULL;
+ save_framebuffer = 0;
}
break;
case PBOOK_SLEEP_NOW:
if (save_framebuffer) {
memcpy(p->screen_base, save_framebuffer, nb);
vfree(save_framebuffer);
- save_framebuffer = NULL;
+ save_framebuffer = 0;
}
chipsfb_blank(0, p);
break;
*
* Contributors (thanks, all!)
*
- * David Eger:
- * Overhaul for Linux 2.6
+ * David Eger:
+ * Overhaul for Linux 2.6
*
* Jeff Rugen:
* Major contributions; Motorola PowerStack (PPC and PCI) support,
* a run-time table?
*/
static const struct cirrusfb_board_info_rec {
+ cirrusfb_board_t btype; /* chipset enum, not strictly necessary, as
+ * cirrusfb_board_info[] is directly indexed
+ * by this value */
char *name; /* ASCII name of chipset */
long maxclock[5]; /* maximum video clock */
/* for 1/4bpp, 8bpp 15/16bpp, 24bpp, 32bpp - numbers from xorg code */
unsigned char sr1f; /* SR1F VGA initial register value */
} cirrusfb_board_info[] = {
- [BT_SD64] = {
- .name = "CL SD64",
- .maxclock = {
- /* guess */
- /* the SD64/P4 have a higher max. videoclock */
- 140000, 140000, 140000, 140000, 140000,
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0xF0,
- .sr07_1bpp = 0xF0,
- .sr07_8bpp = 0xF1,
- .sr1f = 0x20
- },
- [BT_PICCOLO] = {
- .name = "CL Piccolo",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x80,
- .sr07_1bpp = 0x80,
- .sr07_8bpp = 0x81,
- .sr1f = 0x22
- },
- [BT_PICASSO] = {
- .name = "CL Picasso",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x20,
- .sr07_1bpp = 0x20,
- .sr07_8bpp = 0x21,
- .sr1f = 0x22
- },
- [BT_SPECTRUM] = {
- .name = "CL Spectrum",
- .maxclock = {
- /* guess */
- 90000, 90000, 90000, 90000, 90000
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = FALSE,
- .sr07 = 0x80,
- .sr07_1bpp = 0x80,
- .sr07_8bpp = 0x81,
- .sr1f = 0x22
- },
- [BT_PICASSO4] = {
- .name = "CL Picasso4",
- .maxclock = {
- 135100, 135100, 85500, 85500, 0
- },
- .init_sr07 = TRUE,
- .init_sr1f = FALSE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0x20,
- .sr07_1bpp = 0x20,
- .sr07_8bpp = 0x21,
- .sr1f = 0
- },
- [BT_ALPINE] = {
- .name = "CL Alpine",
- .maxclock = {
- /* for the GD5430. GD5446 can do more... */
- 85500, 85500, 50000, 28500, 0
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0xA0,
- .sr07_1bpp = 0xA1,
- .sr07_1bpp_mux = 0xA7,
- .sr07_8bpp = 0xA1,
- .sr07_8bpp_mux = 0xA7,
- .sr1f = 0x1C
- },
- [BT_GD5480] = {
- .name = "CL GD5480",
- .maxclock = {
- 135100, 200000, 200000, 135100, 135100
- },
- .init_sr07 = TRUE,
- .init_sr1f = TRUE,
- .scrn_start_bit19 = TRUE,
- .sr07 = 0x10,
- .sr07_1bpp = 0x11,
- .sr07_8bpp = 0x11,
- .sr1f = 0x1C
- },
- [BT_LAGUNA] = {
- .name = "CL Laguna",
- .maxclock = {
- /* guess */
- 135100, 135100, 135100, 135100, 135100,
- },
- .init_sr07 = FALSE,
- .init_sr1f = FALSE,
- .scrn_start_bit19 = TRUE,
- }
+ { BT_NONE, }, /* dummy record */
+ { BT_SD64,
+ "CL SD64",
+ { 140000, 140000, 140000, 140000, 140000, }, /* guess */
+ /* the SD64/P4 have a higher max. videoclock */
+ TRUE,
+ TRUE,
+ TRUE,
+ 0xF0,
+ 0xF0,
+ 0, /* unused, does not multiplex */
+ 0xF1,
+ 0, /* unused, does not multiplex */
+ 0x20 },
+ { BT_PICCOLO,
+ "CL Piccolo",
+ { 90000, 90000, 90000, 90000, 90000 }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x80,
+ 0x80,
+ 0, /* unused, does not multiplex */
+ 0x81,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_PICASSO,
+ "CL Picasso",
+ { 90000, 90000, 90000, 90000, 90000, }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x20,
+ 0x20,
+ 0, /* unused, does not multiplex */
+ 0x21,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_SPECTRUM,
+ "CL Spectrum",
+ { 90000, 90000, 90000, 90000, 90000, }, /* guess */
+ TRUE,
+ TRUE,
+ FALSE,
+ 0x80,
+ 0x80,
+ 0, /* unused, does not multiplex */
+ 0x81,
+ 0, /* unused, does not multiplex */
+ 0x22 },
+ { BT_PICASSO4,
+ "CL Picasso4",
+ { 135100, 135100, 85500, 85500, 0 },
+ TRUE,
+ FALSE,
+ TRUE,
+ 0x20,
+ 0x20,
+ 0, /* unused, does not multiplex */
+ 0x21,
+ 0, /* unused, does not multiplex */
+ 0 },
+ { BT_ALPINE,
+ "CL Alpine",
+ { 85500, 85500, 50000, 28500, 0}, /* for the GD5430. GD5446 can do more... */
+ TRUE,
+ TRUE,
+ TRUE,
+ 0xA0,
+ 0xA1,
+ 0xA7,
+ 0xA1,
+ 0xA7,
+ 0x1C },
+ { BT_GD5480,
+ "CL GD5480",
+ { 135100, 200000, 200000, 135100, 135100 },
+ TRUE,
+ TRUE,
+ TRUE,
+ 0x10,
+ 0x11,
+ 0, /* unused, does not multiplex */
+ 0x11,
+ 0, /* unused, does not multiplex */
+ 0x1C },
+ { BT_LAGUNA,
+ "CL Laguna",
+ { 135100, 135100, 135100, 135100, 135100, }, /* guess */
+ FALSE,
+ FALSE,
+ TRUE,
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0, /* unused */
+ 0 }, /* unused */
};
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_##id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (btype) }
static struct pci_device_id cirrusfb_pci_table[] = {
- CHIP( CIRRUS_5436, BT_ALPINE ),
- CHIP( CIRRUS_5434_8, BT_ALPINE ),
- CHIP( CIRRUS_5434_4, BT_ALPINE ),
- CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
- CHIP( CIRRUS_7543, BT_ALPINE ),
- CHIP( CIRRUS_7548, BT_ALPINE ),
+ CHIP( CIRRUS_5436, BT_ALPINE ),
+ CHIP( CIRRUS_5434_8, BT_ALPINE ),
+ CHIP( CIRRUS_5434_4, BT_ALPINE ),
+ CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
+ CHIP( CIRRUS_7543, BT_ALPINE ),
+ CHIP( CIRRUS_7548, BT_ALPINE ),
CHIP( CIRRUS_5480, BT_GD5480 ), /* MacPicasso probably */
CHIP( CIRRUS_5446, BT_PICASSO4 ), /* Picasso 4 is a GD5446 */
CHIP( CIRRUS_5462, BT_LAGUNA ), /* CL Laguna */
#ifdef CONFIG_ZORRO
-static const struct zorro_device_id cirrusfb_zorro_table[] = {
- {
- .id = ZORRO_PROD_HELFRICH_SD64_RAM,
- .driver_data = BT_SD64,
- }, {
- .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- .driver_data = BT_PICCOLO,
- }, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- .driver_data = BT_PICASSO,
- }, {
- .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- .driver_data = BT_SPECTRUM,
- }, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- .driver_data = BT_PICASSO4,
- },
- { 0 }
-};
-
static const struct {
- zorro_id id2;
+ cirrusfb_board_t btype;
+ zorro_id id, id2;
unsigned long size;
-} cirrusfb_zorro_table2[] = {
- [BT_SD64] = {
- .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
- .size = 0x400000
- },
- [BT_PICCOLO] = {
- .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
- .size = 0x200000
- },
- [BT_PICASSO] = {
- .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- .size = 0x200000
- },
- [BT_SPECTRUM] = {
- .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- .size = 0x200000
- },
- [BT_PICASSO4] = {
- .id2 = 0,
- .size = 0x400000
- }
+} cirrusfb_zorro_probe_list[] __initdata = {
+ { BT_SD64,
+ ZORRO_PROD_HELFRICH_SD64_RAM,
+ ZORRO_PROD_HELFRICH_SD64_REG,
+ 0x400000 },
+ { BT_PICCOLO,
+ ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+ ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ 0x200000 },
+ { BT_PICASSO,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ 0x200000 },
+ { BT_SPECTRUM,
+ ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+ ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ 0x200000 },
+ { BT_PICASSO4,
+ ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
+ 0,
+ 0x400000 },
};
#endif /* CONFIG_ZORRO */
struct { u8 red, green, blue, pad; } palette[256];
#ifdef CONFIG_ZORRO
- struct zorro_dev *zdev;
+ unsigned long board_addr,
+ board_size;
#endif
+
#ifdef CONFIG_PCI
struct pci_dev *pdev;
#endif
- void (*unmap)(struct cirrusfb_info *cinfo);
};
static const struct {
const char *name;
struct fb_var_screeninfo var;
-} cirrusfb_predefined[] = {
- {
- /* autodetect mode */
- .name = "Autodetect",
- }, {
- /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
- .name = "640x480",
- .var = {
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 40000,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 32,
- .lower_margin = 8,
- .hsync_len = 96,
- .vsync_len = 4,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED
- }
- }, {
- /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
- .name = "800x600",
- .var = {
- .xres = 800,
- .yres = 600,
- .xres_virtual = 800,
- .yres_virtual = 600,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 20000,
- .left_margin = 128,
- .right_margin = 16,
- .upper_margin = 24,
- .lower_margin = 2,
- .hsync_len = 96,
- .vsync_len = 6,
- .vmode = FB_VMODE_NONINTERLACED
- }
- }, {
- /*
- * Modeline from XF86Config:
- * Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
- */
- /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
- .name = "1024x768",
- .var = {
- .xres = 1024,
- .yres = 768,
- .xres_virtual = 1024,
- .yres_virtual = 768,
- .bits_per_pixel = 8,
- .red = { .length = 8 },
- .green = { .length = 8 },
- .blue = { .length = 8 },
- .width = -1,
- .height = -1,
- .pixclock = 12500,
- .left_margin = 144,
- .right_margin = 32,
- .upper_margin = 30,
- .lower_margin = 2,
- .hsync_len = 192,
- .vsync_len = 6,
- .vmode = FB_VMODE_NONINTERLACED
+} cirrusfb_predefined[] =
+
+{
+ {"Autodetect", /* autodetect mode */
+ {0}
+ },
+
+ {"640x480", /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
+ {
+ 640, 480, 640, 480, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 40000, 48, 16, 32, 8, 96, 4,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+ }
+ },
+
+ {"800x600", /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
+ {
+ 800, 600, 800, 600, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 20000, 128, 16, 24, 2, 96, 6,
+ 0, FB_VMODE_NONINTERLACED
+ }
+ },
+
+ /*
+ Modeline from XF86Config:
+ Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
+ */
+ {"1024x768", /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
+ {
+ 1024, 768, 1024, 768, 0, 0, 8, 0,
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 8, 0},
+ {0, 0, 0},
+ 0, 0, -1, -1, FB_ACCEL_NONE, 12500, 144, 32, 30, 2, 192, 6,
+ 0, FB_VMODE_NONINTERLACED
}
}
};
static struct fb_ops cirrusfb_ops = {
.owner = THIS_MODULE,
.fb_open = cirrusfb_open,
- .fb_release = cirrusfb_release,
+ .fb_release = cirrusfb_release,
.fb_setcolreg = cirrusfb_setcolreg,
.fb_check_var = cirrusfb_check_var,
.fb_set_par = cirrusfb_set_par,
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_1bpp_mux : bi->sr07_1bpp);
+ bi->sr07_1bpp_mux : bi->sr07_1bpp);
break;
case BT_LAGUNA:
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_8bpp_mux : bi->sr07_8bpp);
+ bi->sr07_8bpp_mux : bi->sr07_8bpp);
break;
case BT_LAGUNA:
}
-static void cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
+static void __devexit cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
{
struct pci_dev *pdev = cinfo->pdev;
framebuffer_release(cinfo->info);
pci_disable_device(pdev);
}
-#endif /* CONFIG_PCI */
-
-
-#ifdef CONFIG_ZORRO
-static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
-{
- zorro_release_device(cinfo->zdev);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap ((void *)cinfo->regbase);
- iounmap ((void *)cinfo->fbmem);
- } else {
- if (zorro_resource_start(cinfo->zdev) > 0x01000000)
- iounmap ((void *)cinfo->fbmem);
- }
- framebuffer_release(cinfo->info);
-}
-#endif /* CONFIG_ZORRO */
-
-static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
-{
- struct fb_info *info = cinfo->info;
- struct fb_var_screeninfo *var = &info->var;
-
- info->currcon = -1;
- info->par = cinfo;
- info->pseudo_palette = cinfo->pseudo_palette;
- info->flags = FBINFO_DEFAULT
- | FBINFO_HWACCEL_XPAN
- | FBINFO_HWACCEL_YPAN
- | FBINFO_HWACCEL_FILLRECT
- | FBINFO_HWACCEL_COPYAREA;
- if (noaccel)
- info->flags |= FBINFO_HWACCEL_DISABLED;
- info->fbops = &cirrusfb_ops;
- info->screen_base = cinfo->fbmem;
- if (cinfo->btype == BT_GD5480) {
- if (var->bits_per_pixel == 16)
- info->screen_base += 1 * MB_;
- if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
- info->screen_base += 2 * MB_;
- }
-
- /* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
- sizeof(info->fix.id));
-
- /* monochrome: only 1 memory plane */
- /* 8 bit and above: Use whole memory area */
- info->fix.smem_start = cinfo->fbmem_phys;
- info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
- info->fix.type = cinfo->currentmode.type;
- info->fix.type_aux = 0;
- info->fix.visual = cinfo->currentmode.visual;
- info->fix.xpanstep = 1;
- info->fix.ypanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.line_length = cinfo->currentmode.line_length;
-
- /* FIXME: map region at 0xB8000 if available, fill in here */
- info->fix.mmio_start = cinfo->fbregs_phys;
- info->fix.mmio_len = 0;
- info->fix.accel = FB_ACCEL_NONE;
-
- fb_alloc_cmap(&info->cmap, 256, 0);
-
- return 0;
-}
-static int cirrusfb_register(struct cirrusfb_info *cinfo)
-{
- struct fb_info *info;
- int err;
- cirrusfb_board_t btype;
-
- DPRINTK ("ENTER\n");
- printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
-
- info = cinfo->info;
- btype = cinfo->btype;
-
- /* sanity checks */
- assert (btype != BT_NONE);
-
- DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
-
- /* Make pretend we've set the var so our structures are in a "good" */
- /* state, even though we haven't written the mode to the hw yet... */
- info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
- info->var.activate = FB_ACTIVATE_NOW;
-
- err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
- if (err < 0) {
- /* should never happen */
- DPRINTK("choking on default var... umm, no good.\n");
- goto err_unmap_cirrusfb;
- }
-
- /* set all the vital stuff */
- cirrusfb_set_fbinfo(cinfo);
-
- err = register_framebuffer(info);
- if (err < 0) {
- printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
- goto err_dealloc_cmap;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_dealloc_cmap:
- fb_dealloc_cmap(&info->cmap);
-err_unmap_cirrusfb:
- cinfo->unmap(cinfo);
- return err;
-}
-
-static void __devexit cirrusfb_cleanup (struct fb_info *info)
-{
- struct cirrusfb_info *cinfo = info->par;
- DPRINTK ("ENTER\n");
-
- switch_monitor (cinfo, 0);
-
- unregister_framebuffer (info);
- fb_dealloc_cmap (&info->cmap);
- printk ("Framebuffer unregistered\n");
- cinfo->unmap(cinfo);
-
- DPRINTK ("EXIT\n");
-}
-
-
-#ifdef CONFIG_PCI
-static int cirrusfb_pci_register (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static struct cirrusfb_info *cirrusfb_pci_setup (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cinfo->fbmem_phys = board_addr;
cinfo->size = board_size;
- cinfo->unmap = cirrusfb_pci_unmap;
printk (" RAM (%lu kB) at 0xx%lx, ", cinfo->size / KB_, board_addr);
printk ("Cirrus Logic chipset on PCI bus\n");
- pci_set_drvdata(pdev, info);
- return cirrusfb_register(cinfo);
+ return cinfo;
err_release_legacy:
if (release_io_ports)
err_disable:
pci_disable_device(pdev);
err_out:
- return ret;
+ return ERR_PTR(ret);
}
+#endif /* CONFIG_PCI */
-void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
+
+
+
+#ifdef CONFIG_ZORRO
+static int cirrusfb_zorro_find (struct zorro_dev **z_o,
+ struct zorro_dev **z2_o,
+ cirrusfb_board_t *btype, unsigned long *size)
{
- struct fb_info *info = pci_get_drvdata(pdev);
- DPRINTK ("ENTER\n");
+ struct zorro_dev *z = NULL;
+ int i;
- cirrusfb_cleanup (info);
+ assert (z_o != NULL);
+ assert (btype != NULL);
- DPRINTK ("EXIT\n");
+ for (i = 0; i < ARRAY_SIZE(cirrusfb_zorro_probe_list); i++)
+ if ((z = zorro_find_device(cirrusfb_zorro_probe_list[i].id, NULL)))
+ break;
+
+ if (z) {
+ *z_o = z;
+ if (cirrusfb_zorro_probe_list[i].id2)
+ *z2_o = zorro_find_device(cirrusfb_zorro_probe_list[i].id2, NULL);
+ else
+ *z2_o = NULL;
+
+ *btype = cirrusfb_zorro_probe_list[i].btype;
+ *size = cirrusfb_zorro_probe_list[i].size;
+
+ printk (KERN_INFO "cirrusfb: %s board detected; ",
+ cirrusfb_board_info[*btype].name);
+
+ return 0;
+ }
+
+ printk (KERN_NOTICE "cirrusfb: no supported board found.\n");
+ return -ENODEV;
}
-static struct pci_driver cirrusfb_pci_driver = {
- .name = "cirrusfb",
- .id_table = cirrusfb_pci_table,
- .probe = cirrusfb_pci_register,
- .remove = __devexit_p(cirrusfb_pci_unregister),
-#ifdef CONFIG_PM
-#if 0
- .suspend = cirrusfb_pci_suspend,
- .resume = cirrusfb_pci_resume,
-#endif
-#endif
-};
-#endif /* CONFIG_PCI */
+static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
+{
+ release_mem_region(cinfo->board_addr, cinfo->board_size);
-#ifdef CONFIG_ZORRO
-static int cirrusfb_zorro_register(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+ if (cinfo->btype == BT_PICASSO4) {
+ cinfo->regbase -= 0x600000;
+ iounmap ((void *)cinfo->regbase);
+ iounmap ((void *)cinfo->fbmem);
+ } else {
+ if (cinfo->board_addr > 0x01000000)
+ iounmap ((void *)cinfo->fbmem);
+ }
+ framebuffer_release(cinfo->info);
+}
+
+
+static struct cirrusfb_info *cirrusfb_zorro_setup(void)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cirrusfb_board_t btype;
- struct zorro_dev *z2 = NULL;
+ struct zorro_dev *z = NULL, *z2 = NULL;
unsigned long board_addr, board_size, size;
int ret;
- btype = ent->driver_data;
- if (cirrusfb_zorro_table2[btype].id2)
- z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
- size = cirrusfb_zorro_table2[btype].size;
- printk(KERN_INFO "cirrusfb: %s board detected; ",
- cirrusfb_board_info[btype].name);
+ ret = cirrusfb_zorro_find (&z, &z2, &btype, &size);
+ if (ret < 0)
+ goto err_out;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
assert (z2 >= 0);
assert (btype != BT_NONE);
- cinfo->zdev = z;
- board_addr = zorro_resource_start(z);
- board_size = zorro_resource_len(z);
+ cinfo->board_addr = board_addr = z->resource.start;
+ cinfo->board_size = board_size = z->resource.end-z->resource.start+1;
cinfo->size = size;
- if (!zorro_request_device(z, "cirrusfb")) {
+ if (!request_mem_region(board_addr, board_size, "cirrusfb")) {
printk(KERN_ERR "cirrusfb: cannot reserve region 0x%lx, abort\n",
board_addr);
ret = -EBUSY;
cinfo->fbregs_phys = board_addr + 0x600000;
cinfo->fbmem_phys = board_addr + 16777216;
- cinfo->fbmem = ioremap (cinfo->fbmem_phys, 16777216);
+ cinfo->fbmem = ioremap (cinfo->fbmem_phys, 16777216);
if (!cinfo->fbmem)
goto err_unmap_regbase;
} else {
DPRINTK ("cirrusfb: Virtual address for board set to: $%p\n", cinfo->regbase);
}
- cinfo->unmap = cirrusfb_zorro_unmap;
printk (KERN_INFO "Cirrus Logic chipset on Zorro bus\n");
- zorro_set_drvdata(z, info);
- return cirrusfb_register(cinfo);
+ return cinfo;
err_unmap_regbase:
/* Parental advisory: explicit hack */
err_release_fb:
framebuffer_release(info);
err_out:
- return ret;
+ return ERR_PTR(ret);
}
+#endif /* CONFIG_ZORRO */
-void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
+static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
{
- struct fb_info *info = zorro_get_drvdata(z);
+ struct fb_info *info = cinfo->info;
+ struct fb_var_screeninfo *var = &info->var;
+
+ info->currcon = -1;
+ info->par = cinfo;
+ info->pseudo_palette = cinfo->pseudo_palette;
+ info->flags = FBINFO_DEFAULT
+ | FBINFO_HWACCEL_XPAN
+ | FBINFO_HWACCEL_YPAN
+ | FBINFO_HWACCEL_FILLRECT
+ | FBINFO_HWACCEL_COPYAREA;
+ if (noaccel)
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->fbops = &cirrusfb_ops;
+ info->screen_base = cinfo->fbmem;
+ if (cinfo->btype == BT_GD5480) {
+ if (var->bits_per_pixel == 16)
+ info->screen_base += 1 * MB_;
+ if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
+ info->screen_base += 2 * MB_;
+ }
+
+ /* Fill fix common fields */
+ strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ sizeof(info->fix.id));
+
+ /* monochrome: only 1 memory plane */
+ /* 8 bit and above: Use whole memory area */
+ info->fix.smem_start = cinfo->fbmem_phys;
+ info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
+ info->fix.type = cinfo->currentmode.type;
+ info->fix.type_aux = 0;
+ info->fix.visual = cinfo->currentmode.visual;
+ info->fix.xpanstep = 1;
+ info->fix.ypanstep = 1;
+ info->fix.ywrapstep = 0;
+ info->fix.line_length = cinfo->currentmode.line_length;
+
+ /* FIXME: map region at 0xB8000 if available, fill in here */
+ info->fix.mmio_start = cinfo->fbregs_phys;
+ info->fix.mmio_len = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ fb_alloc_cmap(&info->cmap, 256, 0);
+
+ return 0;
+}
+
+#if defined(CONFIG_PCI)
+#define cirrusfb_unmap cirrusfb_pci_unmap
+#define cirrusfb_bus_setup cirrusfb_pci_setup
+#elif defined(CONFIG_ZORRO)
+#define cirrusfb_unmap cirrusfb_zorro_unmap
+#define cirrusfb_bus_setup cirrusfb_zorro_setup
+#endif
+
+
+static int cirrusfb_pci_register (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct fb_info *info;
+ struct cirrusfb_info *cinfo = NULL;
+ int err;
+ cirrusfb_board_t btype;
+
+ DPRINTK ("ENTER\n");
+
+ printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
+
+ cinfo = cirrusfb_bus_setup(pdev, ent);
+
+ if (IS_ERR(cinfo)) {
+ err = PTR_ERR(cinfo);
+ goto err_out;
+ }
+
+ info = cinfo->info;
+ btype = cinfo->btype;
+
+ /* sanity checks */
+ assert (btype != BT_NONE);
+ assert (btype == cirrusfb_board_info[btype].btype);
+
+ DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
+
+ /* Make pretend we've set the var so our structures are in a "good" */
+ /* state, even though we haven't written the mode to the hw yet... */
+ info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+ err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
+ if (err < 0) {
+ /* should never happen */
+ DPRINTK("choking on default var... umm, no good.\n");
+ goto err_unmap_cirrusfb;
+ }
+
+ /* set all the vital stuff */
+ cirrusfb_set_fbinfo(cinfo);
+
+ pci_set_drvdata(pdev, info);
+
+ err = register_framebuffer(info);
+ if (err < 0) {
+ printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
+ goto err_dealloc_cmap;
+ }
+
+ DPRINTK ("EXIT, returning 0\n");
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_unmap_cirrusfb:
+ cirrusfb_unmap(cinfo);
+err_out:
+ return err;
+}
+
+
+static void __devexit cirrusfb_cleanup (struct fb_info *info)
+{
+ struct cirrusfb_info *cinfo = info->par;
+ DPRINTK ("ENTER\n");
+
+#ifdef CONFIG_ZORRO
+ switch_monitor (cinfo, 0);
+#endif
+
+ unregister_framebuffer (info);
+ fb_dealloc_cmap (&info->cmap);
+ printk ("Framebuffer unregistered\n");
+ cirrusfb_unmap (cinfo);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
DPRINTK ("ENTER\n");
cirrusfb_cleanup (info);
DPRINTK ("EXIT\n");
}
-static struct zorro_driver cirrusfb_zorro_driver = {
- .name = "cirrusfb",
- .id_table = cirrusfb_zorro_table,
- .probe = cirrusfb_zorro_register,
- .remove = __devexit_p(cirrusfb_zorro_unregister),
+static struct pci_driver cirrusfb_driver = {
+ .name = "cirrusfb",
+ .id_table = cirrusfb_pci_table,
+ .probe = cirrusfb_pci_register,
+ .remove = __devexit_p(cirrusfb_pci_unregister),
+#ifdef CONFIG_PM
+#if 0
+ .suspend = cirrusfb_pci_suspend,
+ .resume = cirrusfb_pci_resume,
+#endif
+#endif
};
-#endif /* CONFIG_ZORRO */
int __init cirrusfb_init(void)
{
- int error = 0;
-
#ifdef CONFIG_ZORRO
- error |= zorro_module_init(&cirrusfb_zorro_driver);
-#endif
-#ifdef CONFIG_PCI
- error |= pci_module_init(&cirrusfb_pci_driver);
+ return cirrusfb_pci_register(NULL, NULL);
+#else
+ return pci_module_init(&cirrusfb_driver);
#endif
- return error;
}
void __exit cirrusfb_exit (void)
{
-#ifdef CONFIG_PCI
- pci_unregister_driver(&cirrusfb_pci_driver);
-#endif
-#ifdef CONFIG_ZORRO
- zorro_unregister_driver(&cirrusfb_zorro_driver);
-#endif
+ pci_unregister_driver (&cirrusfb_driver);
}
#ifdef MODULE
static void cirrusfb_BitBLT (caddr_t regbase, int bits_per_pixel,
u_short curx, u_short cury, u_short destx, u_short desty,
- u_short width, u_short height, u_short line_length)
+ u_short width, u_short height, u_short line_length)
{
u_short nwidth, nheight;
u_long nsrc, ndest;
.con_bmove = DUMMY,
.con_switch = DUMMY,
.con_blank = DUMMY,
- .con_font_set = DUMMY,
- .con_font_get = DUMMY,
- .con_font_default = DUMMY,
- .con_font_copy = DUMMY,
+ .con_font_op = DUMMY,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
};
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
+static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op);
static int fbcon_set_palette(struct vc_data *vc, unsigned char *table);
static int fbcon_scrolldelta(struct vc_data *vc, int lines);
void accel_clear_margins(struct vc_data *vc, struct fb_info *info,
p->userfont = 0;
}
-static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
+static inline int fbcon_get_font(struct vc_data *vc, struct console_font_op *op)
{
u8 *fontdata = vc->vc_font.data;
- u8 *data = font->data;
+ u8 *data = op->data;
int i, j;
- font->width = vc->vc_font.width;
- font->height = vc->vc_font.height;
- font->charcount = vc->vc_hi_font_mask ? 512 : 256;
- if (!font->data)
+ op->width = vc->vc_font.width;
+ op->height = vc->vc_font.height;
+ op->charcount = vc->vc_hi_font_mask ? 512 : 256;
+ if (!op->data)
return 0;
- if (font->width <= 8) {
+ if (op->width <= 8) {
j = vc->vc_font.height;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 32 - j);
data += 32;
fontdata += j;
}
- } else if (font->width <= 16) {
+ } else if (op->width <= 16) {
j = vc->vc_font.height * 2;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 64 - j);
data += 64;
fontdata += j;
}
- } else if (font->width <= 24) {
- for (i = 0; i < font->charcount; i++) {
+ } else if (op->width <= 24) {
+ for (i = 0; i < op->charcount; i++) {
for (j = 0; j < vc->vc_font.height; j++) {
*data++ = fontdata[0];
*data++ = fontdata[1];
}
} else {
j = vc->vc_font.height * 4;
- for (i = 0; i < font->charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 128 - j);
data += 128;
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+static int fbcon_do_set_font(struct vc_data *vc, struct console_font_op *op,
u8 * data, int userfont)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int resize;
+ int w = op->width;
+ int h = op->height;
int cnt;
char *old_data = NULL;
+ if (w > 32) {
+ if (userfont && op->op != KD_FONT_OP_COPY)
+ kfree(data - FONT_EXTRA_WORDS * sizeof(int));
+ return -ENXIO;
+ }
+
if (CON_IS_VISIBLE(vc) && softback_lines)
fbcon_set_origin(vc);
return 0;
}
-static int fbcon_copy_font(struct vc_data *vc, int con)
+static inline int fbcon_copy_font(struct vc_data *vc, struct console_font_op *op)
{
- struct display *od = &fb_display[con];
- struct console_font *f = &vc->vc_font;
-
- if (od->fontdata == f->data)
+ struct display *od;
+ int h = op->height;
+
+ if (h < 0 || !vc_cons_allocated(h))
+ return -ENOTTY;
+ if (h == vc->vc_num)
+ return 0; /* nothing to do */
+ od = &fb_display[h];
+ if (od->fontdata == vc->vc_font.data)
return 0; /* already the same font... */
- return fbcon_do_set_font(vc, f->width, f->height, od->fontdata, od->userfont);
+ op->width = vc->vc_font.width;
+ op->height = vc->vc_font.height;
+ return fbcon_do_set_font(vc, op, od->fontdata, od->userfont);
}
-/*
- * User asked to set font; we are guaranteed that
- * a) width and height are in range 1..32
- * b) charcount does not exceed 512
- */
-
-static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned flags)
+static inline int fbcon_set_font(struct vc_data *vc, struct console_font_op *op)
{
- unsigned charcount = font->charcount;
- int w = font->width;
- int h = font->height;
+ int w = op->width;
+ int h = op->height;
int size = h;
int i, k;
- u8 *new_data, *data = font->data, *p;
+ u8 *new_data, *data = op->data, *p;
- if (charcount != 256 && charcount != 512)
+ if ((w <= 0) || (w > 32)
+ || (op->charcount != 256 && op->charcount != 512))
return -EINVAL;
if (w > 8) {
else
size *= 4;
}
- size *= charcount;
-
- new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
+ size *= op->charcount;
- if (!new_data)
+ if (!
+ (new_data =
+ kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER)))
return -ENOMEM;
-
new_data += FONT_EXTRA_WORDS * sizeof(int);
FNTSIZE(new_data) = size;
- FNTCHARCNT(new_data) = charcount;
+ FNTCHARCNT(new_data) = op->charcount;
REFCOUNT(new_data) = 0; /* usage counter */
p = new_data;
if (w <= 8) {
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 32;
p += h;
}
} else if (w <= 16) {
h *= 2;
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 64;
p += h;
}
} else if (w <= 24) {
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
int j;
for (j = 0; j < h; j++) {
memcpy(p, data, 3);
}
} else {
h *= 4;
- for (i = 0; i < charcount; i++) {
+ for (i = 0; i < op->charcount; i++) {
memcpy(p, data, h);
data += 128;
p += h;
break;
}
}
- return fbcon_do_set_font(vc, font->width, font->height, new_data, 1);
+ return fbcon_do_set_font(vc, op, new_data, 1);
}
-static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
+static inline int fbcon_set_def_font(struct vc_data *vc, struct console_font_op *op)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
+ char name[MAX_FONT_NAME];
struct font_desc *f;
- if (!name)
+ if (!op->data)
f = get_default_font(info->var.xres, info->var.yres);
- else if (!(f = find_font(name)))
- return -ENOENT;
+ else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
+ return -EFAULT;
+ else {
+ name[MAX_FONT_NAME - 1] = 0;
+ if (!(f = find_font(name)))
+ return -ENOENT;
+ }
+ op->width = f->width;
+ op->height = f->height;
+ return fbcon_do_set_font(vc, op, f->data, 0);
+}
- font->width = f->width;
- font->height = f->height;
- return fbcon_do_set_font(vc, f->width, f->height, f->data, 0);
+static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op)
+{
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return fbcon_set_font(vc, op);
+ case KD_FONT_OP_GET:
+ return fbcon_get_font(vc, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return fbcon_set_def_font(vc, op);
+ case KD_FONT_OP_COPY:
+ return fbcon_copy_font(vc, op);
+ default:
+ return -ENOSYS;
+ }
}
static u16 palette_red[16];
else
palette_cmap.len = 16;
palette_cmap.start = 0;
- return fb_set_cmap(&palette_cmap, info);
+ return fb_set_cmap(&palette_cmap, 1, info);
}
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
.con_bmove = fbcon_bmove,
.con_switch = fbcon_switch,
.con_blank = fbcon_blank,
- .con_font_set = fbcon_set_font,
- .con_font_get = fbcon_get_font,
- .con_font_default = fbcon_set_def_font,
- .con_font_copy = fbcon_copy_font,
+ .con_font_op = fbcon_font_op,
.con_set_palette = fbcon_set_palette,
.con_scrolldelta = fbcon_scrolldelta,
.con_set_origin = fbcon_set_origin,
}
}
+static int mdacon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static int mdacon_scrolldelta(struct vc_data *c, int lines)
{
return 0;
.con_bmove = mdacon_bmove,
.con_switch = mdacon_switch,
.con_blank = mdacon_blank,
+ .con_font_op = mdacon_font_op,
.con_set_palette = mdacon_set_palette,
.con_scrolldelta = mdacon_scrolldelta,
.con_build_attr = mdacon_build_attr,
static int newport_xsize;
static int newport_ysize;
-static int newport_set_def_font(int unit, struct console_font *op);
+static int newport_set_def_font(int unit, struct console_font_op *op);
#define BMASK(c) (c << 24)
return 1;
}
-static int newport_set_font(int unit, struct console_font *op)
+static int newport_set_font(int unit, struct console_font_op *op)
{
int w = op->width;
int h = op->height;
return 0;
}
-static int newport_set_def_font(int unit, struct console_font *op)
+static int newport_set_def_font(int unit, struct console_font_op *op)
{
if (font_data[unit] != FONT_DATA) {
if (--REFCOUNT(font_data[unit]) == 0)
return 0;
}
-static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int newport_font_op(struct vc_data *vc, struct console_font_op *op)
{
- return newport_set_def_font(vc->vc_num, op);
-}
-
-static int newport_font_set(struct vc_data *vc, struct console_font *font, unsigned flags)
-{
- return newport_set_font(vc->vc_num, font);
+ int unit = vc->vc_num;
+
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return newport_set_font(unit, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return newport_set_def_font(unit, op);
+ default:
+ return -ENOSYS;
+ }
}
static int newport_set_palette(struct vc_data *vc, unsigned char *table)
.con_bmove = newport_bmove,
.con_switch = newport_switch,
.con_blank = newport_blank,
- .con_font_set = newport_font_set,
- .con_font_default = newport_font_default,
+ .con_font_op = newport_font_op,
.con_set_palette = newport_set_palette,
.con_scrolldelta = newport_scrolldelta,
.con_set_origin = DUMMY,
}
}
+static int
+promcon_font_op(struct vc_data *conp, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static int
promcon_blank(struct vc_data *conp, int blank, int mode_switch)
{
.con_bmove = promcon_bmove,
.con_switch = promcon_switch,
.con_blank = promcon_blank,
+ .con_font_op = promcon_font_op,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
#if !(PROMCON_COLOR)
return -EINVAL;
}
+static int sticon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
+
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
int unit = conp->vc_num;
.con_bmove = sticon_bmove,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
+ .con_font_op = sticon_font_op,
.con_set_palette = sticon_set_palette,
.con_scrolldelta = sticon_scrolldelta,
.con_set_origin = sticon_set_origin,
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op);
static int vgacon_set_palette(struct vc_data *vc, unsigned char *table);
static int vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
return 0;
}
-static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned flags)
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
{
- unsigned charcount = font->charcount;
int rc;
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- if (font->width != 8 || (charcount != 256 && charcount != 512))
- return -EINVAL;
-
- rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512);
- if (rc)
- return rc;
-
- if (!(flags & KD_FONT_FLAG_DONT_RECALC))
- rc = vgacon_adjust_height(c, font->height);
+ if (op->op == KD_FONT_OP_SET) {
+ if (op->width != 8
+ || (op->charcount != 256 && op->charcount != 512))
+ return -EINVAL;
+ rc = vgacon_do_font_op(&state, op->data, 1, op->charcount == 512);
+ if (!rc && !(op->flags & KD_FONT_FLAG_DONT_RECALC))
+ rc = vgacon_adjust_height(c, op->height);
+ } else if (op->op == KD_FONT_OP_GET) {
+ op->width = 8;
+ op->height = c->vc_font.height;
+ op->charcount = vga_512_chars ? 512 : 256;
+ if (!op->data)
+ return 0;
+ rc = vgacon_do_font_op(&state, op->data, 0, 0);
+ } else
+ rc = -ENOSYS;
return rc;
}
-static int vgacon_font_get(struct vc_data *c, struct console_font *font)
-{
- if (vga_video_type < VIDEO_TYPE_EGAM)
- return -EINVAL;
-
- font->width = 8;
- font->height = c->vc_font.height;
- font->charcount = vga_512_chars ? 512 : 256;
- if (!font->data)
- return 0;
- return vgacon_do_font_op(&state, font->data, 0, 0);
-}
-
#else
-#define vgacon_font_set NULL
-#define vgacon_font_get NULL
+static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
+{
+ return -ENOSYS;
+}
#endif
.con_bmove = DUMMY,
.con_switch = vgacon_switch,
.con_blank = vgacon_blank,
- .con_font_set = vgacon_font_set,
- .con_font_get = vgacon_font_get,
+ .con_font_op = vgacon_font_op,
.con_set_palette = vgacon_set_palette,
.con_scrolldelta = vgacon_scrolldelta,
.con_set_origin = vgacon_set_origin,
};
struct fb_var_screeninfo dnfb_var __devinitdata = {
- .xres = 1280,
- .yres = 1024,
- .xres_virtual = 2048,
- .yres_virtual = 1024,
- .bits_per_pixel = 1,
- .height = -1,
- .width = -1,
- .vmode = FB_VMODE_NONINTERLACED,
+ .xres = 1280,
+ .yres = 1024,
+ .xres_virtual = 2048,
+ .yres_virtual = 1024,
+ .bits_per_pixel = 1,
+ .height = -1,
+ .width = -1,
+ .vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo dnfb_fix __devinitdata = {
- .id = "Apollo Mono",
- .smem_start = (FRAME_BUFFER_START + IO_BASE),
- .smem_len = FRAME_BUFFER_LEN,
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_MONO10,
- .line_length = 256,
+ .id = "Apollo Mono",
+ .smem_start = (FRAME_BUFFER_START + IO_BASE),
+ .smem_len = FRAME_BUFFER_LEN,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .line_length = 256,
};
static int dnfb_blank(int blank, struct fb_info *info)
}
cmap->start = 0;
cmap->len = len;
- fb_copy_cmap(fb_default_cmap(len), cmap);
+ fb_copy_cmap(fb_default_cmap(len), cmap, 0);
return 0;
fail:
* fb_copy_cmap - copy a colormap
* @from: frame buffer colormap structure
* @to: frame buffer colormap structure
+ * @fsfromto: determine copy method
*
* Copy contents of colormap from @from to @to.
+ *
+ * @fsfromto accepts the following integer parameters:
+ * 0: memcpy function
+ * 1: copy_from_user() function to copy from userspace
+ * 2: copy_to_user() function to copy to userspace
+ *
*/
-int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to)
+int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto)
{
- int tooff = 0, fromoff = 0;
- int size;
-
- if (to->start > from->start)
- fromoff = to->start - from->start;
- else
- tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
- return -EINVAL;
- size *= sizeof(u16);
-
+ int tooff = 0, fromoff = 0;
+ int size;
+
+ if (to->start > from->start)
+ fromoff = to->start-from->start;
+ else
+ tooff = from->start-to->start;
+ size = to->len-tooff;
+ if (size > (int) (from->len - fromoff))
+ size = from->len-fromoff;
+ if (size <= 0)
+ return -EINVAL;
+ size *= sizeof(u16);
+
+ switch (fsfromto) {
+ case 0:
memcpy(to->red+tooff, from->red+fromoff, size);
memcpy(to->green+tooff, from->green+fromoff, size);
memcpy(to->blue+tooff, from->blue+fromoff, size);
if (from->transp && to->transp)
- memcpy(to->transp+tooff, from->transp+fromoff, size);
- return 0;
-}
-
-int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to)
-{
- int tooff = 0, fromoff = 0;
- int size;
-
- if (to->start > from->start)
- fromoff = to->start - from->start;
- else
- tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
- return -EINVAL;
- size *= sizeof(u16);
-
+ memcpy(to->transp+tooff, from->transp+fromoff, size);
+ break;
+ case 1:
+ if (copy_from_user(to->red+tooff, from->red+fromoff, size))
+ return -EFAULT;
+ if (copy_from_user(to->green+tooff, from->green+fromoff, size))
+ return -EFAULT;
+ if (copy_from_user(to->blue+tooff, from->blue+fromoff, size))
+ return -EFAULT;
+ if (from->transp && to->transp)
+ if (copy_from_user(to->transp+tooff, from->transp+fromoff, size))
+ return -EFAULT;
+ break;
+ case 2:
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
return -EFAULT;
if (copy_to_user(to->green+tooff, from->green+fromoff, size))
if (from->transp && to->transp)
if (copy_to_user(to->transp+tooff, from->transp+fromoff, size))
return -EFAULT;
- return 0;
+ break;
+ }
+ return 0;
}
/**
* fb_set_cmap - set the colormap
* @cmap: frame buffer colormap structure
+ * @kspc: boolean; 1 = cmap entries are in kernel space and are read
+ *        directly, 0 = entries are in userspace and are read with get_user()
* @info: frame buffer info structure
*
* Sets the colormap @cmap for a screen of device @info.
*
*/
-int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *info)
{
- int i, start;
- u16 *red, *green, *blue, *transp;
- u_int hred, hgreen, hblue, htransp = 0xffff;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- if (start < 0 || !info->fbops->fb_setcolreg)
- return -EINVAL;
- for (i = 0; i < cmap->len; i++) {
- hred = *red++;
- hgreen = *green++;
- hblue = *blue++;
- if (transp)
- htransp = *transp++;
- if (info->fbops->fb_setcolreg(start++,
- hred, hgreen, hblue, htransp,
- info))
- break;
+ int i, start;
+ u16 *red, *green, *blue, *transp;
+ u_int hred, hgreen, hblue, htransp;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ if (start < 0 || !info->fbops->fb_setcolreg)
+ return -EINVAL;
+ for (i = 0; i < cmap->len; i++) {
+ if (kspc) {
+ hred = *red;
+ hgreen = *green;
+ hblue = *blue;
+ htransp = transp ? *transp : 0xffff;
+ } else {
+ get_user(hred, red);
+ get_user(hgreen, green);
+ get_user(hblue, blue);
+ if (transp)
+ get_user(htransp, transp);
+ else
+ htransp = 0xffff;
}
- return 0;
+ red++;
+ green++;
+ blue++;
+ if (transp)
+ transp++;
+ if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info))
+ return 0;
+ }
+ return 0;
}
-int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
-{
- int i, start;
- u16 __user *red, *green, *blue, *transp;
- u_int hred, hgreen, hblue, htransp = 0xffff;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- if (start < 0 || !info->fbops->fb_setcolreg)
- return -EINVAL;
- for (i = 0; i < cmap->len; i++, red++, blue++, green++) {
- if (get_user(hred, red) ||
- get_user(hgreen, green) ||
- get_user(hblue, blue) ||
- (transp && get_user(htransp, transp)))
- return -EFAULT;
- if (info->fbops->fb_setcolreg(start++,
- hred, hgreen, hblue, htransp,
- info))
- return 0;
- if (transp)
- transp++;
- }
- return 0;
-}
/**
* fb_default_cmap - get default colormap
return n < 0 ? d >> -n : d << n;
}
-static void fb_set_logocmap(struct fb_info *info,
+static void __init fb_set_logocmap(struct fb_info *info,
const struct linux_logo *logo)
{
struct fb_cmap palette_cmap;
palette_cmap.blue[j] = clut[2] << 8 | clut[2];
clut += 3;
}
- fb_set_cmap(&palette_cmap, info);
+ fb_set_cmap(&palette_cmap, 1, info);
}
}
-static void fb_set_logo_truepalette(struct fb_info *info,
+static void __init fb_set_logo_truepalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
}
}
-static void fb_set_logo_directpalette(struct fb_info *info,
+static void __init fb_set_logo_directpalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
palette[i] = i << redshift | i << greenshift | i << blueshift;
}
-static void fb_set_logo(struct fb_info *info,
+static void __init fb_set_logo(struct fb_info *info,
const struct linux_logo *logo, u8 *dst,
int depth)
{
}
int
-fb_cursor(struct fb_info *info, struct fb_cursor_user __user *sprite)
+fb_cursor(struct fb_info *info, struct fb_cursor *sprite)
{
- struct fb_cursor_user cursor_user;
struct fb_cursor cursor;
- char *data = NULL, *mask = NULL;
- u16 *red = NULL, *green = NULL, *blue = NULL, *transp = NULL;
- int err = -EINVAL;
+ int err;
- if (copy_from_user(&cursor_user, sprite, sizeof(struct fb_cursor_user)))
+ if (copy_from_user(&cursor, sprite, sizeof(struct fb_cursor)))
return -EFAULT;
- memcpy(&cursor, &cursor_user, sizeof(cursor));
- cursor.mask = NULL;
- cursor.image.data = NULL;
- cursor.image.cmap.red = NULL;
- cursor.image.cmap.green = NULL;
- cursor.image.cmap.blue = NULL;
- cursor.image.cmap.transp = NULL;
-
if (cursor.set & FB_CUR_SETCUR)
info->cursor.enable = 1;
if (cursor.set & FB_CUR_SETCMAP) {
- unsigned len = cursor.image.cmap.len;
- if ((int)len <= 0)
- goto out;
- len *= 2;
- err = -ENOMEM;
- red = kmalloc(len, GFP_USER);
- green = kmalloc(len, GFP_USER);
- blue = kmalloc(len, GFP_USER);
- if (!red || !green || !blue)
- goto out;
- if (cursor_user.image.cmap.transp) {
- transp = kmalloc(len, GFP_USER);
- if (!transp)
- goto out;
- }
- err = -EFAULT;
- if (copy_from_user(red, cursor_user.image.cmap.red, len))
- goto out;
- if (copy_from_user(green, cursor_user.image.cmap.green, len))
- goto out;
- if (copy_from_user(blue, cursor_user.image.cmap.blue, len))
- goto out;
- if (transp) {
- if (copy_from_user(transp,
- cursor_user.image.cmap.transp, len))
- goto out;
- }
- cursor.image.cmap.red = red;
- cursor.image.cmap.green = green;
- cursor.image.cmap.blue = blue;
- cursor.image.cmap.transp = transp;
+ err = fb_copy_cmap(&cursor.image.cmap, &sprite->image.cmap, 1);
+ if (err)
+ return err;
}
if (cursor.set & FB_CUR_SETSHAPE) {
int size = ((cursor.image.width + 7) >> 3) * cursor.image.height;
+ char *data, *mask;
if ((cursor.image.height != info->cursor.image.height) ||
(cursor.image.width != info->cursor.image.width))
cursor.set |= FB_CUR_SETSIZE;
- err = -ENOMEM;
- data = kmalloc(size, GFP_USER);
- mask = kmalloc(size, GFP_USER);
- if (!mask || !data)
- goto out;
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- err = -EFAULT;
- if (copy_from_user(data, cursor_user.image.data, size) ||
- copy_from_user(mask, cursor_user.mask, size))
- goto out;
+ mask = kmalloc(size, GFP_KERNEL);
+ if (!mask) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ if (copy_from_user(data, sprite->image.data, size) ||
+ copy_from_user(mask, sprite->mask, size)) {
+ kfree(data);
+ kfree(mask);
+ return -EFAULT;
+ }
cursor.image.data = data;
cursor.mask = mask;
}
info->cursor.set = cursor.set;
info->cursor.rop = cursor.rop;
err = info->fbops->fb_cursor(info, &cursor);
-out:
- kfree(data);
- kfree(mask);
- kfree(red);
- kfree(green);
- kfree(blue);
- kfree(transp);
return err;
}
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
if (info->flags & FBINFO_MISC_MODECHANGEUSER) {
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
cmap.len = info->cmap.len;
} else
cmap = info->cmap;
- return fb_set_cmap(&cmap, info);
+ return fb_set_cmap(&cmap, 1, info);
}
static int
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
struct fb_con2fbmap con2fb;
#endif
- struct fb_cmap_user cmap;
- void __user *argp = (void __user *)arg;
+ struct fb_cmap cmap;
int i;
if (!fb)
return -ENODEV;
switch (cmd) {
case FBIOGET_VSCREENINFO:
- return copy_to_user(argp, &info->var,
+ return copy_to_user((void *) arg, &info->var,
sizeof(var)) ? -EFAULT : 0;
case FBIOPUT_VSCREENINFO:
- if (copy_from_user(&var, argp, sizeof(var)))
+ if (copy_from_user(&var, (void *) arg, sizeof(var)))
return -EFAULT;
acquire_console_sem();
info->flags |= FBINFO_MISC_MODECHANGEUSER;
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
release_console_sem();
if (i) return i;
- if (copy_to_user(argp, &var, sizeof(var)))
+ if (copy_to_user((void *) arg, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIOGET_FSCREENINFO:
- return copy_to_user(argp, &info->fix,
+ return copy_to_user((void *) arg, &info->fix,
sizeof(fix)) ? -EFAULT : 0;
case FBIOPUTCMAP:
- if (copy_from_user(&cmap, argp, sizeof(cmap)))
+ if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
return -EFAULT;
- return (fb_set_user_cmap(&cmap, info));
+ return (fb_set_cmap(&cmap, 0, info));
case FBIOGETCMAP:
- if (copy_from_user(&cmap, argp, sizeof(cmap)))
+ if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
return -EFAULT;
- return fb_cmap_to_user(&info->cmap, &cmap);
+ return (fb_copy_cmap(&info->cmap, &cmap, 2));
case FBIOPAN_DISPLAY:
- if (copy_from_user(&var, argp, sizeof(var)))
+ if (copy_from_user(&var, (void *) arg, sizeof(var)))
return -EFAULT;
acquire_console_sem();
i = fb_pan_display(info, &var);
release_console_sem();
if (i)
return i;
- if (copy_to_user(argp, &var, sizeof(var)))
+ if (copy_to_user((void *) arg, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIO_CURSOR:
acquire_console_sem();
- i = fb_cursor(info, argp);
+ i = fb_cursor(info, (struct fb_cursor *) arg);
release_console_sem();
return i;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
case FBIOGET_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
con2fb.framebuffer = con2fb_map[con2fb.console-1];
- return copy_to_user(argp, &con2fb,
+ return copy_to_user((void *)arg, &con2fb,
sizeof(con2fb)) ? -EFAULT : 0;
case FBIOPUT_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
return - EFAULT;
if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
#ifdef MEMCPYTOIO_WORKS
memcpy_toio(va.vaddr + offs, src, len);
#elif defined(MEMCPYTOIO_WRITEL)
+#define srcd ((const u_int32_t*)src)
if (offs & 3) {
while (len >= 4) {
- mga_writel(va, offs, get_unaligned((u32 *)src));
+ mga_writel(va, offs, get_unaligned(srcd++));
offs += 4;
len -= 4;
- src += 4;
}
} else {
while (len >= 4) {
- mga_writel(va, offs, *(u32 *)src);
+ mga_writel(va, offs, *srcd++);
offs += 4;
len -= 4;
- src += 4;
}
}
+#undef srcd
if (len) {
u_int32_t tmp;
break;
}
} else
- fb_set_cmap(&info->cmap, info);
+ fb_set_cmap(&info->cmap, 1, info);
return 0;
}
//TODO if (pxafb_blank_helper) pxafb_blank_helper(blank);
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, info);
+ fb_set_cmap(&fbi->fb.cmap, 1, info);
pxafb_schedule_work(fbi, C_ENABLE);
}
return 0;
dp = pci_device_to_OF_node(rinfo->pdev);
- xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL);
+ xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", 0);
rinfo->pll.ref_clk = *xtal / 10;
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
-#ifdef CONFIG_PMAC_BACKLIGHT
-#include <asm/backlight.h>
-#endif
#include "rivafb.h"
#include "nvreg.h"
* various helpful macros and constants
*
* ------------------------------------------------------------------------- */
-#ifdef CONFIG_FB_RIVA_DEBUG
-#define NVTRACE printk
+
+#undef RIVAFBDEBUG
+#ifdef RIVAFBDEBUG
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
#else
-#define NVTRACE if(0) printk
+#define DPRINTK(fmt, args...)
#endif
-#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __FUNCTION__)
-
-#ifdef CONFIG_FB_RIVA_DEBUG
+#ifndef RIVA_NDEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
{ "GeForce2-GTS", NV_ARCH_10 },
{ "GeForce2-ULTRA", NV_ARCH_10 },
{ "Quadro2-PRO", NV_ARCH_10 },
- { "GeForce4-MX-460", NV_ARCH_10 },
- { "GeForce4-MX-440", NV_ARCH_10 },
- { "GeForce4-MX-420", NV_ARCH_10 },
- { "GeForce4-440-GO", NV_ARCH_10 },
- { "GeForce4-420-GO", NV_ARCH_10 },
- { "GeForce4-420-GO-M32", NV_ARCH_10 },
- { "Quadro4-500-XGL", NV_ARCH_10 },
- { "GeForce4-440-GO-M64", NV_ARCH_10 },
- { "Quadro4-200", NV_ARCH_10 },
- { "Quadro4-550-XGL", NV_ARCH_10 },
- { "Quadro4-500-GOGL", NV_ARCH_10 },
- { "GeForce2", NV_ARCH_10 },
+ { "GeForce4-MX-460", NV_ARCH_20 },
+ { "GeForce4-MX-440", NV_ARCH_20 },
+ { "GeForce4-MX-420", NV_ARCH_20 },
+ { "GeForce4-440-GO", NV_ARCH_20 },
+ { "GeForce4-420-GO", NV_ARCH_20 },
+ { "GeForce4-420-GO-M32", NV_ARCH_20 },
+ { "Quadro4-500-XGL", NV_ARCH_20 },
+ { "GeForce4-440-GO-M64", NV_ARCH_20 },
+ { "Quadro4-200", NV_ARCH_20 },
+ { "Quadro4-550-XGL", NV_ARCH_20 },
+ { "Quadro4-500-GOGL", NV_ARCH_20 },
+ { "GeForce2", NV_ARCH_20 },
{ "GeForce3", NV_ARCH_20 },
{ "GeForce3 Ti 200", NV_ARCH_20 },
{ "GeForce3 Ti 500", NV_ARCH_20 },
0xEB /* MISC */
};
-/*
- * Backlight control
- */
-#ifdef CONFIG_PMAC_BACKLIGHT
-
-static int riva_backlight_levels[] = {
- 0x158,
- 0x192,
- 0x1c6,
- 0x200,
- 0x234,
- 0x268,
- 0x2a2,
- 0x2d6,
- 0x310,
- 0x344,
- 0x378,
- 0x3b2,
- 0x3e6,
- 0x41a,
- 0x454,
- 0x534,
-};
-
-static int riva_set_backlight_enable(int on, int level, void *data);
-static int riva_set_backlight_level(int level, void *data);
-static struct backlight_controller riva_backlight_controller = {
- riva_set_backlight_enable,
- riva_set_backlight_level
-};
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
/* ------------------------------------------------------------------------- *
*
* MMIO access macros
{
int i;
- NVTRACE_ENTER();
par->riva.LockUnlock(&par->riva, 0);
par->riva.UnloadStateExt(&par->riva, ®s->ext);
for (i = 0; i < NUM_SEQ_REGS; i++)
regs->seq[i] = SEQin(par, i);
- NVTRACE_LEAVE();
}
/**
RIVA_HW_STATE *state = ®s->ext;
int i;
- NVTRACE_ENTER();
CRTCout(par, 0x11, 0x00);
par->riva.LockUnlock(&par->riva, 0);
for (i = 0; i < NUM_SEQ_REGS; i++)
SEQout(par, i, regs->seq[i]);
- NVTRACE_LEAVE();
}
/**
struct riva_par *par = (struct riva_par *) info->par;
struct riva_regs newmode;
- NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(1, info);
riva_load_state(par, &par->current_state);
par->riva.LockUnlock(&par->riva, 0); /* important for HW cursor */
rivafb_blank(0, info);
- NVTRACE_LEAVE();
}
static void riva_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb)
{
- NVTRACE_ENTER();
var->xres = var->xres_virtual = modedb->xres;
var->yres = modedb->yres;
if (var->yres_virtual < var->yres)
var->vsync_len = modedb->vsync_len;
var->sync = modedb->sync;
var->vmode = modedb->vmode;
- NVTRACE_LEAVE();
}
/**
};
int i;
- NVTRACE_ENTER();
/* use highest possible virtual resolution */
if (var->xres_virtual == -1 && var->yres_virtual == -1) {
printk(KERN_WARNING PFX
if (modes[i].xres == -1) {
printk(KERN_ERR PFX
"could not find a virtual resolution that fits into video memory!!\n");
- NVTRACE("EXIT - EINVAL error\n");
+ DPRINTK("EXIT - EINVAL error\n");
return -EINVAL;
}
var->xres_virtual = modes[i].xres;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...resolution too high to fit into video memory!\n",
var->xres, var->yres, var->bits_per_pixel);
- NVTRACE("EXIT - EINVAL error\n");
+ DPRINTK("EXIT - EINVAL error\n");
return -EINVAL;
}
}
var->yres_virtual = 0x7fff/nom;
if (var->xres_virtual > 0x7fff/nom)
var->xres_virtual = 0x7fff/nom;
- NVTRACE_LEAVE();
+
return 0;
}
return rc;
}
-/* ------------------------------------------------------------------------- *
- *
- * Backlight operations
- *
- * ------------------------------------------------------------------------- */
-
-#ifdef CONFIG_PMAC_BACKLIGHT
-static int riva_set_backlight_enable(int on, int level, void *data)
-{
- struct riva_par *par = (struct riva_par *)data;
- U032 tmp_pcrt, tmp_pmc;
-
- tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
- tmp_pcrt = par->riva.PCRTC0[0x081C/4] & 0xFFFFFFFC;
- if(on && (level > BACKLIGHT_OFF)) {
- tmp_pcrt |= 0x1;
- tmp_pmc |= (1 << 31); // backlight bit
- tmp_pmc |= riva_backlight_levels[level-1] << 16; // level
- }
- par->riva.PCRTC0[0x081C/4] = tmp_pcrt;
- par->riva.PMC[0x10F0/4] = tmp_pmc;
- return 0;
-}
-
-static int riva_set_backlight_level(int level, void *data)
-{
- return riva_set_backlight_enable(1, level, data);
-}
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
/* ------------------------------------------------------------------------- *
*
* framebuffer operations
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
- NVTRACE_ENTER();
if (!cnt) {
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
riva_save_state(par, &par->initial_state);
}
atomic_inc(&par->ref_count);
- NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
- NVTRACE_ENTER();
if (!cnt)
return -EINVAL;
if (cnt == 1) {
par->riva.LockUnlock(&par->riva, 1);
}
atomic_dec(&par->ref_count);
- NVTRACE_LEAVE();
return 0;
}
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
- NVTRACE_ENTER();
switch (var->bits_per_pixel) {
case 1 ... 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...color depth not supported.\n",
var->xres, var->yres, var->bits_per_pixel);
- NVTRACE("EXIT, returning -EINVAL\n");
+ DPRINTK("EXIT, returning -EINVAL\n");
return -EINVAL;
}
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
- NVTRACE_LEAVE();
return 0;
}
{
struct riva_par *par = (struct riva_par *) info->par;
- NVTRACE_ENTER();
riva_common_setup(par);
RivaGetConfig(&par->riva, par->Chipset);
/* vgaHWunlock() + riva unlock (0x7F) */
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
- NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *)info->par;
unsigned int base;
- NVTRACE_ENTER();
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
- NVTRACE_LEAVE();
return 0;
}
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
vesa = CRTCin(par, 0x1a) & ~0xc0; /* sync on/off */
- NVTRACE_ENTER();
if (blank) {
tmp |= 0x20;
switch (blank - 1) {
}
SEQout(par, 0x01, tmp);
CRTCout(par, 0x1a, vesa);
-
-#ifdef CONFIG_PMAC_BACKLIGHT
- if ( par->FlatPanel && _machine == _MACH_Pmac) {
- set_backlight_enable(!blank);
- }
-#endif
-
- NVTRACE_LEAVE();
return 0;
}
{
unsigned int cmap_len;
- NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_XPAN
| FBINFO_HWACCEL_YPAN
info->pixmap.scan_align = 4;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->var.yres_virtual = -1;
- NVTRACE_LEAVE();
return (rivafb_check_var(&info->var, info));
}
"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
int i;
- NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
disptype = (unsigned char *)get_property(dp, "display-type", NULL);
}
}
}
- NVTRACE_LEAVE();
return 0;
}
#endif /* CONFIG_PPC_OF */
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
- NVTRACE_ENTER();
/* respect mode options */
if (mode_option) {
fb_find_mode(var, info, mode_option,
riva_update_var(var, &modedb);
}
var->accel_flags |= FB_ACCELF_TEXT;
- NVTRACE_LEAVE();
}
static void riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
- struct riva_par *par;
- int i;
-
- NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
if (!riva_get_EDID_OF(info, pdev))
printk("rivafb: could not retrieve EDID from OF\n");
#else
/* XXX use other methods later */
#ifdef CONFIG_FB_RIVA_I2C
+ struct riva_par *par = (struct riva_par *) info->par;
+ int i;
- par = (struct riva_par *) info->par;
riva_create_i2c_busses(par);
for (i = par->bus; i >= 1; i--) {
riva_probe_i2c_connector(par, i, &par->EDID);
riva_delete_i2c_busses(par);
#endif
#endif
- NVTRACE_LEAVE();
}
struct riva_par *default_par;
struct fb_info *info;
- NVTRACE_ENTER();
assert(pd != NULL);
assert(rci != NULL);
info->fix.id,
info->fix.smem_len / (1024 * 1024),
info->fix.smem_start);
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (default_par->FlatPanel && _machine == _MACH_Pmac)
- register_backlight_controller(&riva_backlight_controller,
- default_par, "mnca");
-#endif
- NVTRACE_LEAVE();
return 0;
err_out_iounmap_fb:
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = (struct riva_par *) info->par;
- NVTRACE_ENTER();
if (!info)
return;
kfree(par);
kfree(info);
pci_set_drvdata(pd, NULL);
- NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
{
char *this_opt;
- NVTRACE_ENTER();
if (!options || !*options)
return 0;
} else
mode_option = this_opt;
}
- NVTRACE_LEAVE();
return 0;
}
#endif /* !MODULE */
case VESA_NO_BLANKING:
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, info);
+ fb_set_cmap(&fbi->fb.cmap, 1, info);
sa1100fb_schedule_work(fbi, C_ENABLE);
}
return 0;
#error Where is GPIO24 set as an output? Can we fit this in somewhere else?
if (machine_is_graphicsclient()) {
// From ADS doc again...same as disable
- msleep(20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(20 * HZ / 1000);
GPSR |= GPIO_GPIO24;
}
#endif
* We'll wait 20msec.
*/
GPCR |= GPIO_GPIO24;
- msleep(20);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(20 * HZ / 1000);
}
#endif
#ifdef CONFIG_SA1100_HUW_WEBPANEL
{
switch(cmd) {
case FBIOGTYPE: {
- struct fbtype __user *f = (struct fbtype __user *) arg;
+ struct fbtype *f = (struct fbtype *) arg;
if (put_user(type, &f->fb_type) ||
__put_user(info->var.yres, &f->fb_height) ||
return 0;
}
case FBIOPUTCMAP_SPARC: {
- struct fbcmap __user *c = (struct fbcmap __user *) arg;
+ struct fbcmap *c = (struct fbcmap *) arg;
struct fb_cmap cmap;
u16 red, green, blue;
- unsigned char __user *ured;
- unsigned char __user *ugreen;
- unsigned char __user *ublue;
+ unsigned char *ured, *ugreen, *ublue;
int index, count, i;
if (get_user(index, &c->index) ||
return -EFAULT;
cmap.start = index + i;
- err = fb_set_cmap(&cmap, info);
+ err = fb_set_cmap(&cmap, 0, info);
if (err)
return err;
}
return 0;
}
case FBIOGETCMAP_SPARC: {
- struct fbcmap __user *c = (struct fbcmap __user *) arg;
- unsigned char __user *ured;
- unsigned char __user *ugreen;
- unsigned char __user *ublue;
+ struct fbcmap *c = (struct fbcmap *) arg;
+ unsigned char *ured, *ugreen, *ublue;
struct fb_cmap *cmap = &info->cmap;
int index, count, i;
if(con != ivideo->currcon) return;
if(fb_display[con].cmap.len) {
- fb_set_cmap(&fb_display[con].cmap, sisfb_setcolreg, info);
+ fb_set_cmap(&fb_display[con].cmap, 1, sisfb_setcolreg, info);
} else {
int size = sisfb_get_cmap_len(&fb_display[con].var);
- fb_set_cmap(fb_default_cmap(size), sisfb_setcolreg, info);
+ fb_set_cmap(fb_default_cmap(size), 1, sisfb_setcolreg, info);
}
}
info->cursor.image.fg_color = cursor->image.fg_color;
} else {
if (cursor->image.cmap.len)
- fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap);
+ fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap, 0);
}
info->cursor.image.depth = cursor->image.depth;
}
bg_color = ((cmap.red[cmap.start+1] << 16) |
(cmap.green[cmap.start+1] << 8) |
(cmap.blue[cmap.start+1]));
- fb_copy_cmap(&cmap, &info->cursor.image.cmap);
+ fb_copy_cmap(&cmap, &info->cursor.image.cmap, 0);
spin_lock_irqsave(&par->DAClock, flags);
banshee_make_room(par, 2);
tdfx_outl(par, HWCURC0, bg_color);
static int valkyriefb_blank(int blank_mode, struct fb_info *info);
static int read_valkyrie_sense(struct fb_info_valkyrie *p);
+static inline int valkyrie_vram_reqd(int video_mode, int color_mode);
static void set_valkyrie_clock(unsigned char *params);
+static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var);
static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
struct fb_par_valkyrie *par, const struct fb_info *fb_info);
return 0;
}
-static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par,
- struct fb_var_screeninfo *var)
-{
- return mac_vmode_to_var(par->vmode, par->cmode, var);
-}
-
static int
valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
return 0;
}
-static inline int valkyrie_vram_reqd(int video_mode, int color_mode)
+static int valkyrie_vram_reqd(int video_mode, int color_mode)
{
int pitch;
struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1];
return 0;
}
+static int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var)
+{
+ return mac_vmode_to_var(par->vmode, par->cmode, var);
+}
+
static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p)
{
memset(fix, 0, sizeof(*fix));
config W1_MATROX
tristate "Matrox G400 transport layer for 1-wire"
- depends on W1 && PCI
+ depends on W1
help
Say Y here if you want to communicate with your 1-wire devices
using Matrox's G400 GPIO pins.
#include <asm/atomic.h>
#include <asm/types.h>
#include <asm/io.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
*/
#include <asm/atomic.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
printk(KERN_ERR
- "Failed to allocate %zd bytes for new w1 device.\n",
+ "Failed to allocate %d bytes for new w1 device.\n",
sizeof(struct w1_master));
return NULL;
}
*/
#include <asm/io.h>
+#include <asm/delay.h>
-#include <linux/delay.h>
#include <linux/moduleparam.h>
#include "w1.h"
messages at debug level 1 while the misbehaviour was occurring.
config JFFS2_FS_NAND
- bool "JFFS2 support for NAND flash"
- depends on JFFS2_FS
+ bool "JFFS2 support for NAND flash (EXPERIMENTAL)"
+ depends on JFFS2_FS && EXPERIMENTAL
default n
help
- This enables the support for NAND flash in JFFS2. NAND is a newer
- type of flash chip design than the traditional NOR flash, with
- higher density but a handful of characteristics which make it more
- interesting for the file system to use.
+ This enables the experimental support for NAND flash in JFFS2. NAND
+ is a newer type of flash chip design than the traditional NOR flash,
+ with higher density but a handful of characteristics which make it
+ more interesting for the file system to use. Support for NAND flash
+ is not yet complete and may corrupt data. For further information,
+ including a link to the mailing list where details of the remaining
+ work to be completed for NAND flash support can be found, see the
+ JFFS2 web site at <http://sources.redhat.com/jffs2>.
- Say 'N' unless you have NAND flash.
+ Say 'N' unless you have NAND flash and you are willing to test and
+ develop JFFS2 support for it.
config JFFS2_COMPRESSION_OPTIONS
bool "Advanced compression options for JFFS2"
- depends on JFFS2_FS
default n
help
Enabling this option allows you to explicitly choose which
endchoice
+config JFFS2_PROC
+ bool "JFFS2 proc interface support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS && PROC_FS
+ default n
+ help
+ You can read some statistics and set the compression mode and
+ compressor priorities with this interface.
+
+
config CRAMFS
tristate "Compressed ROM file system support"
select ZLIB_INFLATE
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
-config CIFS_XATTR
- bool "CIFS extended attributes (EXPERIMENTAL)"
- depends on CIFS
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details). CIFS maps the name of
- extended attributes beginning with the user namespace prefix
- to SMB/CIFS EAs. EAs are stored on Windows servers without the
- user namespace prefix, but their names are seen by Linux cifs clients
- prefaced by the user namespace prefix. The system namespace
- (used by some filesystems to store ACLs) is not supported at
- this time.
-
- If unsure, say N.
-
config CIFS_POSIX
bool "CIFS POSIX Extensions (EXPERIMENTAL)"
depends on CIFS
#include <linux/fcntl.h>
#include <linux/quotaops.h>
#include <linux/security.h>
-#include <linux/vs_base.h>
-#include <linux/proc_fs.h>
-#include <linux/devpts_fs.h>
/* Taken over from the old code... */
if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
goto error;
}
-
- /* Check for evil vserver activity */
- if (vx_check(0, VX_ADMIN))
- goto fine;
-
- if (IS_BARRIER(inode)) {
- printk(KERN_WARNING
- "VSW: xid=%d messing with the barrier.\n",
- vx_current_xid());
- goto error;
- }
- switch (inode->i_sb->s_magic) {
- case PROC_SUPER_MAGIC:
- printk(KERN_WARNING
- "VSW: xid=%d messing with the procfs.\n",
- vx_current_xid());
- goto error;
- case DEVPTS_SUPER_MAGIC:
- if (vx_check(inode->i_xid, VX_IDENT))
- goto fine;
- printk(KERN_WARNING
- "VSW: xid=%d messing with the devpts.\n",
- vx_current_xid());
- goto error;
- }
fine:
retval = 0;
error:
(current->mm->start_data = N_DATADDR(ex));
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
- current->mm->free_area_cache = current->mm->mmap_base;
-
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ /* unlimited stack is larger than TASK_SIZE */
+ current->mm->non_executable_cache = current->mm->mmap_top;
// current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
current->mm->mmap = NULL;
return error;
}
- error = bprm->file->f_op->read(bprm->file,
- (char __user *)text_addr,
+ error = bprm->file->f_op->read(bprm->file, (char *)text_addr,
ex.a_text+ex.a_data, &pos);
if ((signed long)error < 0) {
send_sig(SIGKILL, current, 0);
if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,
- (char __user *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
flush_icache_range((unsigned long) N_TXTADDR(ex),
(unsigned long) N_TXTADDR(ex) +
do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- file->f_op->read(file, (char __user *)start_addr,
+ file->f_op->read(file, (char *)start_addr,
ex.a_text + ex.a_data, &pos);
flush_icache_range((unsigned long) start_addr,
(unsigned long) start_addr + ex.a_text + ex.a_data);
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
-#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/param.h>
NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
if (interp_aout) {
argv = sp + 2;
envp = argv + argc + 1;
- __put_user((elf_addr_t)(unsigned long)argv, sp++);
- __put_user((elf_addr_t)(unsigned long)envp, sp++);
+ __put_user((elf_addr_t)(long)argv, sp++);
+ __put_user((elf_addr_t)(long)envp, sp++);
} else {
argv = sp;
envp = argv + argc + 1;
struct exec interp_ex;
char passed_fileno[6];
struct files_struct *files;
- int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
+ int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
unsigned long def_flags = 0;
/* Get the exec-header */
executable_stack = EXSTACK_DISABLE_X;
break;
}
- have_pt_gnu_stack = (i < elf_ex.e_phnum);
+ if (i == elf_ex.e_phnum)
+ def_flags |= VM_EXEC | VM_MAYEXEC;
relocexec = 0;
current->mm->end_data = 0;
current->mm->end_code = 0;
current->mm->mmap = NULL;
+#ifdef __HAVE_ARCH_MMAP_TOP
+ current->mm->mmap_top = mmap_top();
+#endif
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(elf_ex, ibcs2_interpreter);
- if (elf_read_implies_exec(elf_ex, have_pt_gnu_stack))
- current->personality |= READ_IMPLIES_EXEC;
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
// current->mm->rss = 0;
vx_rsspages_sub(current->mm, current->mm->rss);
- current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->non_executable_cache = current->mm->mmap_top;
retval = setup_arg_pages(bprm, executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
-#include <linux/vs_memory.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/init.h>
-#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
len, offset);
}
-/**
- * bio_uncopy_user - finish previously mapped bio
- * @bio: bio being terminated
- *
- * Free pages allocated from bio_copy_user() and write back data
- * to user space in case of a read.
- */
-int bio_uncopy_user(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i, ret = 0;
-
- if (bio_data_dir(bio) == READ) {
- char *uaddr = bio->bi_private;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- char *addr = page_address(bvec->bv_page);
-
- if (!ret && copy_to_user(uaddr, addr, bvec->bv_len))
- ret = -EFAULT;
-
- __free_page(bvec->bv_page);
- uaddr += bvec->bv_len;
- }
- }
-
- bio_put(bio);
- return ret;
-}
-
-/**
- * bio_copy_user - copy user data to bio
- * @q: destination block queue
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- *
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with
- * call bio_uncopy_user() on io completion.
- */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
- unsigned int len, int write_to_vm)
-{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- struct bio_vec *bvec;
- struct page *page;
- struct bio *bio;
- int i, ret;
-
- bio = bio_alloc(GFP_KERNEL, end - start);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- ret = 0;
- while (len) {
- unsigned int bytes = PAGE_SIZE;
-
- if (bytes > len)
- bytes = len;
-
- page = alloc_page(q->bounce_gfp | GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
-
- if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
- ret = -EINVAL;
- break;
- }
-
- len -= bytes;
- }
-
- /*
- * success
- */
- if (!ret) {
- if (!write_to_vm) {
- bio->bi_rw |= (1 << BIO_RW);
- /*
- * for a write, copy in data to kernel pages
- */
- ret = -EFAULT;
- bio_for_each_segment(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-
- if (copy_from_user(addr, (char *) uaddr, bvec->bv_len))
- goto cleanup;
- }
- }
-
- bio->bi_private = (void *) uaddr;
- return bio;
- }
-
- /*
- * cleanup
- */
-cleanup:
- bio_for_each_segment(bvec, bio, i)
- __free_page(bvec->bv_page);
-
- bio_put(bio);
- return ERR_PTR(ret);
-}
-
static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len,
int write_to_vm)
* size for now, in the future we can relax this restriction
*/
if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
- return ERR_PTR(-EINVAL);
+ return NULL;
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
- return ERR_PTR(-ENOMEM);
+ return NULL;
- ret = -ENOMEM;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
- bio->bi_flags |= (1 << BIO_USER_MAPPED);
+ blk_queue_bounce(q, &bio);
return bio;
out:
kfree(pages);
bio_put(bio);
- return ERR_PTR(ret);
+ return NULL;
}
/**
* @write_to_vm: bool indicating writing to pages or not
*
* Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
+ * device.
*/
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
- if (IS_ERR(bio))
- return bio;
-
- /*
- * subtle -- if __bio_map_user() ended up bouncing a bio,
- * it would normally disappear when its bi_end_io is run.
- * however, we need it for the unmap, so grab an extra
- * reference to it
- */
- bio_get(bio);
+ if (bio) {
+ /*
+ * subtle -- if __bio_map_user() ended up bouncing a bio,
+ * it would normally disappear when its bi_end_io is run.
+ * however, we need it for the unmap, so grab an extra
+ * reference to it
+ */
+ bio_get(bio);
- if (bio->bi_size == len)
- return bio;
+ if (bio->bi_size < len) {
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio, 0);
+ return NULL;
+ }
+ }
- /*
- * don't support partial mappings
- */
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio);
- return ERR_PTR(-EINVAL);
+ return bio;
}
-static void __bio_unmap_user(struct bio *bio)
+static void __bio_unmap_user(struct bio *bio, int write_to_vm)
{
struct bio_vec *bvec;
int i;
* make sure we dirty pages we wrote to
*/
__bio_for_each_segment(bvec, bio, i, 0) {
- if (bio_data_dir(bio) == READ)
+ if (write_to_vm)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
+ * @write_to_vm: bool indicating whether pages were written to
*
- * Unmap a bio previously mapped by bio_map_user(). Must be called with
+ * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
+ * must be the same as passed into bio_map_user(). Must be called with
* a process context.
*
* bio_unmap_user() may sleep.
*/
-void bio_unmap_user(struct bio *bio)
+void bio_unmap_user(struct bio *bio, int write_to_vm)
{
- __bio_unmap_user(bio);
+ __bio_unmap_user(bio, write_to_vm);
bio_put(bio);
}
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-Version 1.22
-------------
-Add config option to enable XATTR (extended attribute) support, mapping
-xattr names in the "user." namespace space to SMB/CIFS EAs.
-
-Version 1.21
-------------
-Add new mount parm to control whether mode check (vfs_permission) is done on
-the client. If Unix extensions are enabled and the uids on the client
-and server do not match, client permission checks are meaningless on
-server uids that do not exist on the client (this does not affect the
-normal ACL check which occurs on the server). Fix default uid
-on mknod to match create and mkdir. Add optional mount parm to allow
-override of the default uid behavior (in which the server sets the uid
-and gid of newly created files). Normally for network filesystem mounts
-user want the server to set the uid/gid on newly created files (rather than
-using uid of the client processes you would in a local filesystem).
-
Version 1.20
------------
Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
5) make dep
6) make modules (or "make" if CIFS VFS not to be built as a module)
-For Linux 2.6:
+For Linux 2.5:
1) Download the kernel (e.g. from http://www.kernel.org or from bitkeeper
at bk://linux.bkbits.net/linux-2.5) and change directory into the top
of the kernel directory tree (e.g. /usr/src/linux-2.5.73)
similar files reside (usually /sbin). Although the helper software is not
required, mount.cifs is recommended. Eventually the Samba 3.0 utility program
"net" may also be helpful since it may someday provide easier mount syntax for
-users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
+users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
Note that running the Winbind pam/nss module (logon service) on all of your
Linux clients is useful in mapping Uids and Gids consistently across the
domain to the proper network user. The mount.cifs mount helper can be
gcc samba/source/client/mount.cifs.c -o mount.cifs
-Allowing User Mounts
-====================
-To permit users to mount and unmount over directories they own is possible
-with the cifs vfs. A way to enable such mounting is to mark the mount.cifs
-utility as suid (e.g. "chmod +s /sbin/mount/cifs). To enable users to
-umount shares they mount requires
-1) mount.cifs version 1.4 or later
-2) an entry for the share in /etc/fstab indicating that a user may
-unmount it e.g.
-//server/usersharename /mnt/username cifs user 0 0
-
Note that when the mount.cifs utility is run suid (allowing user mounts),
in order to reduce risks, the "nosuid" mount flag is passed in on mount to
disallow execution of an suid program mounted on the remote target.
delete readonly = yes
ea support = yes
-Note that server ea support is required for supporting xattrs from the Linux
-cifs client, and that EA support is present in later versions of Samba (e.g.
-3.0.6 and later (also EA support works in all versions of Windows, at least to
-shares on NTFS filesystems). Extended Attribute (xattr) support is an optional
-feature of most Linux filesystems which may require enabling via
-make menuconfig
-
-Some administrators may want to change Samba's smb.conf "map archive" and
-"create mask" parameters from the default. Creating special devices (mknod)
+Note that ea support is required for supporting Linux xattrs.
+Some administrators also change the "map archive" and the "create mask"
+parameters from their default values. Creating special devices (mknod)
remotely may require specifying a mkdev function to Samba if you are not using
-Samba 3.0.6 or later. For more information on these see the manual pages
+Samba 3.0.5 or later. For more information on these see the manual pages
("man smb.conf") on the Samba server system. Note that the cifs vfs,
unlike the smbfs vfs, does not read the smb.conf on the client system
(the few optional settings are passed in on mount via -o parameters instead).
Note that Samba 2.2.7 or later includes a fix that allows the CIFS VFS to delete
open files (required for strict POSIX compliance). Windows Servers already
supported this feature. Samba server does not allow symlinks that refer to files
-outside of the share, so in Samba versions prior to 3.0.6, most symlinks to
+outside of the share, so in Samba versions prior to 3.0.5, most symlinks to
files with absolute paths (ie beginning with slash) such as:
ln -s /mnt/foo bar
-would be forbidden. Samba 3.0.6 server or later includes the ability to create
+would be forbidden. Samba 3.0.5 server or later includes the ability to create
such symlinks safely by converting unsafe symlinks (ie symlinks to server
files that are outside of the share) to a samba specific format on the server
that is ignored by local server applications and non-cifs clients and that will
running an altered binary on your local system (downloaded from a hostile server
or altered by a hostile router).
-Although mounting using format corresponding to the CIFS URL specification is
-not possible in mount.cifs yet, it is possible to use an alternate format
-for the server and sharename (which is somewhat similar to NFS style mount
-syntax) instead of the more widely used UNC format (i.e. \\server\share):
- mount -t cifs tcp_name_of_server:share_name /mnt -o user=myname,pass=mypasswd
-
When using the mount helper mount.cifs, passwords may be specified via alternate
mechanisms, instead of specifying it after -o using the normal "pass=" syntax
on the command line:
mount helper will not prompt the user for a password
if guest is specified on the mount options. If no
password is specified a null password will be used.
- perm Client does permission checks (vfs_permission check of uid
- and gid of the file against the mode and desired operation),
- Note that this is in addition to the normal ACL check on the
- target machine done by the server software.
- Client permission checking is enabled by default.
- noperm Client does not do permission checks. This can expose
- files on this mount to access by other users on the local
- client system. It is typically only needed when the server
- supports the CIFS Unix Extensions but the UIDs/GIDs on the
- client and server system do not match closely enough to allow
- access by the user doing the mount.
- Note that this does not affect the normal ACL check on the
- target machine done by the server software (of the server
- ACL against the user name provided at mount time).
- setuids If the CIFS Unix extensions are negotiated with the server
- the client will attempt to set the effective uid and gid of
- the local process on newly created files, directories, and
- devices (create, mkdir, mknod).
- nosetuids The client will not attempt to set the uid and gid on
- on newly created files, directories, and devices (create,
- mkdir, mknod) which will result in the server setting the
- uid and gid to the default (usually the server uid of the
- usern who mounted the share). Letting the server (rather than
- the client) set the uid and gid is the default. This
- parameter has no effect if the CIFS Unix Extensions are not
- negotiated.
-
+
The mount.cifs mount helper also accepts a few mount options before -o
including:
echo 1 > /proc/fs/cifs/traceSMB
-Two other experimental features are under development and to test
+Three other experimental features are under development and to test
require enabling an ifdef (e.g. by adding "#define CIFS_FCNTL" in cifsglob.h)
CONFIG_CIFS_QUOTA
+ CONFIG_CIFS_XATTR
+
CONFIG_CIFS_FCNTL (fcntl needed for support of directory change
notification and perhaps later for file leases)
-version 1.22 July 30, 2004
+version 1.16 May 27, 2004
A Partial List of Missing Features
==================================
a) Support for SecurityDescriptors for chmod/chgrp/chown so
these can be supported for Windows servers
-b) Better pam/winbind integration (e.g. to handle uid mapping
-better)
+b) Better pam/winbind integration
c) multi-user mounts - multiplexed sessionsetups over single vc
(ie tcp session) - prettying up needed
h) quota support
-j) finish writepages support (multi-page write behind for improved
+i) support for the Linux 2.5 kernel new features get_xattr and set_xattr
+which will allow us to expose DOS attributes as well as real
+ACLs. This support has been started in the current code, but is
+ifdeffed out.
+
+k) finish writepages support (multi-page write behind for improved
performance) and syncpage
-k) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
+l) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
extra copy in/out of the socket buffers in some cases.
-l) finish support for IPv6. This is mostly complete but
+m) finish support for IPv6. This is mostly complete but
needs a simple inet_pton like function to convert ipv6
addresses in string representation.
-m) Better optimize open (and pathbased setfilesize) to reduce the
+o) Better optimize open (and pathbased setfilesize) to reduce the
oplock breaks coming from windows srv. Piggyback identical file
opens on top of each other by incrementing reference count rather
than resending (helps reduce server resource utilization and avoid
spurious oplock breaks).
-o) Improve performance of readpages by sending more than one read
+p) Improve performance of readpages by sending more than one read
at a time when 8 pages or more are requested. Evaluate whether
reads larger than 16K would be helpful.
-p) For support of Windows9x/98 we need to retry failed mounts
+q) For support of Windows9x/98 we need to retry failed mounts
to *SMBSERVER (default server name) with the uppercase hostname
in the RFC1001 session_init request.
-q) Add support for storing symlink and fifo info to Windows servers
-in the Extended Attribute format their SFU clients would recognize.
+r) Add Extended Attribute support (for storing UID/GID info
+to Windows servers)
-r) Finish fcntl D_NOTIFY support so kde and gnome file list windows
+s) Finish fcntl D_NOTIFY support so kde and gnome file list windows
will autorefresh
-s) Add GUI tool to configure /proc/fs/cifs settings and for display of
+t) Add GUI tool to configure /proc/fs/cifs settings and for display of
the CIFS statistics
KNOWN BUGS (updated May 27, 2004)
differences but worth investigating). Also debug Samba to
see why lock test case 7 takes longer to complete to Samba
than to Windows.
-5) implement search rewind (seeking backward in a readdir), which is
-necessary for one of the "special" subsection of posix file API
-tests in the Connectathon nfs test suite.
Misc testing to do
==================
1) check out max path names and max path name components against various server
-types. Try nested symlinks (8 deep). Return max path name in stat -f information
+types. Try nested symlinks. Return max path name in stat -f information
2) Modify file portion of ltp so it can run against a mounted network
share and run it against cifs vfs.
negotiated size) and send larger write sizes to modern servers.
4) More exhaustively test the recently added NT4 support against various
-NT4 service pack levels, and fix cifs_setattr for setting file times and
-size to fall back to level 1 when error invalid level returned.
+NT4 service pack levels.
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
-#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
-#define CIFS_MOUNT_SET_UID 2 /* set current->euid in create etc. */
-
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
struct list_head nested_tcon_q;
gid_t mnt_gid;
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
- int mnt_cifs_flags;
};
#endif /* _CIFS_FS_SB_H */
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+/* BB when mempool_resize is added back in, we will resize pool on new mount */
+#define CIFS_MIN_RCV_POOL 11 /* enough for progress to five servers */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb;
- cifs_sb = CIFS_SB(inode->i_sb);
+ cifs_sb = CIFS_SB(inode->i_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
+ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
+ /* the server supports the Unix-like mode bits and does its
+ own permission checks, and therefore we do not allow the file
+ mode to be overriden on these mounts - so do not do perm
+ check on client side */
return 0;
} else /* file mode might have been restricted at mount time
on the client (above and beyond ACL on servers) for
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.sendfile = generic_file_sendfile,
- .dir_notify = cifs_dir_notify,
+#ifdef CONFIG_CIFS_FCNTL
+ .fcntl = cifs_fcntl,
+#endif
};
struct file_operations cifs_dir_ops = {
.readdir = cifs_readdir,
.release = cifs_closedir,
.read = generic_read_dir,
- .dir_notify = cifs_dir_notify,
+#ifdef CONFIG_CIFS_FCNTL
+ .fcntl = cifs_fcntl,
+#endif
};
static void
*/
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
- atomic_set(&tcpSesAllocCount,0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
#define TRUE 1
#endif
+extern int map_cifs_error(int error_class, int error_code,
+ int status_codes_negotiated);
+
extern struct address_space_operations cifs_addr_ops;
/* Functions related to super block operations */
extern struct super_operations cifs_super_ops;
+extern void cifs_put_inode(struct inode *);
extern void cifs_read_inode(struct inode *);
extern void cifs_delete_inode(struct inode *);
/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
extern struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
-extern int cifs_dir_notify(struct file *, unsigned long arg);
+extern long cifs_fcntl(int, unsigned int, unsigned long, struct file *);
/* Functions related to dir entries */
extern struct dentry_operations cifs_dentry_ops;
termination then *2 for unicode versions */
#define MAX_PASSWORD_SIZE 16
-#define CIFS_MIN_RCV_POOL 4
-
/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurently. It also matches the most common
*/
GLOBAL_EXTERN atomic_t sesInfoAllocCount;
GLOBAL_EXTERN atomic_t tconInfoAllocCount;
-GLOBAL_EXTERN atomic_t tcpSesAllocCount;
+
GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
/* PathInfo/FileInfo infolevels */
#define SMB_INFO_STANDARD 1
-#define SMB_SET_FILE_EA 2
-#define SMB_QUERY_FILE_EA_SIZE 2
#define SMB_INFO_QUERY_EAS_FROM_LIST 3
#define SMB_INFO_QUERY_ALL_EAS 4
#define SMB_INFO_IS_NAME_VALID 6
char LinkDest[1];
} FILE_UNIX_LINK_INFO; /* level 513 QPathInfo */
-typedef struct {
- __u16 CreationDate;
- __u16 CreationTime;
- __u16 LastAccessDate;
- __u16 LastAccessTime;
- __u16 LastWriteDate;
- __u16 LastWriteTime;
- __u32 DataSize; /* File Size (EOF) */
- __u32 AllocationSize;
- __u16 Attributes; /* verify not u32 */
- __u32 EASize;
-} FILE_INFO_STANDARD; /* level 1 SetPath/FileInfo */
-
/* defines for enumerating possible values of the Unix type field below */
#define UNIX_FILE 0
#define UNIX_DIR 1
} FILE_DIRECTORY_INFO; /* level 257 FF response data area */
struct gea {
- unsigned char name_len;
- char name[1];
+ unsigned char cbName;
+ char szName[1];
};
struct gealist {
- unsigned long list_len;
+ unsigned long cbList;
struct gea list[1];
};
unsigned char EA_flags;
__u8 name_len;
__u16 value_len;
- char name[1];
+ char szName[1];
/* optionally followed by value */
};
/* flags for _FEA.fEA */
const struct nls_table *nls_codepage);
extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const FILE_BASIC_INFO * data,
+ char *fileName, FILE_BASIC_INFO * data,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, __u64 size,int setAllocationSizeFlag,
+ char *fileName, __u64 size,int setAllocationSizeFlag,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
__u64 size, __u16 fileHandle,__u32 opener_pid, int AllocSizeFlag);
extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
extern void CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
+
+extern int CIFSBuildServerList(int xid, char *serverBufferList,
+ int recordlength, int *entries,
+ int *totalEntries, int *topoChangedFlag);
+extern int CIFSSMBQueryShares(int xid, struct cifsTconInfo *tcon,
+ struct shareInfo *shareList, int bufferLen,
+ int *entries, int *totalEntries);
+extern int CIFSSMBQueryAlias(int xid, struct cifsTconInfo *tcon,
+ struct aliasInfo *aliasList, int bufferLen,
+ int *entries, int *totalEntries);
+extern int CIFSSMBAliasInfo(int xid, struct cifsTconInfo *tcon,
+ char *aliasName, char *serverName,
+ char *shareName, char *comment);
+extern int CIFSSMBGetShareInfo(int xid, struct cifsTconInfo *tcon,
+ char *share, char *comment);
+extern int CIFSSMBGetUserPerms(int xid, struct cifsTconInfo *tcon,
+ char *userName, char *searchName, int *perms);
+extern int CIFSSMBSync(int xid, struct cifsTconInfo *tcon, int netfid, int pid);
+
+extern int CIFSSMBSeek(int xid,
+ struct cifsTconInfo *tcon,
+ int netfid,
+ int pid,
+ int whence, unsigned long offset, long long *newoffset);
+
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs,const __u16 netfid,__u32 filter,
const struct nls_table *nls_codepage);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char * EAData,
- size_t bufsize, const struct nls_table *nls_codepage);
-extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
- const struct nls_table *nls_codepage);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const char * ea_name,
- const void * ea_value, const __u16 ea_value_len,
- const struct nls_table *nls_codepage);
+extern int CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName,
+ char * EAData, size_t size,
+ const struct nls_table *nls_codepage);
#endif /* _CIFSPROTO_H */
}
int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSAttributeInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSDeviceInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSUnixInfo(int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEOF(int xid, struct cifsTconInfo *tcon, char *fileName,
__u64 size, int SetAllocation, const struct nls_table *nls_codepage)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
}
int
-CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const FILE_BASIC_INFO * data,
- const struct nls_table *nls_codepage)
+CIFSSMBSetTimes(int xid, struct cifsTconInfo *tcon, char *fileName,
+ FILE_BASIC_INFO * data, const struct nls_table *nls_codepage)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
return rc;
}
-
-int
-CIFSSMBSetTimesLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
- FILE_INFO_STANDARD * data, const struct nls_table *nls_codepage)
-{
- TRANSACTION2_SPI_REQ *pSMB = NULL;
- TRANSACTION2_SPI_RSP *pSMBr = NULL;
- int name_len;
- int rc = 0;
- int bytes_returned = 0;
- char *data_offset;
-
- cFYI(1, ("In SetTimesLegacy"));
-
-SetTimesRetryLegacy:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
- }
-/* BB fixme - we have to map to FILE_STANDARD_INFO (level 1 info
- in parent function, from the better and ususal FILE_BASIC_INFO */
- pSMB->ParameterCount = 6 + name_len;
- pSMB->DataCount = sizeof (FILE_INFO_STANDARD);
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
- pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
- data_offset = (char *) (&pSMB->hdr.Protocol) + pSMB->DataOffset;
- pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
- pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
- pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
-
- pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
- pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
- pSMB->TotalDataCount = pSMB->DataCount;
- pSMB->TotalParameterCount = pSMB->ParameterCount;
- /* I doubt that passthrough levels apply to this old
- preNT info level */
-/* if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
- pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
- else*/
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- memcpy(data_offset, data, sizeof (FILE_INFO_STANDARD));
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("SetPathInfo (times legacy) returned %d", rc));
- }
-
- if (pSMB)
- cifs_buf_release(pSMB);
-
- if (rc == -EAGAIN)
- goto SetTimesRetryLegacy;
-
- return rc;
-}
-
int
CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
char *fileName, __u64 mode, __u64 uid, __u64 gid,
return rc;
}
#ifdef CONFIG_CIFS_XATTR
-ssize_t
+int
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char * EAData, size_t buf_size,
+ char * EAData, size_t size,
const struct nls_table *nls_codepage)
{
/* BB assumes one setup word */
int rc = 0;
int bytes_returned;
int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, 530);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) +
pSMBr->DataOffset);
- ea_response_data->list_len =
- cpu_to_le32(ea_response_data->list_len);
cFYI(1,("ea length %d",ea_response_data->list_len));
- name_len = ea_response_data->list_len;
- if(name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- while(name_len > 0) {
- name_len -= 4;
- temp_ptr += 4;
- rc += temp_fea->name_len;
- /* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if(rc<buf_size) {
- memcpy(EAData,"user.",5);
- EAData+=5;
- memcpy(EAData,temp_ptr,temp_fea->name_len);
- EAData+=temp_fea->name_len;
- /* null terminate name */
- *EAData = 0;
- EAData = EAData + 1;
- } else if(buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
- name_len -= temp_fea->value_len;
- temp_ptr += temp_fea->value_len;
- /* BB check that temp_ptr is still within smb BB*/
- /* no trailing null to account for in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
}
}
if (pSMB)
return rc;
}
-
-ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
- const unsigned char * searchName,const unsigned char * ea_name,
- unsigned char * ea_value, size_t buf_size,
- const struct nls_table *nls_codepage)
-{
- TRANSACTION2_QPI_REQ *pSMB = NULL;
- TRANSACTION2_QPI_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- struct fea * temp_fea;
- char * temp_ptr;
-
- cFYI(1, ("In Query EA path %s", searchName));
-QEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
- }
-
- pSMB->TotalParameterCount = 2 /* level */ + 4 /* reserved */ +
- name_len /* includes null */ ;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
- pSMB->ByteCount = pSMB->TotalParameterCount + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(pSMB->TotalParameterCount);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("Send error in Query EA = %d", rc));
- } else { /* decode response */
- pSMBr->DataOffset = le16_to_cpu(pSMBr->DataOffset);
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if ((pSMBr->ByteCount < 4) || (pSMBr->DataOffset > 512))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- pSMBr->DataOffset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- struct fealist * ea_response_data;
- rc = -ENOENT;
- /* validate_trans2_offsets() */
- /* BB to check if(start of smb + pSMBr->DataOffset > &bcc+ bcc)*/
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- pSMBr->DataOffset);
- ea_response_data->list_len =
- cpu_to_le32(ea_response_data->list_len);
- cFYI(1,("ea length %d",ea_response_data->list_len));
- name_len = ea_response_data->list_len;
- if(name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1,("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- /* loop through checking if we have a matching
- name and then return the associated value */
- while(name_len > 0) {
- name_len -= 4;
- temp_ptr += 4;
- temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if(memcmp(temp_fea->name,ea_name,
- temp_fea->name_len) == 0) {
- /* found a match */
- rc = temp_fea->value_len;
- /* account for prefix user. and trailing null */
- if(rc<=buf_size) {
- memcpy(ea_value,
- temp_fea->name+temp_fea->name_len+1,
- rc);
- /* ea values, unlike ea names,
- are not null terminated */
- } else if(buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- }
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- name_len -= temp_fea->value_len;
- temp_ptr += temp_fea->value_len;
- /* no trailing null to account for in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
- }
- }
- if (pSMB)
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto QEARetry;
-
- return rc;
-}
-
-int
-CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
- const char * ea_name, const void * ea_value,
- const __u16 ea_value_len, const struct nls_table *nls_codepage)
-{
- struct smb_com_transaction2_spi_req *pSMB = NULL;
- struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
- struct fealist *parm_data;
- int name_len;
- int rc = 0;
- int bytes_returned = 0;
-
- cFYI(1, ("In SetEA"));
-SetEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
- /* find define for this maxpathcomponent */
- , nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, 530);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
- }
-
- pSMB->ParameterCount = 6 + name_len;
-
- /* done calculating parms using name_len of file name,
- now use name_len to calculate length of ea name
- we are going to create in the inode xattrs */
- if(ea_name == NULL)
- name_len = 0;
- else
- name_len = strnlen(ea_name,255);
-
- pSMB->DataCount = sizeof(*parm_data) + ea_value_len + name_len + 1;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
- InformationLevel) - 4;
- pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
- pSMB->InformationLevel =
- cpu_to_le16(SMB_SET_FILE_EA);
-
- parm_data =
- (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
- pSMB->DataOffset);
- pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
- pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
- pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
- pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
- parm_data->list_len = (__u32)(pSMB->DataCount);
- parm_data->list[0].EA_flags = 0;
- /* we checked above that name len is less than 255 */
- parm_data->list[0].name_len = (__u8)name_len;;
- /* EA names are always ASCII */
- strncpy(parm_data->list[0].name,ea_name,name_len);
- parm_data->list[0].name[name_len] = 0;
- parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
- /* caller ensures that ea_value_len is less than 64K but
- we need to ensure that it fits within the smb */
-
- /*BB add length check that it would fit in negotiated SMB buffer size BB */
- /* if(ea_value_len > buffer_size - 512 (enough for header)) */
- if(ea_value_len)
- memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
-
- pSMB->TotalDataCount = pSMB->DataCount;
- pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
- pSMB->TotalParameterCount = pSMB->ParameterCount;
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += pSMB->ByteCount;
- pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("SetPathInfo (EA) returned %d", rc));
- }
-
- if (pSMB)
- cifs_buf_release(pSMB);
-
- if (rc == -EAGAIN)
- goto SetEARetry;
-
- return rc;
-}
-
#endif
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
-#include <linux/mempool.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include "cifspdu.h"
unsigned char *p24);
extern int cifs_inet_pton(int, const char *, void *dst);
-extern mempool_t *cifs_req_poolp;
-
struct smb_vol {
char *username;
char *password;
int rw:1;
int retry:1;
int intr:1;
- int setuids:1;
- int noperm:1;
unsigned int rsize;
unsigned int wsize;
unsigned int sockopt;
unsigned int pdu_length, total_read;
struct smb_hdr *smb_buffer = NULL;
struct msghdr smb_msg;
- struct kvec iov;
+ mm_segment_t temp_fs;
+ struct iovec iov;
struct socket *csocket = server->ssocket;
struct list_head *tmp;
struct cifsSesInfo *ses;
current->flags |= PF_MEMALLOC;
server->tsk = current; /* save process info to wake at shutdown */
cFYI(1, ("Demultiplex PID: %d", current->pid));
- write_lock(&GlobalSMBSeslock);
- atomic_inc(&tcpSesAllocCount);
- length = tcpSesAllocCount.counter;
- write_unlock(&GlobalSMBSeslock);
- if(length > 1) {
- mempool_resize(cifs_req_poolp,
- length + CIFS_MIN_RCV_POOL,
- GFP_KERNEL);
- }
+
+ temp_fs = get_fs(); /* we must turn off socket api parm checking */
+ set_fs(get_ds());
while (server->tcpStatus != CifsExiting) {
if (smb_buffer == NULL)
iov.iov_base = smb_buffer;
iov.iov_len = sizeof (struct smb_hdr) - 1;
/* 1 byte less above since wct is not always returned in error cases */
+ smb_msg.msg_iov = &iov;
+ smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
length =
- kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
- sizeof (struct smb_hdr) -
- 1 /* RFC1001 header and SMB header */ ,
- MSG_PEEK /* flags see socket.h */ );
+ sock_recvmsg(csocket, &smb_msg,
+ sizeof (struct smb_hdr) -
+ 1 /* RFC1001 header and SMB header */ ,
+ MSG_PEEK /* flags see socket.h */ );
if(server->tcpStatus == CifsExiting) {
break;
if (temp[0] == (char) RFC1002_SESSION_KEEP_ALIVE) {
iov.iov_base = smb_buffer;
iov.iov_len = 4;
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, 4, 0);
+ length = sock_recvmsg(csocket, &smb_msg, 4, 0);
cFYI(0,("Received 4 byte keep alive packet"));
} else if (temp[0] == (char) RFC1002_POSITIVE_SESSION_RESPONSE) {
- iov.iov_base = smb_buffer;
- iov.iov_len = 4;
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, 4, 0);
+ iov.iov_base = smb_buffer;
+ iov.iov_len = 4;
+ length = sock_recvmsg(csocket, &smb_msg, 4, 0);
cFYI(1,("Good RFC 1002 session rsp"));
} else if ((temp[0] == (char)RFC1002_NEGATIVE_SESSION_RESPONSE)
&& (length == 5)) {
for (total_read = 0;
total_read < pdu_length;
total_read += length) {
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
+ length = sock_recvmsg(csocket, &smb_msg,
pdu_length - total_read, 0);
if (length == 0) {
cERROR(1,
("Frame less than four bytes received %d bytes long.",
length));
if (length > 0) {
- length = kernel_recvmsg(csocket, &smb_msg,
- &iov, 1,
- length, 0); /* throw away junk frame */
+ length = sock_recvmsg(csocket, &smb_msg, length, 0); /* throw away junk frame */
cFYI(1,
(" with junk 0x%x in it ",
*(__u32 *) smb_buffer));
sock_release(csocket);
server->ssocket = NULL;
}
+ set_fs(temp_fs);
if (smb_buffer) /* buffer usually freed in free_mid - need to free it on error or exit */
cifs_buf_release(smb_buffer);
}
kfree(server);
- write_lock(&GlobalSMBSeslock);
- atomic_dec(&tcpSesAllocCount);
- length = tcpSesAllocCount.counter;
- write_unlock(&GlobalSMBSeslock);
- if(length > 0) {
- mempool_resize(cifs_req_poolp,
- length + CIFS_MIN_RCV_POOL,
- GFP_KERNEL);
- }
-
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
return 0;
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
vol->retry = 0;
- } else if (strnicmp(data, "perm", 4) == 0) {
- vol->noperm = 0;
- } else if (strnicmp(data, "noperm", 6) == 0) {
- vol->noperm = 1;
- } else if (strnicmp(data, "setuids", 7) == 0) {
- vol->setuids = 1;
- } else if (strnicmp(data, "nosetuids", 9) == 0) {
- vol->setuids = 0;
} else if (strnicmp(data, "nohard", 6) == 0) {
vol->retry = 0;
} else if (strnicmp(data, "nosoft", 6) == 0) {
cifs_sb->mnt_file_mode = volume_info.file_mode;
cifs_sb->mnt_dir_mode = volume_info.dir_mode;
cFYI(1,("file mode: 0x%x dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
-
- if(volume_info.noperm)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
- if(volume_info.setuids)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
-
tcon =
find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
volume_info.username);
then we now have to set the mode if possible */
if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(oplock & CIFS_CREATE_ACTION))
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)current->euid,
- (__u64)current->egid,
- 0 /* dev */,
- cifs_sb->local_nls);
- } else {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
(__u64)-1,
(__u64)-1,
0 /* dev */,
cifs_sb->local_nls);
- }
else {
/* BB implement via Windows security descriptors */
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
rc = -ENOMEM;
if (full_path && (pTcon->ses->capabilities & CAP_UNIX)) {
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
- mode,(__u64)current->euid,(__u64)current->egid,
- device_number, cifs_sb->local_nls);
- } else {
- rc = CIFSSMBUnixSetPerms(xid, pTcon,
- full_path, mode, (__u64)-1, (__u64)-1,
- device_number, cifs_sb->local_nls);
- }
-
+ rc = CIFSSMBUnixSetPerms(xid, pTcon,
+ full_path, mode, current->euid, current->egid,
+ device_number, cifs_sb->local_nls);
if(!rc) {
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb,xid);
#include "cifs_unicode.h"
#include "cifs_debug.h"
-int cifs_dir_notify(struct file * file, unsigned long arg)
+int cifs_directory_notify(unsigned long arg, struct file * file)
{
int xid;
int rc = -EINVAL;
FreeXid(xid);
return rc;
}
+
+
+long cifs_fcntl(int file_desc, unsigned int command, unsigned long arg,
+ struct file * file)
+{
+ /* Few few file control functions need to be specially mapped. So far
+ only:
+ F_NOTIFY (for directory change notification)
+ And eventually:
+ F_GETLEASE
+ F_SETLEASE
+ need to be mapped here. The others either already are mapped downstream
+ or do not need to go to the server (client only sideeffects):
+ F_DUPFD:
+ F_GETFD:
+ F_SETFD:
+ F_GETFL:
+ F_SETFL:
+ F_GETLK:
+ F_SETLK:
+ F_SETLKW:
+ F_GETOWN:
+ F_SETOWN:
+ F_GETSIG:
+ F_SETSIG:
+ */
+ long rc = 0;
+
+ cFYI(1,("cifs_fcntl: command %d with arg %lx",command,arg)); /* BB removeme BB */
+
+ switch (command) {
+ case F_NOTIFY:
+ /* let the local call have a chance to fail first */
+ rc = generic_file_fcntl(file_desc,command,arg,file);
+ if(rc)
+ return rc;
+ else {
+ /* local call succeeded try to do remote notify to
+ pick up changes from other clients to server file */
+ cifs_directory_notify(arg, file);
+ /* BB add case to long and return rc from above */
+ return rc;
+ }
+ break;
+ default:
+ break;
+ }
+ return generic_file_fcntl(file_desc,command,arg,file);
+}
+
d_instantiate(direntry, newinode);
if(direntry->d_inode)
direntry->d_inode->i_nlink = 2;
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
- if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)current->euid,
- (__u64)current->egid,
- 0 /* dev_t */,
- cifs_sb->local_nls);
- } else {
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)-1,
- (__u64)-1,
- 0 /* dev_t */,
- cifs_sb->local_nls);
- }
+ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)-1,
+ (__u64)-1,
+ 0 /* dev_t */,
+ cifs_sb->local_nls);
else { /* BB to be implemented via Windows secrty descriptors*/
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
}
void NTLMSSPOWFencrypt(unsigned char passwd[8],
unsigned char *ntlmchalresp, unsigned char p24[24]);
void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+int decode_pw_buffer(char in_buffer[516], char *new_pwrd,
+ int new_pwrd_size, __u32 * new_pw_len);
/*
This implements the X/Open SMB password encryption
/*
* fs/cifs/smberr.h
*
- * Copyright (c) International Business Machines Corp., 2002,2004
+ * Copyright (c) International Business Machines Corp., 2002
* Author(s): Steve French (sfrench@us.ibm.com)
*
* See Error Codes section of the SNIA CIFS Specification
#define ERRinvparm 87
#define ERRdiskfull 112
#define ERRinvname 123
-#define ERRinvlevel 124
#define ERRdirnotempty 145
#define ERRnotlocked 158
#define ERRalreadyexists 183
int rc = 0;
int i = 0;
struct msghdr smb_msg;
- struct kvec iov;
+ struct iovec iov;
+ mm_segment_t temp_fs;
if(ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
smb_msg.msg_name = sin;
smb_msg.msg_namelen = sizeof (struct sockaddr);
+ smb_msg.msg_iov = &iov;
+ smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
cFYI(1, ("Sending smb of length %d ", smb_buf_length));
dump_smb(smb_buffer, smb_buf_length + 4);
+ temp_fs = get_fs(); /* we must turn off socket api parm checking */
+ set_fs(get_ds());
while(iov.iov_len > 0) {
- rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, smb_buf_length + 4);
+ rc = sock_sendmsg(ssocket, &smb_msg, smb_buf_length + 4);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
if(i > 60) {
iov.iov_base += rc;
iov.iov_len -= rc;
}
+ set_fs(temp_fs);
if (rc < 0) {
cERROR(1,("Error %d sending data on socket to server.", rc));
#include "cifsproto.h"
#include "cifs_debug.h"
-#define MAX_EA_VALUE_SIZE 65535
-#define CIFS_XATTR_DOS_ATTRIB "user.DOSATTRIB"
-#define CIFS_XATTR_USER_PREFIX "user."
-#define CIFS_XATTR_SYSTEM_PREFIX "system."
-#define CIFS_XATTR_OS2_PREFIX "OS2." /* BB should check for this someday */
-/* also note could add check for security prefix XATTR_SECURITY_PREFIX */
-
-
-int cifs_removexattr(struct dentry * direntry, const char * ea_name)
+int cifs_removexattr(struct dentry * direntry, const char * name)
{
int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for
- system and perhaps security prefixes? */
- } else {
- ea_name+=5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
- (__u16)0, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-int cifs_setxattr(struct dentry * direntry, const char * ea_name,
- const void * ea_value, size_t value_size, int flags)
+int cifs_setxattr(struct dentry * direntry, const char * name,
+ const void * value, size_t size, int flags)
{
int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- /* return dos attributes as pseudo xattr */
- /* return alt name if available as pseudo attr */
-
- /* if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
- returns as xattrs */
- if(value_size > MAX_EA_VALUE_SIZE) {
- cFYI(1,("size of EA value too large"));
- if(full_path)
- kfree(full_path);
- FreeXid(xid);
- return -EOPNOTSUPP;
- }
-
- if(ea_name == NULL) {
- cFYI(1,("Null xattr names not supported"));
- } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for
- system and perhaps security prefixes? */
- } else {
- ea_name+=5; /* skip past user. prefix */
- rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
- (__u16)value_size, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
- void * ea_value, size_t buf_size)
+ssize_t cifs_getxattr(struct dentry * direntry, const char * name,
+ void * value, size_t size)
{
ssize_t rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
- int xid;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct super_block * sb;
- char * full_path;
-
- if(direntry == NULL)
- return -EIO;
- if(direntry->d_inode == NULL)
- return -EIO;
- sb = direntry->d_inode->i_sb;
- if(sb == NULL)
- return -EIO;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
- down(&sb->s_vfs_rename_sem);
- full_path = build_path_from_dentry(direntry);
- up(&sb->s_vfs_rename_sem);
- if(full_path == NULL) {
- FreeXid(xid);
- return -ENOMEM;
- }
- /* return dos attributes as pseudo xattr */
- /* return alt name if available as pseudo attr */
- if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
- cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
- /* BB what if no namespace prefix? */
- /* Should we just pass them to server, except for system? */
- } else {
- /* We could add a check here
- if proc/fs/cifs/streamstoxattr is set then
- search server for EAs or streams to
- returns as xattrs */
- ea_name+=5; /* skip past user. */
- rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
- buf_size, cifs_sb->local_nls);
- }
- if (full_path)
- kfree(full_path);
- FreeXid(xid);
-#endif
return rc;
}
-ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
+ssize_t cifs_listxattr(struct dentry * direntry, char * ea_data, size_t ea_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
struct cifsTconInfo *pTcon;
struct super_block * sb;
char * full_path;
-
if(direntry == NULL)
return -EIO;
if(direntry->d_inode == NULL)
FreeXid(xid);
return -ENOMEM;
}
- /* return dos attributes as pseudo xattr */
+ /* return dosattributes as pseudo xattr */
/* return alt name if available as pseudo attr */
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
- cifs_sb->local_nls);
-
- if (full_path)
- kfree(full_path);
+ rc = CIFSSMBQAllEAs(xid,pTcon,full_path,ea_data,ea_size,cifs_sb->local_nls);
FreeXid(xid);
#endif
return rc;
}
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp, loff_t * ppos )
+ void __user * buffer, size_t * lenp )
{
if ( write ) {
reset_coda_vfs_stats();
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
*lenp = 0;
}
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp, loff_t * ppos )
+ size_t * lenp )
{
if ( write ) {
reset_coda_cache_inv_stats();
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
*lenp = 0;
}
static int fb_getput_cmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fb_cmap_user __user *cmap;
+ struct fb_cmap __user *cmap;
struct fb_cmap32 __user *cmap32;
__u32 data;
int err;
{
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- if (dentry->d_extra_attributes) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
call_rcu(&dentry->d_rcu, d_callback);
}
struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
if (!list_empty(&this->d_lru)) {
dentry_stat.nr_unused--;
- list_del_init(&this->d_lru);
+ list_del(&this->d_lru);
}
/*
dentry->d_sb = NULL;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
- dentry->d_extra_attributes = NULL;
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
dentry->d_bucket = NULL;
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
- /* flush any possible attributes */
- if (dentry->d_extra_attributes) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
- if (target->d_extra_attributes) {
- kfree(target->d_extra_attributes);
- target->d_extra_attributes = NULL;
- }
-
list_del(&dentry->d_child);
list_del(&target->d_child);
*
* "buflen" should be positive. Caller holds the dcache_lock.
*/
-char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
struct dentry *root, struct vfsmount *rootmnt,
char *buffer, int buflen)
{
return ERR_PTR(-ENAMETOOLONG);
}
-EXPORT_SYMBOL_GPL(__d_path);
-
/* write full pathname into buffer and return start of pathname */
char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
char *buf, int buflen)
INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
-void flush_dentry_attributes (void)
-{
- struct hlist_node *tmp;
- struct dentry *dentry;
- int i;
-
- spin_lock(&dcache_lock);
- for (i = 0; i <= d_hash_mask; i++)
- hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
- kfree(dentry->d_extra_attributes);
- dentry->d_extra_attributes = NULL;
- }
- spin_unlock(&dcache_lock);
-}
-
-EXPORT_SYMBOL_GPL(flush_dentry_attributes);
-
static void __init dcache_init(unsigned long mempages)
{
/*
static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
loff_t * ppos);
static struct file_operations stat_fops = {
- .open = nonseekable_open,
.read = stat_read,
};
#endif
/* Devfs daemon file operations */
static struct file_operations devfsd_fops = {
- .open = nonseekable_open,
.read = devfsd_read,
.ioctl = devfsd_ioctl,
.release = devfsd_close,
struct devfsd_notify_struct *info = fs_info->devfsd_info;
DECLARE_WAITQUEUE(wait, current);
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
/* Verify the task has grabbed the queue */
if (fs_info->devfsd_task != current)
return -EPERM;
num = sprintf(txt, "Number of entries: %u number of bytes: %u\n",
stat_num_entries, stat_num_bytes) + 1;
+ /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (*ppos >= num)
return 0;
if (*ppos + len > num)
#include <linux/vs_base.h>
#include "xattr.h"
+#define DEVPTS_SUPER_MAGIC 0x1cd1
+
static struct vfsmount *devpts_mnt;
static struct dentry *devpts_root;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_nlink = 2;
- inode->i_xid = vx_current_xid();
devpts_root = s->s_root = d_alloc_root(inode);
if (s->s_root)
dn->dn_next = inode->i_dnotify;
inode->i_dnotify = dn;
spin_unlock(&inode->i_lock);
-
- if (filp->f_op && filp->f_op->dir_notify)
- return filp->f_op->dir_notify(filp, arg);
return 0;
out_free:
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/ckrm.h>
-#include <linux/vs_memory.h>
-#include <linux/ckrm_mem.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
tsk->active_mm = mm;
activate_mm(active_mm, mm);
task_unlock(tsk);
- arch_pick_mmap_layout(mm);
-#ifdef CONFIG_CKRM_RES_MEM
- if (old_mm) {
- spin_lock(&old_mm->peertask_lock);
- list_del(&tsk->mm_peers);
- ckrm_mem_evaluate_mm(old_mm);
- spin_unlock(&old_mm->peertask_lock);
- }
- spin_lock(&mm->peertask_lock);
- list_add_tail(&tsk->mm_peers, &mm->tasklist);
- ckrm_mem_evaluate_mm(mm);
- spin_unlock(&mm->peertask_lock);
-#endif
if (old_mm) {
if (active_mm != old_mm) BUG();
mmput(old_mm);
if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
/* Set-uid? */
if (mode & S_ISUID) {
- current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_uid = inode->i_uid;
+#ifdef __i386__
+ /* reset personality */
+ current->personality = PER_LINUX;
+#endif
}
/* Set-gid? */
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_gid = inode->i_gid;
+#ifdef __i386__
+ /* reset personality */
+ current->personality = PER_LINUX;
+#endif
}
}
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/namei.h>
-#include <linux/vs_base.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
{
int mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
/* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && (IS_RDONLY(inode) ||
- (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/* Nobody gets write access to an immutable file */
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
int need_revalidate = (filp->f_version != inode->i_version);
- int ret;
+ int ret = 0;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
- goto success;
+ goto done;
if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
types = ext2_filetype_table;
le32_to_cpu(de->inode), d_type);
if (over) {
ext2_put_page(page);
- goto success;
+ goto done;
}
}
}
ext2_put_page(page);
}
-success:
- ret = 0;
done:
filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
filp->f_version = inode->i_version;
- return ret;
+ return 0;
}
/*
if (!inode)
return ERR_PTR(-ENOMEM);
- if (sb->s_flags & MS_TAGXID)
- inode->i_xid = current->xid;
- else
- inode->i_xid = 0;
-
if (DLIMIT_ALLOC_INODE(sb, inode->i_xid)) {
err = -ENOSPC;
goto fail_dlim;
{
unsigned int flags = EXT2_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_IUNLINK|S_BARRIER|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT2_APPEND_FL)
uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
- le16_to_cpu(raw_inode->i_raw_xid));
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
struct ext2_inode_info *ei = EXT2_I(inode);
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
struct buffer_head * bh;
struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
int n;
case EXT2_IOC_SETFLAGS: {
unsigned int oldflags;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if (((oldflags & EXT2_IMMUTABLE_FL) ||
+ if ((oldflags & EXT2_IMMUTABLE_FL) ||
((flags ^ oldflags) &
- (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL | EXT2_IUNLINK_FL)))
- && !capable(CAP_LINUX_IMMUTABLE)) {
- return -EPERM;
+ (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
}
flags = flags & EXT2_FL_USER_MODIFIABLE;
case EXT2_IOC_SETVERSION:
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(inode->i_generation, (int __user *) arg))
return -EFAULT;
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/namei.h>
#include <linux/ext3_jbd.h>
#include <linux/ext3_fs.h>
-#include <linux/vs_base.h>
#include "xattr.h"
#include "acl.h"
{
int mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
/* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && (IS_RDONLY(inode) ||
- (nd && nd->mnt && MNT_IS_RDONLY(nd->mnt))) &&
+ if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/* Nobody gets write access to an immutable file */
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
-#include <linux/vs_base.h>
#include <linux/vs_dlimit.h>
/*
static int ext3_has_free_blocks(struct super_block *sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
- int free_blocks, root_blocks, cond;
+ int free_blocks, root_blocks;
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
- vxdprintk(VXD_CBIT(dlim, 3),
- "ext3_has_free_blocks(%p): free=%u, root=%u",
- sb, free_blocks, root_blocks);
-
DLIMIT_ADJUST_BLOCK(sb, vx_current_xid(), &free_blocks, &root_blocks);
- cond = (free_blocks < root_blocks + 1 &&
- !capable(CAP_SYS_RESOURCE) &&
+ if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
sbi->s_resuid != current->fsuid &&
- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));
-
- vxdprintk(VXD_CBIT(dlim, 3),
- "ext3_has_free_blocks(%p): %u<%u+1, %c, %u!=%u r=%d",
- sb, free_blocks, root_blocks,
- !capable(CAP_SYS_RESOURCE)?'1':'0',
- sbi->s_resuid, current->fsuid, cond?0:1);
-
- return (cond ? 0 : 1);
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+ }
+ return 1;
}
/*
io_error:
*errp = -EIO;
out:
- if (!performed_allocation)
- DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
+ DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
out_dlimit:
if (fatal) {
*errp = fatal;
/*
* Undo the block allocation
*/
- if (!performed_allocation)
+ if (!performed_allocation) {
+ DLIMIT_FREE_BLOCK(sb, inode->i_xid, 1);
DQUOT_FREE_BLOCK(inode, 1);
+ }
brelse(bitmap_bh);
return 0;
}
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
-
- if (sb->s_flags & MS_TAGXID)
- inode->i_xid = current->xid;
- else
- inode->i_xid = 0;
-
if (DLIMIT_ALLOC_INODE(sb, inode->i_xid)) {
err = -ENOSPC;
- goto out;
+ goto fail_dlim;
}
ei = EXT3_I(inode);
goto really_out;
fail:
DLIMIT_FREE_INODE(sb, inode->i_xid);
+fail_dlim:
ext3_std_error(sb, err);
out:
iput(inode);
{
unsigned int flags = EXT3_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_IUNLINK|S_BARRIER|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT3_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT3_APPEND_FL)
uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid,
- le16_to_cpu(raw_inode->i_raw_xid));
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ inode->i_xid = INOXID_XID(uid, gid, le16_to_cpu(raw_inode->i_raw_xid));
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
struct ext3_inode_info *ei = EXT3_I(inode);
struct buffer_head *bh = iloc->bh;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
int err = 0, rc, block;
/* For fields not not tracking in the in-memory inode,
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if ((attr->ia_valid & ATTR_XID)
- && inode->i_sb
- && (inode->i_sb->s_flags & MS_TAGXID))
+ if (attr->ia_valid & ATTR_XID)
inode->i_xid = attr->ia_xid;
error = ext3_mark_inode_dirty(handle, inode);
ext3_journal_stop(handle);
unsigned int oldflags;
unsigned int jflag;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if (((oldflags & EXT3_IMMUTABLE_FL) ||
+ if ((oldflags & EXT3_IMMUTABLE_FL) ||
((flags ^ oldflags) &
- (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL)))
- && !capable(CAP_LINUX_IMMUTABLE)) {
- return -EPERM;
+ (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
}
/*
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(generation, (int __user *) arg))
return -EFAULT;
break;
#ifndef CONFIG_INOXID_NONE
case Opt_tagxid:
- if (is_remount) {
- printk(KERN_ERR "EXT3-fs: cannot specify "
- "tagxid on remount\n");
- return 0;
- }
set_opt (sbi->s_mount_opt, TAG_XID);
break;
#endif
return error;
}
-int dupfd(struct file *file, unsigned int start)
+static int dupfd(struct file *file, unsigned int start)
{
struct files_struct * files = current->files;
int fd;
FD_SET(fd, files->open_fds);
FD_CLR(fd, files->close_on_exec);
spin_unlock(&files->file_lock);
- // vx_openfd_inc(fd);
+ vx_openfd_inc(fd);
fd_install(fd, file);
} else {
spin_unlock(&files->file_lock);
return fd;
}
-EXPORT_SYMBOL_GPL(dupfd);
-
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
int err = -EBADF;
FD_SET(newfd, files->open_fds);
FD_CLR(newfd, files->close_on_exec);
spin_unlock(&files->file_lock);
- // vx_openfd_inc(newfd);
+ vx_openfd_inc(newfd);
if (tofree)
filp_close(tofree, files);
return -EINVAL;
}
- if (filp->f_op && filp->f_op->check_flags)
- error = filp->f_op->check_flags(arg);
- if (error)
- return error;
-
lock_kernel();
if ((arg ^ filp->f_flags) & FASYNC) {
if (filp->f_op && filp->f_op->fasync) {
EXPORT_SYMBOL(f_delown);
-static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
- struct file *filp)
+long generic_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
{
long err = -EINVAL;
}
return err;
}
+EXPORT_SYMBOL(generic_file_fcntl);
+
+static long do_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
+{
+ if (filp->f_op && filp->f_op->fcntl)
+ return filp->f_op->fcntl(fd, cmd, arg, filp);
+ return generic_file_fcntl(fd, cmd, arg, filp);
+}
asmlinkage long sys_fcntl(int fd, unsigned int cmd, unsigned long arg)
{
}
filp->f_version = 0;
- /* We can only do regular read/write on fifos */
- filp->f_mode &= (FMODE_READ | FMODE_WRITE);
-
switch (filp->f_mode) {
case 1:
/*
memset(filp, 0, sizeof(*filp));
eventpoll_init_file(filp);
filp->f_flags = flags;
- filp->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+ filp->f_mode = (flags+1) & O_ACCMODE;
atomic_set(&filp->f_count, 1);
filp->f_dentry = dentry;
filp->f_mapping = dentry->d_inode->i_mapping;
} else if (inode->i_state & I_DIRTY) {
/*
* Someone redirtied the inode while were writing back
- * the pages.
+ * the pages: nothing to do.
*/
- list_move(&inode->i_list, &sb->s_dirty);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int size;
+ unsigned int shift, size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- tree->node_size_shift = ffs(size) - 1;
+ for (shift = 0; size >>= 1; shift += 1)
+ ;
+ tree->node_size_shift = shift;
+
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
kunmap(page);
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int size;
+ unsigned int shift, size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- tree->node_size_shift = ffs(size) - 1;
+ for (shift = 0; size >>= 1; shift += 1)
+ ;
+ tree->node_size_shift = shift;
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
flags |= EXT2_FLAG_NODUMP; /* EXT2_NODUMP_FL */
return put_user(flags, (int __user *)arg);
case HFSPLUS_IOC_EXT2_SETFLAGS: {
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
loff_t len, vma_len;
int ret;
- if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
- return -EINVAL;
-
if (vma->vm_start & ~HPAGE_MASK)
return -EINVAL;
unsigned long v_length;
unsigned long v_offset;
- h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
+ h_vm_pgoff = vma->vm_pgoff << (HPAGE_SHIFT - PAGE_SHIFT);
+ v_length = vma->vm_end - vma->vm_start;
v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
+
/*
* Is this VMA fully outside the truncation point?
*/
if (h_vm_pgoff >= h_pgoff)
v_offset = 0;
- v_length = vma->vm_end - vma->vm_start;
-
zap_hugepage_range(vma,
vma->vm_start + v_offset,
v_length - v_offset);
struct file *hugetlb_zero_setup(size_t size)
{
- int error = -ENOMEM;
+ int error;
struct file *file;
struct inode *inode;
struct dentry *dentry, *root;
struct qstr quick_string;
char buf[16];
- if (!capable(CAP_IPC_LOCK))
+ if (!can_do_mlock())
return ERR_PTR(-EPERM);
if (!is_hugepage_mem_enough(size))
return ERR_PTR(-ENOMEM);
- if (!user_shm_lock(size, current->user))
- return ERR_PTR(-ENOMEM);
-
root = hugetlbfs_vfsmount->mnt_root;
snprintf(buf, 16, "%lu", hugetlbfs_counter());
quick_string.name = buf;
quick_string.hash = 0;
dentry = d_alloc(root, &quick_string);
if (!dentry)
- goto out_shm_unlock;
+ return ERR_PTR(-ENOMEM);
error = -ENFILE;
file = get_empty_filp();
put_filp(file);
out_dentry:
dput(dentry);
-out_shm_unlock:
- user_shm_unlock(size, current->user);
return ERR_PTR(error);
}
struct address_space * const mapping = &inode->i_data;
inode->i_sb = sb;
+ if (sb->s_flags & MS_TAGXID)
+ inode->i_xid = current->xid;
+ else
+ inode->i_xid = 0; /* maybe xid -1 would be better? */
// inode->i_dqh = dqhget(sb->s_dqh);
-
- /* important because of inode slab reuse */
- inode->i_xid = 0;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_rdev = 0;
+ // inode->i_xid = 0; /* maybe not too wise ... */
inode->i_security = NULL;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
list_add(&inode->i_list, &inode_in_use);
inode->i_ino = ++last_ino;
inode->i_state = 0;
+ inode->i_xid = vx_current_xid();
spin_unlock(&inode_lock);
}
return inode;
* When ctime_too is specified update the ctime too.
*/
-void inode_update_time(struct inode *inode, struct vfsmount *mnt, int ctime_too)
+void inode_update_time(struct inode *inode, int ctime_too)
{
struct timespec now;
int sync_it = 0;
if (IS_NOCMTIME(inode))
return;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
+ if (IS_RDONLY(inode))
return;
now = current_kernel_time();
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
-#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/vserver/inode.h>
#include <linux/vserver/xid.h>
error = vx_proc_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
break;
#endif
- case FIOC_SETIATTR:
- case FIOC_GETIATTR:
- /*
- * Verify that this filp is a file object,
- * not (say) a socket.
- */
- error = -ENOTTY;
- if (S_ISREG(filp->f_dentry->d_inode->i_mode) ||
- S_ISDIR(filp->f_dentry->d_inode->i_mode))
- error = vc_iattr_ioctl(filp->f_dentry,
- cmd, arg);
- break;
-
default:
error = -ENOTTY;
if (S_ISREG(filp->f_dentry->d_inode->i_mode))
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
- jh = journal_grab_journal_head(bh);
- if (!jh)
- goto zap_buffer_no_jh;
+ /*
+ * Now we have the locks, check again to see whether kjournald has
+ * taken the buffer off the transaction.
+ */
+ if (!buffer_jbd(bh))
+ goto zap_buffer;
+ jh = bh2jh(bh);
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return ret;
} else {
/* There is no currently-running transaction. So the
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return ret;
} else {
/* The orphan record's transaction has
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
- journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
}
zap_buffer:
- journal_put_journal_head(jh);
-zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
int jffs_register_jffs_proc_dir(int mtd, struct jffs_control *c)
{
struct jffs_partition_dir *part_dir;
- struct proc_dir_entry *part_info = NULL;
- struct proc_dir_entry *part_layout = NULL;
- struct proc_dir_entry *part_root = NULL;
+ struct proc_dir_entry *part_info = 0;
+ struct proc_dir_entry *part_layout = 0;
+ struct proc_dir_entry *part_root = 0;
char name[10];
sprintf(name, "%d", mtd);
int jffs_unregister_jffs_proc_dir(struct jffs_control *c)
{
struct jffs_partition_dir *part_dir = jffs_part_dirs;
- struct jffs_partition_dir *prev_part_dir = NULL;
+ struct jffs_partition_dir *prev_part_dir = 0;
while (part_dir) {
if (part_dir->c == c) {
int count, int *eof, void *data)
{
struct jffs_control *c = (struct jffs_control *) data;
- struct jffs_fm *fm = NULL;
- struct jffs_fm *last_fm = NULL;
+ struct jffs_fm *fm = 0;
+ struct jffs_fm *last_fm = 0;
int len = 0;
/* Get the first item in the list */
#
# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
#
-# $Id: Makefile.common,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+# $Id: Makefile.common,v 1.5 2004/07/15 16:06:41 dwmw2 Exp $
#
obj-$(CONFIG_JFFS2_FS) += jffs2.o
jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+jffs2-$(CONFIG_JFFS2_PROC) += proc.o
* For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*
- * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+ * $Id: compr.h,v 1.5 2004/06/23 16:34:39 havasi Exp $
*
*/
void jffs2_lzo_exit(void);
#endif
+/* Prototypes from proc.c */
+int jffs2_proc_init(void);
+int jffs2_proc_exit(void);
+
#endif /* __JFFS2_COMPR_H__ */
goto bad2;
}
if (retlen != sizeof(marker)) {
- printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+ printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %d, got %zd\n",
jeb->offset, sizeof(marker), retlen);
goto bad2;
}
continue;
}
if (retlen != rawlen) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %zd) reading header from obsolete node at %08x\n",
retlen, rawlen, ref_offset(raw));
continue;
}
--- /dev/null
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: proc.c,v 1.3 2004/06/24 09:51:38 havasi Exp $
+ *
+ * Files in /proc/fs/jffs2 directory:
+ * compr_list
+ * read: shows the list of the loaded compressors
+ * (name, priority, enabled/disabled)
+ * write: compressors can be enabled/disabled and
+ * the priority of them can be changed,
+ * required formats:
+ * enable COMPRESSOR_NAME
+ * disble COMPRESSOR_NAME
+ * priority NEW_PRIORITY COMPRESSOR_NAME
+ * compr_mode
+ * read: shows the name of the actual compression mode
+ * write: sets the actual compression mode
+ * compr_stat
+ * read: shows compression statistics
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include "compr.h"
+
+extern struct proc_dir_entry *jffs_proc_root;
+
+/* Structure for top-level entry in '/proc/fs' directory */
+static struct proc_dir_entry *jffs2_proc_root;
+
+/* Structure for files in /proc/fs/jffs2 directory */
+static struct proc_dir_entry *jffs2_proc_compr_stat;
+static struct proc_dir_entry *jffs2_proc_compr_mode;
+
+/* Read handler for the /proc/fs/jffs2/compr_stat file.
+ *
+ * jffs2_stats() returns a freshly allocated statistics string; this
+ * handler copies the slice starting at @off (at most @count bytes)
+ * into @page and sets *eof once the end of the string is reached.
+ */
+
+static int jffs2_proc_stat_read (char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = 0,i;
+	char *stat = jffs2_stats();
+
+	/* The statistics buffer is allocated dynamically and may be NULL
+	 * under memory pressure; fail gracefully instead of oopsing in
+	 * strlen() below. */
+	if (!stat) {
+		*eof = 1;
+		return 0;
+	}
+	if (strlen(stat) < off) {
+		*eof = 1;
+		kfree(stat);
+		return len;
+	}
+	for (i = off; ((stat[i] != 0) && (len < count)); i++, len++) {
+		page[len] = stat[i];
+	}
+	if (off + len >= strlen(stat))
+		*eof = 1;
+	else
+		*eof = 0;
+	kfree(stat);
+	return len;
+}
+
+
+/* Read handler for /proc/fs/jffs2/compr_mode: report the name of the
+ * compression mode currently in effect, followed by a newline. */
+
+static int jffs2_proc_mode_read (char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	const char *mode_name = jffs2_get_compression_mode_name();
+
+	/* A single read always returns the whole (short) name, so this
+	 * handler is one-shot. */
+	*eof = 1;
+	/* A mode name plus newline "should not happen" to exceed the
+	 * buffer; return an empty read if it somehow does. */
+	if (strlen(mode_name) + 1 > count)
+		return 0;
+	return sprintf(page, "%s\n", mode_name);
+}
+
+/* Write handler for /proc/fs/jffs2/compr_mode: parse a compression-mode
+ * name from the written data and switch the active mode to it.  Always
+ * consumes the whole write (returns count) so the writer does not loop.
+ */
+
+static int jffs2_proc_mode_write(struct file *file, const char *buffer,
+		unsigned long count, void *data)
+{
+	char *compr_name;
+
+	/* collect the name of the compression mode and set it */
+	compr_name = kmalloc(count+1,GFP_KERNEL);
+	if (!compr_name) {
+		/* previously the NULL result was passed straight to
+		 * sscanf(), oopsing on allocation failure */
+		printk(KERN_WARNING "JFFS2: unable to allocate memory\n");
+		return count;
+	}
+	/* NOTE(review): 'buffer' is procfs write data and is not guaranteed
+	 * to be NUL-terminated; confirm the procfs write_proc calling
+	 * convention before relying on sscanf() stopping in time. */
+	if (sscanf(buffer,"%s",compr_name)>0) {
+		if (jffs2_set_compression_mode_name(compr_name)) {
+			printk(KERN_WARNING "JFFS2: error switching compression mode. Invalid parameter (%s)?\n",compr_name);
+		}
+	}
+	else {
+		printk(KERN_WARNING "JFFS2: error: parameter missing\n");
+	}
+	kfree(compr_name);
+	return count;
+}
+
+/* Read handler for /proc/fs/jffs2/compr_list: list the registered
+ * compressors (name, priority, enabled/disabled) in one shot. */
+
+static int jffs2_proc_list_read (char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len = 0;
+	char *list = jffs2_list_compressors();
+
+	/* The list string is allocated dynamically; bail out if the
+	 * allocation failed rather than crashing in strlen(). */
+	if (!list) {
+		*eof = 1;
+		return 0;
+	}
+	if (strlen(list)+1 > count) {
+		/* it should not happen */
+		*eof = 1;
+		kfree(list);
+		return 0;
+	}
+	len += sprintf(page,"%s",list);
+	*eof = 1;
+	kfree(list);
+	return len;
+}
+
+/* Write handler for /proc/fs/jffs2/compr_list: enable/disable a
+ * compressor or change its priority.  Accepted commands:
+ *     enable COMPRESSOR_NAME
+ *     disable COMPRESSOR_NAME
+ *     priority NEW_PRIORITY COMPRESSOR_NAME
+ * Unknown input prints a usage message.  Always consumes the whole
+ * write (returns count).
+ */
+
+static int jffs2_proc_list_write(struct file *file, const char *buffer,
+		unsigned long count, void *data)
+{
+	int prior;
+	char *compr_name;
+
+	/* (the old unused 'compr_cmd' scratch buffer has been dropped) */
+	compr_name = kmalloc(count+1,GFP_KERNEL);
+	if (!compr_name) {
+		printk(KERN_WARNING "JFFS2: unable to allocate memory\n");
+		return count;
+	}
+	compr_name[0] = 0;
+
+	if (sscanf(buffer,"priority %d %s",&prior,compr_name)>1) {
+		jffs2_set_compressor_priority(compr_name, prior);
+		goto list_write_end;
+	}
+	if (sscanf(buffer,"enable %s",compr_name)>0) {
+		jffs2_enable_compressor_name(compr_name);
+		goto list_write_end;
+	}
+	if (sscanf(buffer,"disable %s",compr_name)>0) {
+		jffs2_disable_compressor_name(compr_name);
+		goto list_write_end;
+	}
+	printk(KERN_WARNING "JFFS2: usage of /proc/fs/jffs2/compr_list:\n"
+			" echo \"enable COMPRESSOR_NAME\" >/proc/fs/jffs2/compr_list\n"
+			" echo \"disable COMPRESSOR_NAME\" >/proc/fs/jffs2/compr_list\n"
+			" echo \"priority NEW_PRIORITY COMPRESSOR_NAME\" >/proc/fs/jffs2/compr_list\n");
+list_write_end:
+	kfree(compr_name);
+	return count;
+}
+
+/* Register the /proc/fs/jffs2 directory and its three entries
+ * (compr_stat, compr_mode, compr_list).  Returns 0 on success or
+ * -ENOMEM if any proc entry could not be created. */
+
+int jffs2_proc_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	jffs2_proc_root = proc_mkdir("jffs2", proc_root_fs);
+	/* proc_mkdir can fail; the original code dereferenced the
+	 * result unchecked via create_proc_entry */
+	if (!jffs2_proc_root)
+		return -ENOMEM;
+
+	/* create entry for 'compr_stat' file */
+	if ((jffs2_proc_compr_stat = create_proc_entry ("compr_stat", 0, jffs2_proc_root))) {
+		jffs2_proc_compr_stat->read_proc = jffs2_proc_stat_read;
+	}
+	else {
+		return -ENOMEM;
+	}
+	/* create entry for 'compr_mode' file */
+	if ((jffs2_proc_compr_mode = create_proc_entry ("compr_mode", 0, jffs2_proc_root))) {
+		jffs2_proc_compr_mode->read_proc = jffs2_proc_mode_read;
+		jffs2_proc_compr_mode->write_proc = jffs2_proc_mode_write;
+	}
+	else {
+		return -ENOMEM;
+	}
+	/* create entry for 'compr_list' file; use a local so the
+	 * 'compr_mode' entry pointer is not clobbered (the original
+	 * code mistakenly reused jffs2_proc_compr_mode here) */
+	if ((entry = create_proc_entry ("compr_list", 0, jffs2_proc_root))) {
+		entry->read_proc = jffs2_proc_list_read;
+		entry->write_proc = jffs2_proc_list_write;
+	}
+	else {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+
+/* Unregister the /proc/fs/jffs2 directory and its entries.
+ *
+ * The old "#if LINUX_VERSION_CODE < 0x020300" branch was dead code on
+ * this 2.6 kernel and passed the wrong pointer types (&jffs2_proc_root,
+ * &proc_root_fs - pointer-to-pointer instead of pointer); it has been
+ * removed. */
+
+int jffs2_proc_exit(void)
+{
+	remove_proc_entry ("compr_stat", jffs2_proc_root);
+	remove_proc_entry ("compr_mode", jffs2_proc_root);
+	remove_proc_entry ("compr_list", jffs2_proc_root);
+	remove_proc_entry ("jffs2", proc_root_fs);
+	return 0;
+}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: super.c,v 1.97 2004/07/16 15:17:57 dwmw2 Exp $
+ * $Id: super.c,v 1.96 2004/07/13 08:57:30 dwmw2 Exp $
*
*/
printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
return -ENOMEM;
}
+#ifdef CONFIG_JFFS2_PROC
+ ret = jffs2_proc_init();
+ if (ret) {
+ printk(KERN_ERR "JFFS2 error: Failed to initialise proc interface\n");
+ goto out;
+ }
+#endif
ret = jffs2_compressors_init();
if (ret) {
printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
jffs2_destroy_slab_caches();
out_compressors:
jffs2_compressors_exit();
+#ifdef CONFIG_JFFS2_PROC
+ jffs2_proc_exit();
+#endif
out:
return ret;
}
unregister_filesystem(&jffs2_fs_type);
jffs2_destroy_slab_caches();
jffs2_compressors_exit();
+#ifdef CONFIG_JFFS2_PROC
+ jffs2_proc_exit();
+#endif
kmem_cache_destroy(jffs2_inode_cachep);
}
/*
* Nobody gets write access to a read-only fs.
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
uid = le32_to_cpu(dip->di_uid);
gid = le32_to_cpu(dip->di_gid);
- ip->i_uid = INOXID_UID(XID_TAG(ip), uid, gid);
- ip->i_gid = INOXID_GID(XID_TAG(ip), uid, gid);
- ip->i_xid = INOXID_XID(XID_TAG(ip), uid, gid, 0);
+ ip->i_uid = INOXID_UID(uid, gid);
+ ip->i_gid = INOXID_GID(uid, gid);
+ ip->i_xid = INOXID_XID(uid, gid, 0);
ip->i_size = le64_to_cpu(dip->di_size);
ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
dip->di_nlink = cpu_to_le32(ip->i_nlink);
- uid = XIDINO_UID(XID_TAG(ip), ip->i_uid, ip->i_xid);
- gid = XIDINO_GID(XID_TAG(ip), ip->i_gid, ip->i_xid);
+ uid = XIDINO_UID(ip->i_uid, ip->i_xid);
+ gid = XIDINO_GID(ip->i_gid, ip->i_xid);
dip->di_uid = cpu_to_le32(uid);
dip->di_gid = cpu_to_le32(gid);
/*
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/vfs.h>
-#include <asm/uaccess.h>
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
mntput(mnt);
}
-ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
- const void *from, size_t available)
-{
- loff_t pos = *ppos;
- if (pos < 0)
- return -EINVAL;
- if (pos >= available)
- return 0;
- if (count > available - pos)
- count = available - pos;
- if (copy_to_user(to, from + pos, count))
- return -EFAULT;
- *ppos = pos + count;
- return count;
-}
-
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
- * See 'Documentation/mandatory.txt' for details.
+ * See 'linux/Documentation/mandatory.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
bh = bh->b_this_page;
} while (bh != head);
- /*
- * we cannot drop the bh if the page is not uptodate
- * or a concurrent readpage would fail to serialize with the bh
- * and it would read from disk before we reach the platter.
- */
- if (buffer_heads_over_limit && PageUptodate(page))
+ if (buffer_heads_over_limit)
try_to_free_buffers(page);
}
{
umode_t mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN|VX_WATCH))
return -EACCES;
if (mask & MAY_WRITE) {
return -EACCES;
}
+static inline int xid_permission(struct inode *inode)
+{
+ if (inode->i_xid == 0)
+ return 0;
+ if (vx_check(inode->i_xid, VX_ADMIN|VX_WATCH|VX_IDENT))
+ return 0;
+ return -EACCES;
+}
+
int permission(struct inode * inode,int mask, struct nameidata *nd)
{
int retval;
int submask;
- umode_t mode = inode->i_mode;
/* Ordinary permission routines do not understand MAY_APPEND. */
submask = mask & ~MAY_APPEND;
- if (nd && (mask & MAY_WRITE) && MNT_IS_RDONLY(nd->mnt) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
-
+ if ((retval = xid_permission(inode)))
+ return retval;
if (inode->i_op && inode->i_op->permission)
retval = inode->i_op->permission(inode, submask, nd);
else
{
struct path next;
struct inode *inode;
- int err, atomic;
+ int err;
unsigned int lookup_flags = nd->flags;
-
- atomic = (lookup_flags & LOOKUP_ATOMIC);
-
+
while (*name=='/')
name++;
if (!*name)
if (err < 0)
break;
}
- err = -EWOULDBLOCKIO;
- if (atomic)
- break;
nd->flags |= LOOKUP_CONTINUE;
/* This does the actual lookups.. */
err = do_lookup(nd, &this, &next);
if (err < 0)
break;
}
- err = -EWOULDBLOCKIO;
- if (atomic)
- break;
err = do_lookup(nd, &this, &next);
if (err)
break;
return permission(dir,MAY_WRITE | MAY_EXEC, nd);
}
-static inline int mnt_may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child) {
- if (child->d_inode)
- return -EEXIST;
- if (IS_DEADDIR(dir))
- return -ENOENT;
- if (mnt->mnt_flags & MNT_RDONLY)
- return -EROFS;
- return 0;
-}
-
-static inline int mnt_may_unlink(struct vfsmount *mnt, struct inode *dir, struct dentry *child) {
- if (!child->d_inode)
- return -ENOENT;
- if (mnt->mnt_flags & MNT_RDONLY)
- return -EROFS;
- return 0;
-}
-
/*
* Special case: O_CREAT|O_EXCL implies O_NOFOLLOW for security
* reasons.
if (f & O_DIRECTORY)
retval |= LOOKUP_DIRECTORY;
- if (f & O_ATOMICLOOKUP)
- retval |= LOOKUP_ATOMIC;
return retval;
}
return -EACCES;
flag &= ~O_TRUNC;
- } else if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt)))
- && (flag & FMODE_WRITE))
+ } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
return -EROFS;
/*
* An append-only file must be opened in append mode for writing.
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry;
- int error;
down(&nd->dentry->d_inode->i_sem);
- error = -EEXIST;
+ dentry = ERR_PTR(-EEXIST);
if (nd->last_type != LAST_NORM)
- goto out;
+ goto fail;
nd->flags &= ~LOOKUP_PARENT;
dentry = lookup_hash(&nd->last, nd->dentry);
if (IS_ERR(dentry))
- goto ret;
- error = mnt_may_create(nd->mnt, nd->dentry->d_inode, dentry);
- if (error)
goto fail;
- error = -ENOENT;
if (!is_dir && nd->last.name[nd->last.len] && !dentry->d_inode)
- goto fail;
-ret:
+ goto enoent;
return dentry;
-fail:
+enoent:
dput(dentry);
-out:
- return ERR_PTR(error);
+ dentry = ERR_PTR(-ENOENT);
+fail:
+ return dentry;
}
int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
dentry = lookup_hash(&nd.last, nd.dentry);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
- error = mnt_may_unlink(nd.mnt, nd.dentry->d_inode, dentry);
- if (error)
- goto exit2;
error = vfs_rmdir(nd.dentry->d_inode, dentry);
- exit2:
dput(dentry);
}
up(&nd.dentry->d_inode->i_sem);
/* Why not before? Because we want correct error value */
if (nd.last.name[nd.last.len])
goto slashes;
- error = mnt_may_unlink(nd.mnt, nd.dentry->d_inode, dentry);
- if (error)
- goto exit2;
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
error = path_lookup(to, LOOKUP_PARENT, &nd);
if (error)
goto out;
- /*
- * We allow hard-links to be created to a bind-mount as long
- * as the bind-mount is not read-only. Checking for cross-dev
- * links is subsumed by the superblock check in vfs_link().
- */
- error = -EROFS;
- if (MNT_IS_RDONLY(old_nd.mnt))
+ error = -EXDEV;
+ if (old_nd.mnt != nd.mnt)
goto out_release;
new_dentry = lookup_create(&nd, 0);
error = PTR_ERR(new_dentry);
error = -EINVAL;
if (old_dentry == trap)
goto exit4;
- error = -EROFS;
- if (MNT_IS_RDONLY(newnd.mnt))
- goto exit4;
new_dentry = lookup_hash(&newnd.last, new_dir);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
struct vfsmount *mnt = v;
int err = 0;
static struct proc_fs_info {
- int s_flag;
- int mnt_flag;
- char *set_str;
- char *unset_str;
+ int flag;
+ char *str;
} fs_info[] = {
- { MS_RDONLY, MNT_RDONLY, "ro", "rw" },
- { MS_SYNCHRONOUS, 0, ",sync", NULL },
- { MS_DIRSYNC, 0, ",dirsync", NULL },
- { MS_MANDLOCK, 0, ",mand", NULL },
- { MS_NOATIME, MNT_NOATIME, ",noatime", NULL },
- { MS_NODIRATIME, MNT_NODIRATIME, ",nodiratime", NULL },
- { MS_TAGXID, MS_TAGXID, ",tagxid", NULL },
- { 0, MNT_NOSUID, ",nosuid", NULL },
- { 0, MNT_NODEV, ",nodev", NULL },
- { 0, MNT_NOEXEC, ",noexec", NULL },
- { 0, 0, NULL, NULL }
+ { MS_SYNCHRONOUS, ",sync" },
+ { MS_DIRSYNC, ",dirsync" },
+ { MS_MANDLOCK, ",mand" },
+ { MS_NOATIME, ",noatime" },
+ { MS_NODIRATIME, ",nodiratime" },
+ { 0, NULL }
};
- struct proc_fs_info *p;
- unsigned long s_flags = mnt->mnt_sb->s_flags;
- int mnt_flags = mnt->mnt_flags;
+ static struct proc_fs_info mnt_info[] = {
+ { MNT_NOSUID, ",nosuid" },
+ { MNT_NODEV, ",nodev" },
+ { MNT_NOEXEC, ",noexec" },
+ { 0, NULL }
+ };
+ struct proc_fs_info *fs_infop;
if (vx_flags(VXF_HIDE_MOUNT, 0))
return 0;
seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
seq_putc(m, ' ');
mangle(m, mnt->mnt_sb->s_type->name);
- seq_putc(m, ' ');
- for (p = fs_info; (p->s_flag | p->mnt_flag) ; p++) {
- if ((s_flags & p->s_flag) || (mnt_flags & p->mnt_flag)) {
- if (p->set_str)
- seq_puts(m, p->set_str);
- } else {
- if (p->unset_str)
- seq_puts(m, p->unset_str);
- }
+ seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
+ for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_sb->s_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+ for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
}
if (mnt->mnt_sb->s_op->show_options)
err = mnt->mnt_sb->s_op->show_options(m, mnt);
/*
* do loopback mount.
*/
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
struct nameidata old_nd;
struct vfsmount *mnt = NULL;
- int recurse = flags & MS_REC;
int err = mount_is_safe(nd);
-
if (err)
return err;
if (!old_name || !*old_name)
spin_unlock(&vfsmount_lock);
} else
mntput(mnt);
- mnt->mnt_flags = mnt_flags;
}
up_write(¤t->namespace->sem);
((char *)data_page)[PAGE_SIZE - 1] = 0;
/* Separate the per-mountpoint flags */
- if (flags & MS_RDONLY)
- mnt_flags |= MNT_RDONLY;
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
- if (flags & MS_NOATIME)
- mnt_flags |= MNT_NOATIME;
- if (flags & MS_NODIRATIME)
- mnt_flags |= MNT_NODIRATIME;
flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
if (vx_ccaps(VXC_SECURE_MOUNT))
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
data_page);
else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+ retval = do_loopback(&nd, dev_name, flags & MS_REC);
else if (flags & MS_MOVE)
retval = do_move_mount(&nd, dev_name);
else
struct namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
struct fs_struct *fs = tsk->fs;
- struct vfsmount *p, *q;
if (!namespace)
return 0;
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
spin_unlock(&vfsmount_lock);
- /*
- * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
- * as belonging to new namespace. We have already acquired a private
- * fs_struct, so tsk->fs->lock is not needed.
- */
- p = namespace->root;
- q = new_ns->root;
- while (p) {
- q->mnt_namespace = new_ns;
- if (fs) {
+ /* Second pass: switch the tsk->fs->* elements */
+ if (fs) {
+ struct vfsmount *p, *q;
+ write_lock(&fs->lock);
+
+ p = namespace->root;
+ q = new_ns->root;
+ while (p) {
if (p == fs->rootmnt) {
rootmnt = p;
fs->rootmnt = mntget(q);
altrootmnt = p;
fs->altrootmnt = mntget(q);
}
+ p = next_mnt(p, namespace->root);
+ q = next_mnt(q, new_ns->root);
}
- p = next_mnt(p, namespace->root);
- q = next_mnt(q, new_ns->root);
+ write_unlock(&fs->lock);
}
up_write(&tsk->namespace->sem);
*ppos = pos;
- if (!IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt))) {
+ if (!IS_RDONLY(inode)) {
inode->i_atime = CURRENT_TIME;
}
#endif
}
if (!result)
- result = inode_setattr(inode, attr);
+ inode_setattr(inode, attr);
out:
unlock_kernel();
return result;
if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
> (1U << (32 - PAGE_SHIFT)))
return -EFBIG;
- if (!IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt))) {
+ if (!IS_RDONLY(inode)) {
inode->i_atime = CURRENT_TIME;
}
#include "ncpsign_kernel.h"
-static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
+static int _recv(struct socket *sock, unsigned char *ubuf, int size,
+ unsigned flags)
{
- struct msghdr msg = {NULL, };
- struct kvec iov = {buf, size};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
-}
+ struct iovec iov;
+ struct msghdr msg;
-static inline int do_send(struct socket *sock, struct kvec *vec, int count,
- int len, unsigned flags)
-{
- struct msghdr msg = { .msg_flags = flags };
- return kernel_sendmsg(sock, &msg, vec, count, len);
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ return sock_recvmsg(sock, &msg, size, flags);
}
-static int _send(struct socket *sock, const void *buff, int len)
+static inline int _send(struct socket *sock, const void *buff, int len)
{
- struct kvec vec;
- vec.iov_base = (void *) buff;
- vec.iov_len = len;
- return do_send(sock, &vec, 1, len, 0);
+ struct iovec iov;
+ struct msghdr msg;
+
+ iov.iov_base = (void *) buff;
+ iov.iov_len = len;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_flags = 0;
+
+ return sock_sendmsg(sock, &msg, len);
}
struct ncp_request_reply {
size_t datalen;
int result;
enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
- struct kvec* tx_ciov;
+ struct iovec* tx_ciov;
size_t tx_totallen;
size_t tx_iovlen;
- struct kvec tx_iov[3];
+ struct iovec tx_iov[3];
u_int16_t tx_type;
u_int32_t sign[6];
};
-void ncp_tcp_data_ready(struct sock *sk, int len)
-{
+void ncp_tcp_data_ready(struct sock *sk, int len) {
struct ncp_server *server = sk->sk_user_data;
server->data_ready(sk, len);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_error_report(struct sock *sk)
-{
+void ncp_tcp_error_report(struct sock *sk) {
struct ncp_server *server = sk->sk_user_data;
server->error_report(sk);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_write_space(struct sock *sk)
-{
+void ncp_tcp_write_space(struct sock *sk) {
struct ncp_server *server = sk->sk_user_data;
/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
not vice versa... */
server->write_space(sk);
- if (server->tx.creq)
+ if (server->tx.creq) {
schedule_work(&server->tx.tq);
+ }
}
-void ncpdgram_timeout_call(unsigned long v)
-{
+void ncpdgram_timeout_call(unsigned long v) {
struct ncp_server *server = (void*)v;
schedule_work(&server->timeout_tq);
}
-static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
-{
+static inline void ncp_finish_request(struct ncp_request_reply *req, int result) {
req->result = result;
req->status = RQ_DONE;
wake_up_all(&req->wq);
}
-static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
-{
+static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err) {
struct ncp_request_reply *req;
ncp_invalidate_conn(server);
}
}
-static inline int get_conn_number(struct ncp_reply_header *rp)
-{
+static inline int get_conn_number(struct ncp_reply_header *rp) {
return rp->conn_low | (rp->conn_high << 8);
}
-static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
+static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
/* If req is done, we got signal, but we also received answer... */
switch (req->status) {
case RQ_IDLE:
}
}
-static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
-{
+static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
down(&server->rcv.creq_sem);
__ncp_abort_request(server, req, err);
up(&server->rcv.creq_sem);
}
-static inline void __ncptcp_abort(struct ncp_server *server)
-{
+static inline void __ncptcp_abort(struct ncp_server *server) {
__abort_ncp_connection(server, NULL, 0);
}
-static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
-{
- struct kvec vec[3];
+static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) {
+ struct msghdr msg;
+ struct iovec iov[3];
+
/* sock_sendmsg updates iov pointers for us :-( */
- memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
- return do_send(sock, vec, req->tx_iovlen,
- req->tx_totallen, MSG_DONTWAIT);
+ memcpy(iov, req->tx_ciov, req->tx_iovlen * sizeof(iov[0]));
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = req->tx_iovlen;
+ msg.msg_flags = MSG_DONTWAIT;
+ return sock_sendmsg(sock, &msg, req->tx_totallen);
}
-static void __ncptcp_try_send(struct ncp_server *server)
-{
+static void __ncptcp_try_send(struct ncp_server *server) {
struct ncp_request_reply *rq;
- struct kvec *iov;
- struct kvec iovc[3];
+ struct msghdr msg;
+ struct iovec* iov;
+ struct iovec iovc[3];
int result;
rq = server->tx.creq;
- if (!rq)
+ if (!rq) {
return;
+ }
/* sock_sendmsg updates iov pointers for us :-( */
memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
- result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
- rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
-
- if (result == -EAGAIN)
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iovc;
+ msg.msg_iovlen = rq->tx_iovlen;
+ msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+ result = sock_sendmsg(server->ncp_sock, &msg, rq->tx_totallen);
+ if (result == -EAGAIN) {
return;
-
+ }
if (result < 0) {
printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
__ncp_abort_request(server, rq, result);
rq->tx_ciov = iov;
}
-static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
-{
+static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) {
req->status = RQ_INPROGRESS;
h->conn_low = server->connection;
h->conn_high = server->connection >> 8;
h->sequence = ++server->sequence;
}
-static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
size_t signlen;
struct ncp_request_header* h;
#define NCP_TCP_XMIT_VERSION (1)
#define NCP_TCP_RCVD_MAGIC (0x744E6350)
-static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
size_t signlen;
struct ncp_request_header* h;
__ncptcp_try_send(server);
}
-static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
if (server->ncp_sock->type == SOCK_STREAM)
ncptcp_start_request(server, req);
else
ncpdgram_start_request(server, req);
}
-static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
-{
+static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) {
down(&server->rcv.creq_sem);
if (!ncp_conn_valid(server)) {
up(&server->rcv.creq_sem);
return 0;
}
-static void __ncp_next_request(struct ncp_server *server)
-{
+static void __ncp_next_request(struct ncp_server *server) {
struct ncp_request_reply *req;
server->rcv.creq = NULL;
__ncp_start_request(server, req);
}
-static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
-{
+static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) {
if (server->info_sock) {
- struct kvec iov[2];
+ struct iovec iov[2];
+ struct msghdr msg;
__u32 hdr[2];
hdr[0] = cpu_to_be32(len + 8);
iov[1].iov_base = (void *) data;
iov[1].iov_len = len;
- do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 2;
+ msg.msg_flags = MSG_NOSIGNAL;
+
+ sock_sendmsg(server->info_sock, &msg, len + 8);
}
}
-void ncpdgram_rcv_proc(void *s)
-{
+static void __ncpdgram_rcv_proc(void *s) {
struct ncp_server *server = s;
struct socket* sock;
struct ncp_reply_header reply;
int result;
- result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
+ result = _recv(sock, (void*)&reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
if (result < 0) {
break;
}
up(&server->rcv.creq_sem);
}
drop:;
- _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
+ _recv(sock, (void*)&reply, sizeof(reply), MSG_DONTWAIT);
}
}
-static void __ncpdgram_timeout_proc(struct ncp_server *server)
-{
+void ncpdgram_rcv_proc(void *s) {
+ mm_segment_t fs;
+ struct ncp_server *server = s;
+
+ fs = get_fs();
+ set_fs(get_ds());
+ __ncpdgram_rcv_proc(server);
+ set_fs(fs);
+}
+
+static void __ncpdgram_timeout_proc(struct ncp_server *server) {
/* If timer is pending, we are processing another request... */
if (!timer_pending(&server->timeout_tm)) {
struct ncp_request_reply* req;
}
}
-void ncpdgram_timeout_proc(void *s)
-{
+void ncpdgram_timeout_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncpdgram_timeout_proc(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
}
-static inline void ncp_init_req(struct ncp_request_reply* req)
-{
+static inline void ncp_init_req(struct ncp_request_reply* req) {
init_waitqueue_head(&req->wq);
req->status = RQ_IDLE;
}
-static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
-{
+static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) {
int result;
if (buffer) {
return result;
}
-static int __ncptcp_rcv_proc(struct ncp_server *server)
-{
+static int __ncptcp_rcv_proc(struct ncp_server *server) {
/* We have to check the result, so store the complete header */
while (1) {
int result;
}
}
-void ncp_tcp_rcv_proc(void *s)
-{
+void ncp_tcp_rcv_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_rcv_proc(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
+ return;
}
-void ncp_tcp_tx_proc(void *s)
-{
+void ncp_tcp_tx_proc(void *s) {
+ mm_segment_t fs;
struct ncp_server *server = s;
+ fs = get_fs();
+ set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_try_send(server);
up(&server->rcv.creq_sem);
+ set_fs(fs);
+ return;
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
ncp_init_req(&req);
req.reply_buf = reply_buf;
req.datalen = max_reply_size;
- req.tx_iov[1].iov_base = server->packet;
+ req.tx_iov[1].iov_base = (void *) server->packet;
req.tx_iov[1].iov_len = size;
req.tx_iovlen = 1;
req.tx_totallen = size;
return -EIO;
}
{
+ mm_segment_t fs;
sigset_t old_set;
unsigned long mask, flags;
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
+ fs = get_fs();
+ set_fs(get_ds());
+
result = do_ncp_rpc_call(server, size, reply, max_reply_size);
+ set_fs(fs);
+
spin_lock_irqsave(¤t->sighand->siglock, flags);
current->blocked = old_set;
recalc_sigpending();
if (nd->flags & LOOKUP_DIRECTORY)
return 0;
/* Are we trying to write to a read only partition? */
- if ((IS_RDONLY(dir) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
- (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ if (IS_RDONLY(dir) && (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
return 0;
return 1;
}
int error;
int open_flags = 0;
- dfprintk(VFS, "NFS: create(%s/%ld, %s\n", dir->i_sb->s_id,
+ dfprintk(VFS, "NFS: create(%s/%ld, %s)\n", dir->i_sb->s_id,
dir->i_ino, dentry->d_name.name);
attr.ia_mode = mode;
*/
lock_kernel();
nfs_begin_data_update(dir);
+ dfprintk(VFS, "NFS: attr %d.%d #%d\n", attr.ia_uid, attr.ia_gid, attr.ia_xid);
inode = NFS_PROTO(dir)->create(dir, &dentry->d_name, &attr, open_flags);
nfs_end_data_update(dir);
if (!IS_ERR(inode)) {
+ dfprintk(VFS, "NFS: inode=%p %d.%d #%d\n", inode,
+ inode->i_uid, inode->i_gid, inode->i_xid);
d_instantiate(dentry, inode);
nfs_renew_times(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
* Nobody gets write access to a read-only fs.
*
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
#define NFSDBG_FACILITY NFSDBG_FILE
+static long nfs_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
static int nfs_file_open(struct inode *, struct file *);
static int nfs_file_release(struct inode *, struct file *);
static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
static int nfs_file_flush(struct file *);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
-static int nfs_check_flags(int flags);
struct file_operations nfs_file_operations = {
.llseek = remote_llseek,
.fsync = nfs_fsync,
.lock = nfs_lock,
.sendfile = nfs_file_sendfile,
- .check_flags = nfs_check_flags,
+ .fcntl = nfs_file_fcntl,
};
struct inode_operations nfs_file_inode_operations = {
# define IS_SWAPFILE(inode) (0)
#endif
-static int nfs_check_flags(int flags)
+#define nfs_invalid_flags (O_APPEND | O_DIRECT)
+
+/*
+ * Check for special cases that NFS doesn't support, and
+ * pass the rest to the generic fcntl function.
+ */
+static long
+nfs_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp)
{
- if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
- return -EINVAL;
+ switch (cmd) {
+ case F_SETFL:
+ if ((filp->f_flags & nfs_invalid_flags) == nfs_invalid_flags)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
- return 0;
+ return generic_file_fcntl(fd, cmd, arg, filp);
}
/*
{
struct nfs_server *server = NFS_SERVER(inode);
int (*open)(struct inode *, struct file *);
- int res;
+ int res = 0;
- res = nfs_check_flags(filp->f_flags);
- if (res)
- return res;
+ if ((filp->f_flags & nfs_invalid_flags) == nfs_invalid_flags)
+ return -EINVAL;
lock_kernel();
/* Do NFSv4 open() call */
}
static ssize_t
-nfs_file_sendfile(struct file *f
+nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
+ read_actor_t actor, void *target)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t res;
+
+ dfprintk(VFS, "nfs: sendfile(%s/%s, %lu@%Lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!res)
+ res = generic_file_sendfile(filp, ppos, count, actor, target);
+ return res;
+}
+
+static int
+nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ int status;
+
+ dfprintk(VFS, "nfs: mmap(%s/%s)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!status)
+ status = generic_file_mmap(file, vma);
+ return status;
+}
+
+/*
+ * Flush any dirty pages for this process, and check for write errors.
+ * The return status from this call provides a reliable indication of
+ * whether any write errors occurred for this process.
+ */
+static int
+nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ struct inode *inode = dentry->d_inode;
+ int status;
+
+ dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
+
+ lock_kernel();
+ status = nfs_wb_all(inode);
+ if (!status) {
+ status = file->f_error;
+ file->f_error = 0;
+ }
+ unlock_kernel();
+ return status;
+}
+
+/*
+ * This does the "real" work of the write. The generic routine has
+ * allocated the page, locked it, done all the page alignment stuff
+ * calculations etc. Now we should just copy the data from user
+ * space and write it back to the real medium..
+ *
+ * If the writer ends up delaying the write, the writer needs to
+ * increment the page use counts until he is done with the page.
+ */
+static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+ return nfs_flush_incompatible(file, page);
+}
+
+static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+{
+ long status;
+
+ lock_kernel();
+ status = nfs_updatepage(file, page, offset, to-offset);
+ unlock_kernel();
+ return status;
+}
+
+struct address_space_operations nfs_file_aops = {
+ .readpage = nfs_readpage,
+ .readpages = nfs_readpages,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepage = nfs_writepage,
+ .writepages = nfs_writepages,
+ .prepare_write = nfs_prepare_write,
+ .commit_write = nfs_commit_write,
+#ifdef CONFIG_NFS_DIRECTIO
+ .direct_IO = nfs_direct_IO,
+#endif
+};
+
+/*
+ * Write to a file (through the page cache).
+ */
+static ssize_t
+nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+{
+ struct dentry * dentry = iocb->ki_filp->f_dentry;
+ struct inode * inode = dentry->d_inode;
+ ssize_t result;
+
+#ifdef CONFIG_NFS_DIRECTIO
+ if (iocb->ki_filp->f_flags & O_DIRECT)
+ return nfs_file_direct_write(iocb, buf, count, pos);
+#endif
+
+ dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ inode->i_ino, (unsigned long) count, (unsigned long) pos);
+
+ result = -EBUSY;
+ if (IS_SWAPFILE(inode))
+ goto out_swapfile;
+ result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (result)
+ goto out;
+
+ result = count;
+ if (!count)
+ goto out;
+
+ result = generic_file_aio_write(iocb, buf, count, pos);
+out:
+ return result;
+
+out_swapfile:
+ printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
+ goto out;
+}
+
+/*
+ * Lock a (portion of) a file
+ */
+int
+nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode * inode = filp->f_mapping->host;
+ int status = 0;
+ int status2;
+
+ dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
+ inode->i_sb->s_id, inode->i_ino,
+ fl->fl_type, fl->fl_flags,
+ (long long)fl->fl_start, (long long)fl->fl_end);
+
+ if (!inode)
+ return -EINVAL;
+
+ /* No mandatory locks over NFS */
+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return -ENOLCK;
+
+ if (NFS_PROTO(inode)->version != 4) {
+ /* Fake OK code if mounted without NLM support */
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) {
if (IS_GETLK(cmd))
status = LOCK_USE_CLNT;
goto out_ok;
printk(KERN_ERR "nfs_delete_inode: inode %ld has pending RPC requests\n", inode->i_ino);
}
+// DLIMIT_FREE_INODE(inode->i_sb, inode->i_xid);
clear_inode(inode);
}
clnt->cl_intr = (server->flags & NFS_MOUNT_INTR) ? 1 : 0;
clnt->cl_softrtry = (server->flags & NFS_MOUNT_SOFT) ? 1 : 0;
clnt->cl_droppriv = (server->flags & NFS_MOUNT_BROKEN_SUID) ? 1 : 0;
- clnt->cl_tagxid = (server->flags & NFS_MOUNT_TAGXID) ? 1 : 0;
clnt->cl_chatty = 1;
return clnt;
if (inode->i_state & I_NEW) {
struct nfs_inode *nfsi = NFS_I(inode);
+/* if (DLIMIT_ALLOC_INODE(sb, inode->i_xid)) {
+ err = -ENOSPC;
+ goto fail_dlim;
+ }
+*/
/* We set i_ino for the few things that still rely on it,
* such as stat(2) */
inode->i_ino = hash;
nfsi->change_attr = fattr->change_attr;
inode->i_size = nfs_size_to_loff_t(fattr->size);
inode->i_nlink = fattr->nlink;
- inode->i_uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
+ inode->i_uid = INOXID_UID(fattr->uid, fattr->gid);
+ inode->i_gid = INOXID_GID(fattr->uid, fattr->gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ inode->i_xid = INOXID_XID(fattr->uid, fattr->gid, 0);
/* maybe fattr->xid someday */
if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
/*
} else if (S_ISREG(inode->i_mode) && new_isize > cur_size)
nfsi->flags |= NFS_INO_INVALID_ATTR;
- uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
+ uid = INOXID_UID(fattr->uid, fattr->gid);
+ gid = INOXID_GID(fattr->uid, fattr->gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ xid = INOXID_XID(fattr->uid, fattr->gid, 0);
/* Have any file permissions changed? */
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
- uid = INOXID_UID(XID_TAG(inode), fattr->uid, fattr->gid);
- gid = INOXID_GID(XID_TAG(inode), fattr->uid, fattr->gid);
- xid = INOXID_XID(XID_TAG(inode), fattr->uid, fattr->gid, 0);
+ uid = INOXID_UID(fattr->uid, fattr->gid);
+ gid = INOXID_GID(fattr->uid, fattr->gid);
+ if (inode->i_sb->s_flags & MS_TAGXID)
+ xid = INOXID_XID(fattr->uid, fattr->gid, 0);
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) ||
inode->i_uid != uid ||
static int
nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
int status, count, recvd, hdrlen;
if ((status = ntohl(*p++)))
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
}
static inline u32 *
-xdr_encode_sattr(u32 *p, struct iattr *attr, int tagxid)
+xdr_encode_sattr(u32 *p, struct iattr *attr)
{
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_UID ||
- (tagxid && (attr->ia_valid & ATTR_XID))) {
+ if (attr->ia_valid & ATTR_UID || attr->ia_valid & ATTR_XID) {
*p++ = xdr_one;
- *p++ = htonl(XIDINO_UID(tagxid, attr->ia_uid, attr->ia_xid));
+ *p++ = htonl(XIDINO_UID(attr->ia_uid, attr->ia_xid));
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_GID ||
- (tagxid && (attr->ia_valid & ATTR_XID))) {
+ if (attr->ia_valid & ATTR_GID || attr->ia_valid & ATTR_XID) {
*p++ = xdr_one;
- *p++ = htonl(XIDINO_GID(tagxid, attr->ia_gid, attr->ia_xid));
+ *p++ = htonl(XIDINO_GID(attr->ia_gid, attr->ia_xid));
} else {
*p++ = xdr_zero;
}
nfs3_xdr_sattrargs(struct rpc_rqst *req, u32 *p, struct nfs3_sattrargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
*p++ = htonl(args->guard);
if (args->guard)
p = xdr_encode_time3(p, &args->guardtime);
*p++ = args->verifier[0];
*p++ = args->verifier[1];
} else
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
{
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
p = xdr_encode_array(p, args->topath, args->tolen);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
*p++ = htonl(args->type);
- p = xdr_encode_sattr(p, args->sattr,
- req->rq_task->tk_client->cl_tagxid);
+ p = xdr_encode_sattr(p, args->sattr);
if (args->type == NF3CHR || args->type == NF3BLK) {
*p++ = htonl(MAJOR(args->rdev));
*p++ = htonl(MINOR(args->rdev));
nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
static int
nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
int status, count, ocount, recvd, hdrlen;
status = ntohl(*p++);
WRITE32(FATTR4_WORD0_FILEID);
WRITE32(0);
- /* set up reply kvec
+ /* set up reply iovec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READDIR + status + verifer(2) = 9
*/
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
- /* set up reply kvec
+ /* set up reply iovec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READLINK + status = 7
*/
if (status)
goto out;
- /* set up reply kvec
+ /* set up reply iovec
* toplevel status + taglen=0 + rescount + OP_PUTFH + status
* + OP_READ + status + eof + datalen = 9
*/
static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
+ struct iovec *iov = req->rq_rcv_buf.head;
uint32_t *p;
uint32_t count, eof, recvd, hdrlen;
int status;
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct page *page = *rcvbuf->pages;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
unsigned int nr, pglen = rcvbuf->page_len;
uint32_t *end, *entry, *p, *kaddr;
uint32_t len, attrlen, word;
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
+ struct iovec *iov = rcvbuf->head;
uint32_t *strlen;
unsigned int hdrlen, len;
char *string;
int status;
fattr.valid = 0;
+ memset(&fattr, 0, sizeof(struct nfs_fattr));
+
+
dprintk("NFS call create %s\n", name->name);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
dprintk("NFS reply create: %d\n", status);
}
if (cred->cr_uid != (uid_t) -1)
- current->fsuid = INOXID_UID(1, cred->cr_uid, cred->cr_gid);
+ current->fsuid = INOXID_UID(cred->cr_uid, cred->cr_gid);
else
current->fsuid = exp->ex_anon_uid;
if (cred->cr_gid != (gid_t) -1)
- current->fsgid = INOXID_GID(1, cred->cr_uid, cred->cr_gid);
+ current->fsgid = INOXID_GID(cred->cr_uid, cred->cr_gid);
else
current->fsgid = exp->ex_anon_gid;
- current->xid = INOXID_XID(1, cred->cr_uid, cred->cr_gid, 0);
+ current->xid = INOXID_XID(cred->cr_uid, cred->cr_gid, 0);
if (!cred->cr_group_info)
return -ENOMEM;
ret = set_current_groups(cred->cr_group_info);
- if (INOXID_UID(1, cred->cr_uid, cred->cr_gid)) {
+ if ((cred->cr_uid)) {
cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
} else {
cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
static inline u32 *
encode_fh(u32 *p, struct svc_fh *fhp)
{
- unsigned int size = fhp->fh_handle.fh_size;
+ int size = fhp->fh_handle.fh_size;
*p++ = htonl(size);
if (size) p[XDR_QUADLEN(size)-1]=0;
memcpy(p, &fhp->fh_handle.fh_base, size);
decode_sattr3(u32 *p, struct iattr *iap)
{
u32 tmp;
- uid_t uid = 0;
- gid_t gid = 0;
iap->ia_valid = 0;
}
if (*p++) {
iap->ia_valid |= ATTR_UID;
- uid = ntohl(*p++);
+ iap->ia_uid = ntohl(*p++);
}
if (*p++) {
iap->ia_valid |= ATTR_GID;
- gid = ntohl(*p++);
+ iap->ia_gid = ntohl(*p++);
}
- iap->ia_uid = INOXID_UID(1, uid, gid);
- iap->ia_gid = INOXID_GID(1, uid, gid);
- iap->ia_xid = INOXID_XID(1, uid, gid, 0);
if (*p++) {
u64 newsize;
*p++ = htonl((u32) stat.mode);
*p++ = htonl((u32) stat.nlink);
*p++ = htonl((u32) nfsd_ruid(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode), stat.uid, stat.xid)));
+ XIDINO_UID(stat.uid, stat.xid)));
*p++ = htonl((u32) nfsd_rgid(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode), stat.gid, stat.xid)));
+ XIDINO_GID(stat.gid, stat.xid)));
if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) {
p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
} else {
nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_readargs *args)
{
- unsigned int len;
+ int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh))
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
- /* set up the kvec */
+ /* set up the iovec */
v=0;
while (len > 0) {
pn = rqstp->rq_resused;
svc_take_page(rqstp);
args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
args->vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE;
- len -= args->vec[v].iov_len;
v++;
+ len -= PAGE_SIZE;
}
args->vlen = v;
return xdr_argsize_check(rqstp, p);
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_writeargs *args)
{
- unsigned int len, v, hdr;
+ int len, v;
if (!(p = decode_fh(p, &args->fh))
|| !(p = xdr_decode_hyper(p, &args->offset)))
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- if (rqstp->rq_arg.len < len + hdr)
- return 0;
-
args->vec[0].iov_base = (void*)p;
- args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
+ (((void*)p) - rqstp->rq_arg.head[0].iov_base);
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_symlinkargs *args)
{
- unsigned int len;
+ int len;
int avail;
char *old, *new;
- struct kvec *vec;
+ struct iovec *vec;
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
*/
svc_take_page(rqstp);
len = ntohl(*p++);
- if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
+ if (len <= 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
return 0;
args->tname = new = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
args->tlen = len;
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
status = get_write_access(filp->f_dentry->d_inode);
- if (status)
+ if (!status)
+ filp->f_mode = FMODE_WRITE;
+ else
return nfserrno(status);
- filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ;
}
return nfs_ok;
}
{
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
put_write_access(filp->f_dentry->d_inode);
- filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
+ filp->f_mode = FMODE_READ;
}
}
}
if (bmval1 & FATTR4_WORD1_OWNER) {
status = nfsd4_encode_user(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode),
- stat.uid, stat.xid), &p, &buflen);
+ XIDINO_UID(stat.uid, stat.xid), &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
status = nfsd4_encode_group(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode),
- stat.gid, stat.xid), &p, &buflen);
+ XIDINO_GID(stat.gid, stat.xid), &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
/*
* All that remains is to write the tag and operation count...
*/
- struct kvec *iov;
+ struct iovec *iov;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
static struct svc_cacherep * nfscache;
static int cache_disabled = 1;
-static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static int nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *vec);
/*
* locking for the reply cache:
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
struct svc_cacherep *rp;
- struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
+ struct iovec *resv = &rqstp->rq_res.head[0], *cachv;
int len;
if (!(rp = rqstp->rq_cacherep) || cache_disabled)
* keep a refcount....
*/
static int
-nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *data)
{
- struct kvec *vec = &rqstp->rq_res.head[0];
+ struct iovec *vec = &rqstp->rq_res.head[0];
if (vec->iov_len + data->iov_len > PAGE_SIZE) {
printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
decode_sattr(u32 *p, struct iattr *iap)
{
u32 tmp, tmp1;
- uid_t uid = 0;
- gid_t gid = 0;
iap->ia_valid = 0;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_UID;
- uid = tmp;
+ iap->ia_uid = tmp;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_GID;
- gid = tmp;
+ iap->ia_gid = tmp;
}
- iap->ia_uid = INOXID_UID(1, uid, gid);
- iap->ia_gid = INOXID_GID(1, uid, gid);
- iap->ia_xid = INOXID_XID(1, uid, gid, 0);
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = tmp;
*p++ = htonl((u32) stat.mode);
*p++ = htonl((u32) stat.nlink);
*p++ = htonl((u32) nfsd_ruid(rqstp,
- XIDINO_UID(XID_TAG(dentry->d_inode), stat.uid, stat.xid)));
+ XIDINO_UID(stat.uid, stat.xid)));
*p++ = htonl((u32) nfsd_rgid(rqstp,
- XIDINO_GID(XID_TAG(dentry->d_inode), stat.gid, stat.xid)));
+ XIDINO_GID(stat.gid, stat.xid)));
if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
nfssvc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_readargs *args)
{
- unsigned int len;
+ int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh)))
return 0;
svc_take_page(rqstp);
args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
args->vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE;
- len -= args->vec[v].iov_len;
v++;
+ len -= PAGE_SIZE;
}
args->vlen = v;
return xdr_argsize_check(rqstp, p);
nfssvc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_writeargs *args)
{
- unsigned int len;
+ int len;
int v;
if (!(p = decode_fh(p, &args->fh)))
return 0;
*/
int
nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *count)
+ struct iovec *vec, int vlen, unsigned long *count)
{
struct raparms *ra;
mm_segment_t oldfs;
} else {
oldfs = get_fs();
set_fs(KERNEL_DS);
- err = vfs_readv(&file, (struct iovec __user *)vec, vlen, &offset);
+ err = vfs_readv(&file, vec, vlen, &offset);
set_fs(oldfs);
}
*/
int
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen,
+ struct iovec *vec, int vlen,
unsigned long cnt, int *stablep)
{
struct svc_export *exp;
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
- err = vfs_writev(&file, (struct iovec __user *)vec, vlen, &offset);
+ err = vfs_writev(&file, vec, vlen, &offset);
set_fs(oldfs);
if (err >= 0) {
nfsdstats.io_write += cnt;
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
{
int err = fh_verify(rqstp, fhp, 0, MAY_NOP);
+
if (!err && vfs_statfs(fhp->fh_dentry->d_inode->i_sb,stat))
err = nfserr_io;
return err;
if (acc == MAY_NOP)
return 0;
#if 0
- dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
+ printk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
acc,
(acc & MAY_READ)? " read" : "",
(acc & MAY_WRITE)? " write" : "",
IS_IMMUTABLE(inode)? " immut" : "",
IS_APPEND(inode)? " append" : "",
IS_RDONLY(inode)? " ro" : "");
- dprintk(" owner %d/%d user %d/%d\n",
+ printk(" owner %d/%d user %d/%d\n",
inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
#endif
*/
if (!(acc & MAY_LOCAL_ACCESS))
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
- if (EX_RDONLY(exp) || IS_RDONLY(inode)
- || (exp && MNT_IS_RDONLY(exp->ex_mnt)))
+ if (EX_RDONLY(exp) || IS_RDONLY(inode))
return nfserr_rofs;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
- ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
+ ntfs_debug("Beginning sub-block at offset = 0x%x in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* or signals an error (both covered by the rc test).
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
- ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
+ ntfs_debug("In index root, offset 0x%x.", (u8*)ie - (u8*)ir);
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
goto read_partial_upcase_page;
}
vol->upcase_len = ino->i_size >> UCHAR_T_SIZE_BITS;
- ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
+ ntfs_debug("Read %llu bytes from $UpCase (expected %u bytes).",
ino->i_size, 64 * 1024 * sizeof(ntfschar));
iput(ino);
down(&ntfs_lock);
goto dput_and_out;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
error = -EPERM;
inode = nd.dentry->d_inode;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
/* Don't worry, the checks are done in inode_change_ok() */
inode = nd.dentry->d_inode;
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
/* Don't worry, the checks are done in inode_change_ok() */
if (!res) {
res = permission(nd.dentry->d_inode, mode, &nd);
/* SuS v2 requires we report a read only fs too */
- if(!res && (mode & S_IWOTH)
- && (IS_RDONLY(nd.dentry->d_inode) || MNT_IS_RDONLY(nd.mnt))
+ if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
&& !special_file(nd.dentry->d_inode->i_mode))
res = -EROFS;
path_release(&nd);
dentry = file->f_dentry;
inode = dentry->d_inode;
+ err = -EPERM;
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ goto out_putf;
err = -EROFS;
- if (IS_RDONLY(inode) || (file && MNT_IS_RDONLY(file->f_vfsmnt)))
+ if (IS_RDONLY(inode))
goto out_putf;
err = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out;
inode = nd.dentry->d_inode;
+ error = -EPERM;
+ if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
+ goto dput_and_out;
+
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.mnt))
+ if (IS_RDONLY(inode))
goto dput_and_out;
error = -EPERM;
return error;
}
-static int chown_common(struct dentry *dentry, struct vfsmount *mnt,
- uid_t user, gid_t group)
+static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
{
struct inode * inode;
int error;
goto out;
}
error = -EROFS;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
+ if (IS_RDONLY(inode))
goto out;
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
error = user_path_walk(filename, &nd);
if (!error) {
- error = chown_common(nd.dentry, nd.mnt, user, group);
+ error = chown_common(nd.dentry, user, group);
path_release(&nd);
}
return error;
error = user_path_walk_link(filename, &nd);
if (!error) {
- error = chown_common(nd.dentry, nd.mnt, user, group);
+ error = chown_common(nd.dentry, user, group);
path_release(&nd);
}
return error;
file = fget(fd);
if (file) {
- error = chown_common(file->f_dentry, file->f_vfsmnt, user, group);
+ error = chown_common(file->f_dentry, user, group);
fput(file);
}
return error;
if (!f)
goto cleanup_dentry;
f->f_flags = flags;
- f->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
+ f->f_mode = (flags+1) & O_ACCMODE;
inode = dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
error = get_write_access(inode);
FD_SET(fd, files->open_fds);
FD_CLR(fd, files->close_on_exec);
files->next_fd = fd + 1;
- // vx_openfd_inc(fd);
+ vx_openfd_inc(fd);
#if 1
/* Sanity check */
if (files->fd[fd] != NULL) {
__FD_CLR(fd, files->open_fds);
if (fd < files->next_fd)
files->next_fd = fd;
- // vx_openfd_dec(fd);
+ vx_openfd_dec(fd);
}
void fastcall put_unused_fd(unsigned int fd)
}
EXPORT_SYMBOL(generic_file_open);
-
-/*
- * This is used by subsystems that don't want seekable
- * file descriptors
- */
-int nonseekable_open(struct inode *inode, struct file *filp)
-{
- filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- return 0;
-}
-
-EXPORT_SYMBOL(nonseekable_open);
static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
static int openpromfs_unlink (struct inode *, struct dentry *dentry);
-static ssize_t nodenum_read(struct file *file, char __user *buf,
+static ssize_t nodenum_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
return count;
}
-static ssize_t property_read(struct file *filp, char __user *buf,
+static ssize_t property_read(struct file *filp, char *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
i = ((u32)(long)inode->u.generic_ip) >> 16;
if ((u16)((long)inode->u.generic_ip) == aliases) {
if (i >= aliases_nodes)
- p = NULL;
+ p = 0;
else
p = alias_names [i];
} else
return -EIO;
op->value [k] = 0;
if (k) {
- for (s = NULL, p = op->value; p < op->value + k; p++) {
+ for (s = 0, p = op->value; p < op->value + k; p++) {
if ((*p >= ' ' && *p <= '~') || *p == '\n') {
op->flag |= OPP_STRING;
s = p;
return count;
}
-static ssize_t property_write(struct file *filp, const char __user *buf,
+static ssize_t property_write(struct file *filp, const char *buf,
size_t count, loff_t *ppos)
{
int i, j, k;
if (filp->f_pos >= 0xffffff || count >= 0xffffff)
return -EINVAL;
if (!filp->private_data) {
- i = property_read (filp, NULL, 0, NULL);
+ i = property_read (filp, NULL, 0, 0);
if (i)
return i;
}
mask &= mask2;
if (mask) {
*first &= ~mask;
- *first |= simple_strtoul (tmp, NULL, 16);
+ *first |= simple_strtoul (tmp, 0, 16);
op->flag |= OPP_DIRTY;
}
} else {
for (j = 0; j < first_off; j++)
mask >>= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp,NULL,16);
+ *q |= simple_strtoul (tmp,0,16);
}
buf += 9;
} else if ((q == last - 1) && last_cnt
for (j = 0; j < 8 - last_cnt; j++)
mask <<= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp, NULL, 16);
+ *q |= simple_strtoul (tmp, 0, 16);
buf += last_cnt;
} else {
char tchars[17]; /* XXX yuck... */
if (copy_from_user(tchars, buf, 16))
return -EFAULT;
- *q = simple_strtoul (tchars, NULL, 16);
+ *q = simple_strtoul (tchars, 0, 16);
buf += 9;
}
}
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
- return -EIO;
+ return res;
for (p = 1; p < state->limit; p++) {
sector_t size = state->parts[p].size;
sector_t from = state->parts[p].from;
#endif
}
kfree(state);
- return 0;
+ return res;
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
+ /* pread is not allowed on pipes. */
+ if (unlikely(ppos != &filp->f_pos))
+ return -ESPIPE;
+
total_len = iov_length(iov, nr_segs);
/* Null read succeeds. */
if (unlikely(total_len == 0))
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
+ /* pwrite is not allowed on pipes. */
+ if (unlikely(ppos != &filp->f_pos))
+ return -ESPIPE;
+
total_len = iov_length(iov, nr_segs);
/* Null write succeeds. */
if (unlikely(total_len == 0))
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
}
if (ret > 0)
- inode_update_time(inode, filp->f_vfsmnt, 1); /* mtime and ctime */
+ inode_update_time(inode, 1); /* mtime and ctime */
return ret;
}
f1->f_pos = f2->f_pos = 0;
f1->f_flags = O_RDONLY;
f1->f_op = &read_pipe_fops;
- f1->f_mode = FMODE_READ;
+ f1->f_mode = 1;
f1->f_version = 0;
/* write file */
f2->f_flags = O_WRONLY;
f2->f_op = &write_pipe_fops;
- f2->f_mode = FMODE_WRITE;
+ f2->f_mode = 2;
f2->f_version = 0;
fd_install(i, f1);
return error;
}
-EXPORT_SYMBOL_GPL(do_pipe);
-
/*
* pipefs should _never_ be mounted by userland - too much of security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
TASK_INTERRUPTIBLE |
TASK_UNINTERRUPTIBLE |
TASK_ZOMBIE |
- TASK_DEAD |
TASK_STOPPED |
TASK_ONHOLD);
const char **p = &task_state_array[0];
{
struct group_info *group_info;
int g;
- pid_t pid, ppid, tgid;
+ pid_t ppid;
read_lock(&tasklist_lock);
- tgid = vx_map_tgid(current->vx_info, p->tgid);
- pid = vx_map_tgid(current->vx_info, p->pid);
ppid = vx_map_tgid(current->vx_info, p->real_parent->pid);
buffer += sprintf(buffer,
"State:\t%s\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
(p->sleep_avg/1024)*100/(1020000000/1024),
- tgid, pid, p->pid ? ppid : 0,
+ p->tgid,
+ p->pid, p->pid ? ppid : 0,
p->pid && p->ptrace ? p->parent->pid : 0,
p->uid, p->euid, p->suid, p->fsuid,
p->gid, p->egid, p->sgid, p->fsgid);
sigset_t sigign, sigcatch;
char state;
int res;
- pid_t pid, ppid, pgid = -1, sid = -1;
+ pid_t ppid, pgid = -1, sid = -1;
int num_threads = 0;
struct mm_struct *mm;
unsigned long long start_time;
if (bias_jiffies > task->start_time)
bias_jiffies = task->start_time;
}
- pid = vx_map_tgid(task->vx_info, task->pid);
mm = task->mm;
if(mm)
res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
- pid,
+ task->pid,
task->comm,
state,
ppid,
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/idr.h>
-#include <linux/namei.h>
#include <linux/vs_base.h>
#include <linux/vserver/inode.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
- inode->i_xid = vx_current_xid();
break;
}
}
child = NULL;
while ((child = of_get_next_child(np, child))) {
p = strrchr(child->full_name, '/');
- if (!p)
+ if (p == 0)
p = child->full_name;
else
++p;
lastp = &al->next;
}
of_node_put(child);
- *lastp = NULL;
+ *lastp = 0;
de->subdir = list;
}
struct device_node *root;
if ( !have_of )
return;
- proc_device_tree = proc_mkdir("device-tree", NULL);
+ proc_device_tree = proc_mkdir("device-tree", 0);
if (proc_device_tree == 0)
return;
root = of_find_node_by_path("/");
// create the default set of magic files
clstype = (RCFS_I(dentry->d_inode))->core->classtype;
rcfs_create_magic(dentry, &(((struct rcfs_magf *)clstype->mfdesc)[1]),
- clstype->mfcount - 3);
+ clstype->mfcount - 2);
return retval;
*resstr = NULL; \
\
if (!options) \
- return 0; \
+ return -EINVAL; \
\
while ((p = strsep(&options, ",")) != NULL) { \
substring_t args[MAX_OPT_ARGS]; \
switch (token) { \
case FUNC ## _res_type: \
*resstr = match_strdup(args); \
- if (!strcmp(#FUNC, "config")) { \
- char *str = p + strlen(p) + 1; \
- *otherstr = kmalloc(strlen(str) + 1, \
- GFP_KERNEL); \
- if (*otherstr == NULL) { \
- kfree(*resstr); \
- *resstr = NULL; \
- return 0; \
- } else { \
- strcpy(*otherstr, str); \
- return 1; \
- } \
- } \
break; \
case FUNC ## _str: \
*otherstr = match_strdup(args); \
break; \
default: \
- return 0; \
+ return -EINVAL; \
} \
} \
- return (*resstr != NULL); \
+ if (*resstr) \
+ return 0; \
+ return -EINVAL; \
}
#define MAGIC_WRITE(FUNC,CLSTYPEFUN) \
EXPORT_SYMBOL(FUNC ## _fileops);
/******************************************************************************
- * Shared function used by Target / Reclassify
+ * Target
*
+ * pseudo file for manually reclassifying members to a class
*
*****************************************************************************/
#define TARGET_MAX_INPUT_SIZE 100
static ssize_t
-target_reclassify_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos, int manual)
+target_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * ppos)
{
struct rcfs_inode_info *ri = RCFS_I(file->f_dentry->d_inode);
char *optbuf;
clstype = ri->core->classtype;
if (clstype->forced_reclassify)
- rc = (*clstype->forced_reclassify) (manual ? ri->core: NULL, optbuf);
+ rc = (*clstype->forced_reclassify) (ri->core, optbuf);
up(&(ri->vfs_inode.i_sem));
kfree(optbuf);
}
-/******************************************************************************
- * Target
- *
- * pseudo file for manually reclassifying members to a class
- *
- *****************************************************************************/
-
-static ssize_t
-target_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos)
-{
- return target_reclassify_write(file,buf,count,ppos,1);
-}
-
struct file_operations target_fileops = {
.write = target_write,
};
EXPORT_SYMBOL(target_fileops);
-/******************************************************************************
- * Reclassify
- *
- * pseudo file for reclassification of an object through CE
- *
- *****************************************************************************/
-
-static ssize_t
-reclassify_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos)
-{
- return target_reclassify_write(file,buf,count,ppos,0);
-}
-
-struct file_operations reclassify_fileops = {
- .write = reclassify_write,
-};
-
-EXPORT_SYMBOL(reclassify_fileops);
-
/******************************************************************************
* Config
*
static match_table_t config_tokens = {
{config_res_type, "res=%s"},
+ {config_str, "config=%s"},
{config_err, NULL},
};
}
}
- printk(KERN_DEBUG "Set %s shares to %d %d %d %d\n",
+ printk(KERN_ERR "Set %s shares to %d %d %d %d\n",
resname,
newshares.my_guarantee,
newshares.my_limit,
return -EINVAL;
rootdesc = &mfdesc[0];
- printk(KERN_DEBUG "allocating classtype root <%s>\n", rootdesc->name);
+ printk("allocating classtype root <%s>\n", rootdesc->name);
dentry = rcfs_create_internal(rcfs_rootde, rootdesc, 0);
if (!dentry) {
.i_op = &my_iops,
.i_fop = &target_fileops,
},
- {
- .name = "reclassify",
- .mode = RCFS_DEFAULT_FILE_MODE,
- .i_op = &my_iops,
- .i_fop = &reclassify_fileops,
- },
};
struct rcfs_magf sock_magf[] = {
clstype = ckrm_classtypes[i];
if (clstype == NULL)
continue;
- printk(KERN_DEBUG "A non null classtype\n");
+ printk("A non null classtype\n");
if ((rc = rcfs_register_classtype(clstype)))
continue; // could return with an error too
#define TC_FILE_MODE (S_IFREG | S_IRUGO | S_IWUSR)
-#define NR_TCROOTMF 7
+#define NR_TCROOTMF 6
struct rcfs_magf tc_rootdesc[NR_TCROOTMF] = {
/* First entry must be root */
{
.i_fop = &shares_fileops,
.i_op = &rcfs_file_inode_operations,
},
- // Reclassify and Config should be made available only at the
- // root level. Make sure they are the last two entries, as
- // rcfs_mkdir depends on it
- {
- .name = "reclassify",
- .mode = TC_FILE_MODE,
- .i_fop = &reclassify_fileops,
- .i_op = &rcfs_file_inode_operations,
- },
+ // Config should be made available only at the root level
+ // Make sure this is the last entry, as rcfs_mkdir depends on it
{
.name = "config",
.mode = TC_FILE_MODE,
{
loff_t (*fn)(struct file *, loff_t, int);
- fn = no_llseek;
- if (file->f_mode & FMODE_LSEEK) {
- fn = default_llseek;
- if (file->f_op && file->f_op->llseek)
- fn = file->f_op->llseek;
- }
+ fn = default_llseek;
+ if (file->f_op && file->f_op->llseek)
+ fn = file->f_op->llseek;
return fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);
EXPORT_SYMBOL(vfs_write);
-static inline loff_t file_pos_read(struct file *file)
-{
- return file->f_pos;
-}
-
-static inline void file_pos_write(struct file *file, loff_t pos)
-{
- file->f_pos = pos;
-}
-
asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
{
struct file *file;
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_read(file, buf, count, &pos);
- file_pos_write(file, pos);
+ ret = vfs_read(file, buf, count, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_write(file, buf, count, &pos);
- file_pos_write(file, pos);
+ ret = vfs_write(file, buf, count, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = -ESPIPE;
- if (file->f_mode & FMODE_PREAD)
- ret = vfs_read(file, buf, count, &pos);
+ ret = vfs_read(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = -ESPIPE;
- if (file->f_mode & FMODE_PWRITE)
- ret = vfs_write(file, buf, count, &pos);
+ ret = vfs_write(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_readv(file, vec, vlen, &pos);
- file_pos_write(file, pos);
+ ret = vfs_readv(file, vec, vlen, &file->f_pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- loff_t pos = file_pos_read(file);
- ret = vfs_writev(file, vec, vlen, &pos);
- file_pos_write(file, pos);
+ ret = vfs_writev(file, vec, vlen, &file->f_pos);
fput_light(file, fput_needed);
}
goto fput_in;
if (!in_file->f_op || !in_file->f_op->sendfile)
goto fput_in;
- retval = -ESPIPE;
if (!ppos)
ppos = &in_file->f_pos;
- else
- if (!(in_file->f_mode & FMODE_PREAD))
- goto fput_in;
retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, *ppos, count);
if (retval)
goto fput_in;
if (res)
goto out;
- inode_update_time(inode, file->f_vfsmnt, 1); /* Both mtime and ctime */
+ inode_update_time(inode, 1); /* Both mtime and ctime */
// Ok, we are done with all the checks.
REISERFS_I(inode)->i_attrs = sd_v2_attrs( sd );
sd_attrs_to_i_attrs( sd_v2_attrs( sd ), inode );
}
- inode->i_uid = INOXID_UID(XID_TAG(inode), uid, gid);
- inode->i_gid = INOXID_GID(XID_TAG(inode), uid, gid);
- inode->i_xid = INOXID_XID(XID_TAG(inode), uid, gid, 0);
+ inode->i_uid = INOXID_UID(uid, gid);
+ inode->i_gid = INOXID_GID(uid, gid);
+ inode->i_xid = INOXID_XID(uid, gid, 0);
pathrelse (path);
if (S_ISREG (inode->i_mode)) {
static void inode2sd (void * sd, struct inode * inode, loff_t size)
{
struct stat_data * sd_v2 = (struct stat_data *)sd;
- uid_t uid = XIDINO_UID(XID_TAG(inode), inode->i_uid, inode->i_xid);
- gid_t gid = XIDINO_GID(XID_TAG(inode), inode->i_gid, inode->i_xid);
+ uid_t uid = XIDINO_UID(inode->i_uid, inode->i_xid);
+ gid_t gid = XIDINO_GID(inode->i_gid, inode->i_xid);
__u16 flags;
set_sd_v2_uid(sd_v2, uid );
flags &= REISERFS_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case REISERFS_IOC_SETFLAGS: {
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EFAULT;
oldflags = REISERFS_I(inode) -> i_attrs;
- if ( ( (oldflags & REISERFS_IMMUTABLE_FL) ||
- ( (flags ^ oldflags) &
- (REISERFS_IMMUTABLE_FL | REISERFS_IUNLINK_FL |
- REISERFS_APPEND_FL) ) ) &&
- !capable( CAP_LINUX_IMMUTABLE ) )
+ if ( (oldflags & REISERFS_IMMUTABLE_FL) || ( ( (flags ^ oldflags) &
+ (REISERFS_IMMUTABLE_FL | REISERFS_IUNLINK_FL | REISERFS_APPEND_FL)) &&
+ !capable( CAP_LINUX_IMMUTABLE ) ) )
return -EPERM;
if( ( flags & REISERFS_NOTAIL_FL ) &&
case REISERFS_IOC_SETVERSION:
if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
- if (IS_RDONLY(inode) ||
- (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
+ if (IS_RDONLY(inode))
return -EROFS;
if (get_user(inode->i_generation, (int __user *) arg))
return -EFAULT;
{"noattrs", .clrmask = 1<<REISERFS_ATTRS},
{"user_xattr", .setmask = 1<<REISERFS_XATTRS_USER},
{"nouser_xattr",.clrmask = 1<<REISERFS_XATTRS_USER},
- {"tagxid", .setmask = 1<<REISERFS_TAGXID},
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
{"acl", .setmask = 1<<REISERFS_POSIXACL},
{"noacl", .clrmask = 1<<REISERFS_POSIXACL},
{"commit", .arg_required = 'c', .values = NULL},
{"usrquota",},
{"grpquota",},
+ {"tagxid", .setmask = 1<<REISERFS_TAGXID},
{NULL,}
};
{
umode_t mode = inode->i_mode;
- /* Prevent vservers from escaping chroot() barriers */
- if (IS_BARRIER(inode) && !vx_check(0, VX_ADMIN))
- return -EACCES;
-
if (mask & MAY_WRITE) {
/*
* Nobody gets write access to a read-only fs.
*/
- if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->mnt))) &&
+ if (IS_RDONLY(inode) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
}
#endif
} else {
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
check_groups:
-#endif
if (in_group_p(inode->i_gid))
mode >>= 3;
}
if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
return 0;
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
check_capabilities:
-#endif
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable if at least one exec bit is set.
sema_init(&p->sem, 1);
p->op = op;
file->private_data = p;
-
- /* SEQ files support lseek, but not pread/pwrite */
- file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
return 0;
}
EXPORT_SYMBOL(seq_open);
void *p;
int err = 0;
+ if (ppos != &file->f_pos)
+ return -EPIPE;
+
down(&m->sem);
/* grab buffer if we didn't have one */
if (!m->buf) {
int rq_bytes_sent;
int rq_iovlen;
- struct kvec rq_iov[4];
+ struct iovec rq_iov[4];
int (*rq_setup_read) (struct smb_request *);
void (*rq_callback) (struct smb_request *);
static int
_recvfrom(struct socket *socket, unsigned char *ubuf, int size, unsigned flags)
{
- struct kvec iov = {ubuf, size};
- struct msghdr msg = {.msg_flags = flags};
- msg.msg_flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
- return kernel_recvmsg(socket, &msg, &iov, 1, size, msg.msg_flags);
+ struct iovec iov;
+ struct msghdr msg;
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs(get_ds());
+ flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
+
+ msg.msg_flags = flags;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ iov.iov_base = ubuf;
+ iov.iov_len = size;
+
+ size = sock_recvmsg(socket, &msg, size, flags);
+
+ set_fs(fs);
+ return size;
}
/*
}
/*
- * Adjust the kvec to move on 'n' bytes (from nfs/sunrpc)
+ * Adjust the iovec to move on 'n' bytes (from nfs/sunrpc)
*/
static int
-smb_move_iov(struct kvec **data, size_t *num, struct kvec *vec, unsigned amount)
+smb_move_iov(struct msghdr *msg, struct iovec *niv, unsigned amount)
{
- struct kvec *iv = *data;
+ struct iovec *iv = msg->msg_iov;
int i;
int len;
/*
- * Eat any sent kvecs
+ * Eat any sent iovecs
*/
while (iv->iov_len <= amount) {
amount -= iv->iov_len;
iv++;
- (*num)--;
+ msg->msg_iovlen--;
}
/*
* And chew down the partial one
*/
- vec[0].iov_len = iv->iov_len-amount;
- vec[0].iov_base =((unsigned char *)iv->iov_base)+amount;
+ niv[0].iov_len = iv->iov_len-amount;
+ niv[0].iov_base =((unsigned char *)iv->iov_base)+amount;
iv++;
- len = vec[0].iov_len;
+ len = niv[0].iov_len;
/*
* And copy any others
*/
- for (i = 1; i < *num; i++) {
- vec[i] = *iv++;
- len += vec[i].iov_len;
+ for (i = 1; i < msg->msg_iovlen; i++) {
+ niv[i] = *iv++;
+ len += niv[i].iov_len;
}
- *data = vec;
+ msg->msg_iov = niv;
return len;
}
{
struct socket *sock;
unsigned int flags;
- struct kvec iov;
+ struct iovec iov;
struct msghdr msg;
+ mm_segment_t fs;
int rlen = smb_len(server->header) - server->smb_read + 4;
int result = -EIO;
- if (rlen > PAGE_SIZE)
- rlen = PAGE_SIZE;
-
sock = server_sock(server);
if (!sock)
goto out;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ fs = get_fs();
+ set_fs(get_ds());
+
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
iov.iov_base = drop_buffer;
iov.iov_len = PAGE_SIZE;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
msg.msg_control = NULL;
- result = kernel_recvmsg(sock, &msg, &iov, 1, rlen, flags);
+ if (rlen > PAGE_SIZE)
+ rlen = PAGE_SIZE;
+
+ result = sock_recvmsg(sock, &msg, rlen, flags);
+
+ set_fs(fs);
VERBOSE("read: %d\n", result);
if (result < 0) {
{
struct socket *sock;
unsigned int flags;
- struct kvec iov[4];
- struct kvec *p = req->rq_iov;
- size_t num = req->rq_iovlen;
+ struct iovec iov[4];
struct msghdr msg;
+ mm_segment_t fs;
int rlen;
int result = -EIO;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ fs = get_fs();
+ set_fs(get_ds());
+
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
+ msg.msg_iov = req->rq_iov;
+ msg.msg_iovlen = req->rq_iovlen;
msg.msg_control = NULL;
/* Dont repeat bytes and count available bufferspace */
- rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd);
+ rlen = smb_move_iov(&msg, iov, req->rq_bytes_recvd);
if (req->rq_rlen < rlen)
rlen = req->rq_rlen;
- result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
+ result = sock_recvmsg(sock, &msg, rlen, flags);
+
+ set_fs(fs);
VERBOSE("read: %d\n", result);
if (result < 0) {
int
smb_send_request(struct smb_request *req)
{
+ mm_segment_t fs;
struct smb_sb_info *server = req->rq_server;
struct socket *sock;
- struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
+ struct msghdr msg;
int slen = req->rq_slen - req->rq_bytes_sent;
int result = -EIO;
- struct kvec iov[4];
- struct kvec *p = req->rq_iov;
- size_t num = req->rq_iovlen;
+ struct iovec iov[4];
sock = server_sock(server);
if (!sock)
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_iov = req->rq_iov;
+ msg.msg_iovlen = req->rq_iovlen;
+ msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+
/* Dont repeat bytes */
if (req->rq_bytes_sent)
- smb_move_iov(&p, &num, iov, req->rq_bytes_sent);
+ smb_move_iov(&msg, iov, req->rq_bytes_sent);
- result = kernel_sendmsg(sock, &msg, p, num, slen);
+ fs = get_fs();
+ set_fs(get_ds());
+ result = sock_sendmsg(sock, &msg, slen);
+ set_fs(fs);
if (result >= 0) {
req->rq_bytes_sent += result;
#include <linux/vfs.h>
#include <linux/writeback.h> /* for the emergency remount stuff */
#include <linux/idr.h>
-#include <linux/devpts_fs.h>
-#include <linux/proc_fs.h>
#include <asm/uaccess.h>
sb = type->get_sb(type, flags, name, data);
if (IS_ERR(sb))
goto out_free_secdata;
-
- error = -EPERM;
- if (!capable(CAP_SYS_ADMIN) && !sb->s_bdev &&
- (sb->s_magic != PROC_SUPER_MAGIC) &&
- (sb->s_magic != DEVPTS_SUPER_MAGIC))
- goto out_sb;
-
error = security_sb_kern_mount(sb, secdata);
if (error)
goto out_sb;
#include "sysfs.h"
+/* Arbitrary magic number (spells "beer" in ASCII) */
+#define SYSFS_MAGIC 0x62656572
struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = SYSFS_SUPER_MAGIC;
+ sb->s_magic = SYSFS_MAGIC;
sb->s_op = &sysfs_ops;
sysfs_sb = sb;
int write,
struct file *filp,
void *buffer,
- size_t *lenp,
- loff_t *ppos)
+ size_t *lenp)
{
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
- ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
#define O_DIRECTORY 0100000 /* must be a directory */
#define O_NOFOLLOW 0200000 /* don't follow links */
#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
-#define O_ATOMICLOOKUP 01000000 /* do atomic file lookup */
#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
#define O_NOATIME 04000000
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
{INR_OPEN, INR_OPEN}, /* RLIMIT_NOFILE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \
- {32768, 32768 }, /* RLIMIT_MEMLOCK */ \
+ {PAGE_SIZE, PAGE_SIZE}, /* RLIMIT_MEMLOCK */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_LOCKS */ \
{MAX_SIGPENDING, MAX_SIGPENDING}, /* RLIMIT_SIGPENDING */ \
{MQ_BYTES_MAX, MQ_BYTES_MAX}, /* RLIMIT_MSGQUEUE */ \
#define __get_user_check(x,ptr,size,segment) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
__gu_err = 0; \
#define __put_user_check(x,ptr,size,segment) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
__pu_err = 0; \
--- /dev/null
+/* include/asm-arm/arch-lh7a40x/ide.h
+ *
+ * Copyright (C) 2004 Logic Product Development
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_IDE_H
+#define __ASM_ARCH_IDE_H
+
+#if defined (CONFIG_MACH_LPD7A400) || defined (CONFIG_MACH_LPD7A404)
+
+/* This implementation of ide.h only applies to the LPD CardEngines.
+ * Thankfully, there is less to do for the KEV.
+ */
+
+#include <linux/config.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/arch/registers.h>
+
+#define IDE_REG_LINE (1<<12) /* A12 drives !REG */
+#define IDE_ALT_LINE (1<<11) /* Unused A11 allows non-overlapping regions */
+#define IDE_CONTROLREG_OFFSET (0xe)
+
+void lpd7a40x_hwif_ioops (struct hwif_s* hwif);
+
+static __inline__ void ide_init_hwif_ports (hw_regs_t *hw, int data_port,
+ int ctrl_port, int *irq)
+{
+ ide_ioreg_t reg;
+ int i;
+ int regincr = 1;
+
+ memset (hw, 0, sizeof (*hw));
+
+ reg = (ide_ioreg_t) data_port;
+
+ for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+ hw->io_ports[i] = reg;
+ reg += regincr;
+ }
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = (ide_ioreg_t) ctrl_port;
+
+ if (irq)
+ *irq = IDE_NO_IRQ;
+}
+
+static __inline__ void ide_init_default_hwifs (void)
+{
+ hw_regs_t hw;
+ struct hwif_s* hwif;
+
+ ide_init_hwif_ports (&hw,
+ CF_VIRT + IDE_REG_LINE,
+ CF_VIRT + IDE_REG_LINE + IDE_ALT_LINE
+ + IDE_CONTROLREG_OFFSET,
+ NULL);
+
+ ide_register_hw (&hw, &hwif);
+ lpd7a40x_hwif_ioops (hwif); /* Override IO routines */
+}
+#endif
+
+#endif
extern unsigned long s3c2410_hclk;
extern unsigned long s3c2410_fclk;
-/* external functions for GPIO support
- *
- * These allow various different clients to access the same GPIO
- * registers without conflicting. If your driver only owns the entire
- * GPIO register, then it is safe to ioremap/__raw_{read|write} to it.
-*/
-
-/* s3c2410_gpio_cfgpin
- *
- * set the configuration of the given pin to the value passed.
- *
- * eg:
- * s3c2410_gpio_cfgpin(S3C2410_GPA0, S3C2410_GPA0_ADDR0);
- * s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
-*/
-
-extern void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function);
-
-/* s3c2410_gpio_pullup
- *
- * configure the pull-up control on the given pin
- *
- * to = 1 => disable the pull-up
- * 0 => enable the pull-up
- *
- * eg;
- *
- * s3c2410_gpio_pullup(S3C2410_GPB0, 0);
- * s3c2410_gpio_pullup(S3C2410_GPE8, 0);
-*/
-
-extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
-
-extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
-
#endif /* __ASSEMBLY__ */
#include <asm/sizes.h>
/* linux/include/asm/hardware/s3c2410/
*
- * Copyright (c) 2003,2004 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
+ * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+ * http://www.simtec.co.uk/products/SWLINUX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* 19-06-2003 BJD Created file
* 23-06-2003 BJD Updated GSTATUS registers
* 12-03-2004 BJD Updated include protection
- * 20-07-2004 BJD Added GPIO pin numbers, added Port A definitions
*/
#ifndef __ASM_ARCH_REGS_GPIO_H
#define __ASM_ARCH_REGS_GPIO_H "$Id: gpio.h,v 1.5 2003/05/19 12:51:08 ben Exp $"
-#define S3C2410_GPIONO(bank,offset) ((bank) + (offset))
-
-#define S3C2410_GPIO_BANKA (32*0)
-#define S3C2410_GPIO_BANKB (32*1)
-#define S3C2410_GPIO_BANKC (32*2)
-#define S3C2410_GPIO_BANKD (32*3)
-#define S3C2410_GPIO_BANKE (32*4)
-#define S3C2410_GPIO_BANKF (32*5)
-#define S3C2410_GPIO_BANKG (32*6)
-#define S3C2410_GPIO_BANKH (32*7)
-
-#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C2410_VA_GPIO)
-#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31)
-
-/* general configuration options */
-
-#define S3C2410_GPIO_LEAVE (0xFFFFFFFF)
-
/* configure GPIO ports A..G */
#define S3C2410_GPIOREG(x) ((x) + S3C2410_VA_GPIO)
#define S3C2410_GPACON S3C2410_GPIOREG(0x00)
#define S3C2410_GPADAT S3C2410_GPIOREG(0x04)
-#define S3C2410_GPA0 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 0)
-#define S3C2410_GPA0_OUT (0<<0)
-#define S3C2410_GPA0_ADDR0 (1<<0)
-
-#define S3C2410_GPA1 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 1)
-#define S3C2410_GPA1_OUT (0<<1)
-#define S3C2410_GPA1_ADDR16 (1<<1)
-
-#define S3C2410_GPA2 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 2)
-#define S3C2410_GPA2_OUT (0<<2)
-#define S3C2410_GPA2_ADDR17 (1<<2)
-
-#define S3C2410_GPA3 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 3)
-#define S3C2410_GPA3_OUT (0<<3)
-#define S3C2410_GPA3_ADDR18 (1<<3)
-
-#define S3C2410_GPA4 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 4)
-#define S3C2410_GPA4_OUT (0<<4)
-#define S3C2410_GPA4_ADDR19 (1<<4)
-
-#define S3C2410_GPA5 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 5)
-#define S3C2410_GPA5_OUT (0<<5)
-#define S3C2410_GPA5_ADDR20 (1<<5)
-
-#define S3C2410_GPA6 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 6)
-#define S3C2410_GPA6_OUT (0<<6)
-#define S3C2410_GPA6_ADDR21 (1<<6)
-
-#define S3C2410_GPA7 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 7)
-#define S3C2410_GPA7_OUT (0<<7)
-#define S3C2410_GPA7_ADDR22 (1<<7)
-
-#define S3C2410_GPA8 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 8)
-#define S3C2410_GPA8_OUT (0<<8)
-#define S3C2410_GPA8_ADDR23 (1<<8)
-
-#define S3C2410_GPA9 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 9)
-#define S3C2410_GPA9_OUT (0<<9)
-#define S3C2410_GPA9_ADDR24 (1<<9)
-
-#define S3C2410_GPA10 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 10)
-#define S3C2410_GPA10_OUT (0<<10)
-#define S3C2410_GPA10_ADDR25 (1<<10)
-
-#define S3C2410_GPA11 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 11)
-#define S3C2410_GPA11_OUT (0<<11)
-#define S3C2410_GPA11_ADDR26 (1<<11)
-
-#define S3C2410_GPA12 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 12)
-#define S3C2410_GPA12_OUT (0<<12)
-#define S3C2410_GPA12_nGCS1 (1<<12)
-
-#define S3C2410_GPA13 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 13)
-#define S3C2410_GPA13_OUT (0<<13)
-#define S3C2410_GPA13_nGCS2 (1<<13)
-
-#define S3C2410_GPA14 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 14)
-#define S3C2410_GPA14_OUT (0<<14)
-#define S3C2410_GPA14_nGCS3 (1<<14)
-
-#define S3C2410_GPA15 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 15)
-#define S3C2410_GPA15_OUT (0<<15)
-#define S3C2410_GPA15_nGCS4 (1<<15)
-
-#define S3C2410_GPA16 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 16)
-#define S3C2410_GPA16_OUT (0<<16)
-#define S3C2410_GPA16_nGCS5 (1<<16)
-
-#define S3C2410_GPA17 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 17)
-#define S3C2410_GPA17_OUT (0<<17)
-#define S3C2410_GPA17_CLE (1<<17)
-
-#define S3C2410_GPA18 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 18)
-#define S3C2410_GPA18_OUT (0<<18)
-#define S3C2410_GPA18_ALE (1<<18)
-
-#define S3C2410_GPA19 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 19)
-#define S3C2410_GPA19_OUT (0<<19)
-#define S3C2410_GPA19_nFWE (1<<19)
-
-#define S3C2410_GPA20 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 20)
-#define S3C2410_GPA20_OUT (0<<20)
-#define S3C2410_GPA20_nFRE (1<<20)
-
-#define S3C2410_GPA21 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 21)
-#define S3C2410_GPA21_OUT (0<<21)
-#define S3C2410_GPA21_nRSTOUT (1<<21)
-
-#define S3C2410_GPA22 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 22)
-#define S3C2410_GPA22_OUT (0<<22)
-#define S3C2410_GPA22_nFCE (1<<22)
-
/* 0x08 and 0x0c are reserved */
/* GPB is 10 IO pins, each configured by 2 bits each in GPBCON.
/* no i/o pin in port b can have value 3! */
-#define S3C2410_GPB0 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 0)
#define S3C2410_GPB0_INP (0x00 << 0)
#define S3C2410_GPB0_OUTP (0x01 << 0)
#define S3C2410_GPB0_TOUT0 (0x02 << 0)
-#define S3C2410_GPB1 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 1)
#define S3C2410_GPB1_INP (0x00 << 2)
#define S3C2410_GPB1_OUTP (0x01 << 2)
#define S3C2410_GPB1_TOUT1 (0x02 << 2)
-#define S3C2410_GPB2 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 2)
#define S3C2410_GPB2_INP (0x00 << 4)
#define S3C2410_GPB2_OUTP (0x01 << 4)
#define S3C2410_GPB2_TOUT2 (0x02 << 4)
-#define S3C2410_GPB3 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 3)
#define S3C2410_GPB3_INP (0x00 << 6)
#define S3C2410_GPB3_OUTP (0x01 << 6)
#define S3C2410_GPB3_TOUT3 (0x02 << 6)
-#define S3C2410_GPB4 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 4)
#define S3C2410_GPB4_INP (0x00 << 8)
#define S3C2410_GPB4_OUTP (0x01 << 8)
#define S3C2410_GPB4_TCLK0 (0x02 << 8)
#define S3C2410_GPB4_MASK (0x03 << 8)
-#define S3C2410_GPB5 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 5)
#define S3C2410_GPB5_INP (0x00 << 10)
#define S3C2410_GPB5_OUTP (0x01 << 10)
#define S3C2410_GPB5_nXBACK (0x02 << 10)
-#define S3C2410_GPB6 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 6)
#define S3C2410_GPB6_INP (0x00 << 12)
#define S3C2410_GPB6_OUTP (0x01 << 12)
#define S3C2410_GPB6_nXBREQ (0x02 << 12)
-#define S3C2410_GPB7 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 7)
#define S3C2410_GPB7_INP (0x00 << 14)
#define S3C2410_GPB7_OUTP (0x01 << 14)
#define S3C2410_GPB7_nXDACK1 (0x02 << 14)
-#define S3C2410_GPB8 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 8)
#define S3C2410_GPB8_INP (0x00 << 16)
#define S3C2410_GPB8_OUTP (0x01 << 16)
#define S3C2410_GPB8_nXDREQ1 (0x02 << 16)
-#define S3C2410_GPB9 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 9)
#define S3C2410_GPB9_INP (0x00 << 18)
#define S3C2410_GPB9_OUTP (0x01 << 18)
#define S3C2410_GPB9_nXDACK0 (0x02 << 18)
-#define S3C2410_GPB10 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 10)
-#define S3C2410_GPB10_INP (0x00 << 18)
-#define S3C2410_GPB10_OUTP (0x01 << 18)
+#define S3C2410_GPB10_INP (0x00 << 18)
+#define S3C2410_GPB10_OUTP (0x01 << 18)
#define S3C2410_GPB10_nXDRE0 (0x02 << 18)
/* Port C consits of 16 GPIO/Special function
#define S3C2410_GPCDAT S3C2410_GPIOREG(0x24)
#define S3C2410_GPCUP S3C2410_GPIOREG(0x28)
-#define S3C2410_GPC0 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 0)
#define S3C2410_GPC0_INP (0x00 << 0)
#define S3C2410_GPC0_OUTP (0x01 << 0)
#define S3C2410_GPC0_LEND (0x02 << 0)
-#define S3C2410_GPC1 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 1)
#define S3C2410_GPC1_INP (0x00 << 2)
#define S3C2410_GPC1_OUTP (0x01 << 2)
#define S3C2410_GPC1_VCLK (0x02 << 2)
-#define S3C2410_GPC2 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 2)
#define S3C2410_GPC2_INP (0x00 << 4)
#define S3C2410_GPC2_OUTP (0x01 << 4)
#define S3C2410_GPC2_VLINE (0x02 << 4)
-#define S3C2410_GPC3 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 3)
#define S3C2410_GPC3_INP (0x00 << 6)
#define S3C2410_GPC3_OUTP (0x01 << 6)
#define S3C2410_GPC3_VFRAME (0x02 << 6)
-#define S3C2410_GPC4 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 4)
#define S3C2410_GPC4_INP (0x00 << 8)
#define S3C2410_GPC4_OUTP (0x01 << 8)
#define S3C2410_GPC4_VM (0x02 << 8)
-#define S3C2410_GPC5 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 5)
#define S3C2410_GPC5_INP (0x00 << 10)
#define S3C2410_GPC5_OUTP (0x01 << 10)
#define S3C2410_GPC5_LCDVF0 (0x02 << 10)
-#define S3C2410_GPC6 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 6)
#define S3C2410_GPC6_INP (0x00 << 12)
#define S3C2410_GPC6_OUTP (0x01 << 12)
#define S3C2410_GPC6_LCDVF1 (0x02 << 12)
-#define S3C2410_GPC7 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 7)
#define S3C2410_GPC7_INP (0x00 << 14)
#define S3C2410_GPC7_OUTP (0x01 << 14)
#define S3C2410_GPC7_LCDVF2 (0x02 << 14)
-#define S3C2410_GPC8 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 8)
#define S3C2410_GPC8_INP (0x00 << 16)
#define S3C2410_GPC8_OUTP (0x01 << 16)
#define S3C2410_GPC8_VD0 (0x02 << 16)
-#define S3C2410_GPC9 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 9)
#define S3C2410_GPC9_INP (0x00 << 18)
#define S3C2410_GPC9_OUTP (0x01 << 18)
#define S3C2410_GPC9_VD1 (0x02 << 18)
-#define S3C2410_GPC10 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 10)
#define S3C2410_GPC10_INP (0x00 << 20)
#define S3C2410_GPC10_OUTP (0x01 << 20)
#define S3C2410_GPC10_VD2 (0x02 << 20)
-#define S3C2410_GPC11 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 11)
#define S3C2410_GPC11_INP (0x00 << 22)
#define S3C2410_GPC11_OUTP (0x01 << 22)
#define S3C2410_GPC11_VD3 (0x02 << 22)
-#define S3C2410_GPC12 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 12)
#define S3C2410_GPC12_INP (0x00 << 24)
#define S3C2410_GPC12_OUTP (0x01 << 24)
#define S3C2410_GPC12_VD4 (0x02 << 24)
-#define S3C2410_GPC13 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 13)
#define S3C2410_GPC13_INP (0x00 << 26)
#define S3C2410_GPC13_OUTP (0x01 << 26)
#define S3C2410_GPC13_VD5 (0x02 << 26)
-#define S3C2410_GPC14 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 14)
#define S3C2410_GPC14_INP (0x00 << 28)
#define S3C2410_GPC14_OUTP (0x01 << 28)
#define S3C2410_GPC14_VD6 (0x02 << 28)
-#define S3C2410_GPC15 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 15)
#define S3C2410_GPC15_INP (0x00 << 30)
#define S3C2410_GPC15_OUTP (0x01 << 30)
#define S3C2410_GPC15_VD7 (0x02 << 30)
#define S3C2410_GPDDAT S3C2410_GPIOREG(0x34)
#define S3C2410_GPDUP S3C2410_GPIOREG(0x38)
-#define S3C2410_GPD0 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 0)
#define S3C2410_GPD0_INP (0x00 << 0)
#define S3C2410_GPD0_OUTP (0x01 << 0)
#define S3C2410_GPD0_VD8 (0x02 << 0)
-#define S3C2410_GPD1 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 1)
#define S3C2410_GPD1_INP (0x00 << 2)
#define S3C2410_GPD1_OUTP (0x01 << 2)
#define S3C2410_GPD1_VD9 (0x02 << 2)
-#define S3C2410_GPD2 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 2)
#define S3C2410_GPD2_INP (0x00 << 4)
#define S3C2410_GPD2_OUTP (0x01 << 4)
#define S3C2410_GPD2_VD10 (0x02 << 4)
-#define S3C2410_GPD3 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 3)
#define S3C2410_GPD3_INP (0x00 << 6)
#define S3C2410_GPD3_OUTP (0x01 << 6)
#define S3C2410_GPD3_VD11 (0x02 << 6)
-#define S3C2410_GPD4 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 4)
#define S3C2410_GPD4_INP (0x00 << 8)
#define S3C2410_GPD4_OUTP (0x01 << 8)
#define S3C2410_GPD4_VD12 (0x02 << 8)
-#define S3C2410_GPD5 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 5)
#define S3C2410_GPD5_INP (0x00 << 10)
#define S3C2410_GPD5_OUTP (0x01 << 10)
#define S3C2410_GPD5_VD13 (0x02 << 10)
-#define S3C2410_GPD6 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 6)
#define S3C2410_GPD6_INP (0x00 << 12)
#define S3C2410_GPD6_OUTP (0x01 << 12)
#define S3C2410_GPD6_VD14 (0x02 << 12)
-#define S3C2410_GPD7 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 7)
#define S3C2410_GPD7_INP (0x00 << 14)
#define S3C2410_GPD7_OUTP (0x01 << 14)
#define S3C2410_GPD7_VD15 (0x02 << 14)
-#define S3C2410_GPD8 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 8)
#define S3C2410_GPD8_INP (0x00 << 16)
#define S3C2410_GPD8_OUTP (0x01 << 16)
#define S3C2410_GPD8_VD16 (0x02 << 16)
-#define S3C2410_GPD9 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 9)
#define S3C2410_GPD9_INP (0x00 << 18)
#define S3C2410_GPD9_OUTP (0x01 << 18)
#define S3C2410_GPD9_VD17 (0x02 << 18)
-#define S3C2410_GPD10 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 10)
#define S3C2410_GPD10_INP (0x00 << 20)
#define S3C2410_GPD10_OUTP (0x01 << 20)
#define S3C2410_GPD10_VD18 (0x02 << 20)
-#define S3C2410_GPD11 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 11)
#define S3C2410_GPD11_INP (0x00 << 22)
#define S3C2410_GPD11_OUTP (0x01 << 22)
#define S3C2410_GPD11_VD19 (0x02 << 22)
-#define S3C2410_GPD12 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 12)
#define S3C2410_GPD12_INP (0x00 << 24)
#define S3C2410_GPD12_OUTP (0x01 << 24)
#define S3C2410_GPD12_VD20 (0x02 << 24)
-#define S3C2410_GPD13 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 13)
#define S3C2410_GPD13_INP (0x00 << 26)
#define S3C2410_GPD13_OUTP (0x01 << 26)
#define S3C2410_GPD13_VD21 (0x02 << 26)
-#define S3C2410_GPD14 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 14)
#define S3C2410_GPD14_INP (0x00 << 28)
#define S3C2410_GPD14_OUTP (0x01 << 28)
#define S3C2410_GPD14_VD22 (0x02 << 28)
-#define S3C2410_GPD15 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 15)
#define S3C2410_GPD15_INP (0x00 << 30)
#define S3C2410_GPD15_OUTP (0x01 << 30)
#define S3C2410_GPD15_VD23 (0x02 << 30)
#define S3C2410_GPEDAT S3C2410_GPIOREG(0x44)
#define S3C2410_GPEUP S3C2410_GPIOREG(0x48)
-#define S3C2410_GPE0 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 0)
#define S3C2410_GPE0_INP (0x00 << 0)
#define S3C2410_GPE0_OUTP (0x01 << 0)
#define S3C2410_GPE0_I2SLRCK (0x02 << 0)
#define S3C2410_GPE0_MASK (0x03 << 0)
-#define S3C2410_GPE1 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 1)
#define S3C2410_GPE1_INP (0x00 << 2)
#define S3C2410_GPE1_OUTP (0x01 << 2)
#define S3C2410_GPE1_I2SSCLK (0x02 << 2)
#define S3C2410_GPE1_MASK (0x03 << 2)
-#define S3C2410_GPE2 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 2)
#define S3C2410_GPE2_INP (0x00 << 4)
#define S3C2410_GPE2_OUTP (0x01 << 4)
#define S3C2410_GPE2_CDCLK (0x02 << 4)
-#define S3C2410_GPE3 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 3)
#define S3C2410_GPE3_INP (0x00 << 6)
#define S3C2410_GPE3_OUTP (0x01 << 6)
#define S3C2410_GPE3_I2SSDI (0x02 << 6)
#define S3C2410_GPE3_MASK (0x03 << 6)
-#define S3C2410_GPE4 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 4)
#define S3C2410_GPE4_INP (0x00 << 8)
#define S3C2410_GPE4_OUTP (0x01 << 8)
#define S3C2410_GPE4_I2SSDO (0x02 << 8)
#define S3C2410_GPE4_MASK (0x03 << 8)
-#define S3C2410_GPE5 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 5)
#define S3C2410_GPE5_INP (0x00 << 10)
#define S3C2410_GPE5_OUTP (0x01 << 10)
#define S3C2410_GPE5_SDCLK (0x02 << 10)
-#define S3C2410_GPE6 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 6)
#define S3C2410_GPE6_INP (0x00 << 12)
#define S3C2410_GPE6_OUTP (0x01 << 12)
#define S3C2410_GPE6_SDCLK (0x02 << 12)
-#define S3C2410_GPE7 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 7)
#define S3C2410_GPE7_INP (0x00 << 14)
#define S3C2410_GPE7_OUTP (0x01 << 14)
#define S3C2410_GPE7_SDCMD (0x02 << 14)
-#define S3C2410_GPE8 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 8)
#define S3C2410_GPE8_INP (0x00 << 16)
#define S3C2410_GPE8_OUTP (0x01 << 16)
#define S3C2410_GPE8_SDDAT1 (0x02 << 16)
-#define S3C2410_GPE9 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 9)
#define S3C2410_GPE9_INP (0x00 << 18)
#define S3C2410_GPE9_OUTP (0x01 << 18)
#define S3C2410_GPE9_SDDAT2 (0x02 << 18)
-#define S3C2410_GPE10 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 10)
#define S3C2410_GPE10_INP (0x00 << 20)
#define S3C2410_GPE10_OUTP (0x01 << 20)
#define S3C2410_GPE10_SDDAT3 (0x02 << 20)
-#define S3C2410_GPE11 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 11)
#define S3C2410_GPE11_INP (0x00 << 22)
#define S3C2410_GPE11_OUTP (0x01 << 22)
#define S3C2410_GPE11_SPIMISO0 (0x02 << 22)
-#define S3C2410_GPE12 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 12)
#define S3C2410_GPE12_INP (0x00 << 24)
#define S3C2410_GPE12_OUTP (0x01 << 24)
#define S3C2410_GPE12_SPIMOSI0 (0x02 << 24)
-#define S3C2410_GPE13 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 13)
#define S3C2410_GPE13_INP (0x00 << 26)
#define S3C2410_GPE13_OUTP (0x01 << 26)
#define S3C2410_GPE13_SPICLK0 (0x02 << 26)
-#define S3C2410_GPE14 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 14)
#define S3C2410_GPE14_INP (0x00 << 28)
#define S3C2410_GPE14_OUTP (0x01 << 28)
#define S3C2410_GPE14_IICSCL (0x02 << 28)
#define S3C2410_GPE14_MASK (0x03 << 28)
-#define S3C2410_GPE15 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 15)
#define S3C2410_GPE15_INP (0x00 << 30)
#define S3C2410_GPE15_OUTP (0x01 << 30)
#define S3C2410_GPE15_IICSDA (0x02 << 30)
#define S3C2410_GPFDAT S3C2410_GPIOREG(0x54)
#define S3C2410_GPFUP S3C2410_GPIOREG(0x58)
-#define S3C2410_GPF0 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 0)
+
#define S3C2410_GPF0_INP (0x00 << 0)
#define S3C2410_GPF0_OUTP (0x01 << 0)
#define S3C2410_GPF0_EINT0 (0x02 << 0)
-#define S3C2410_GPF1 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 1)
#define S3C2410_GPF1_INP (0x00 << 2)
#define S3C2410_GPF1_OUTP (0x01 << 2)
#define S3C2410_GPF1_EINT1 (0x02 << 2)
-#define S3C2410_GPF2 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 2)
#define S3C2410_GPF2_INP (0x00 << 4)
#define S3C2410_GPF2_OUTP (0x01 << 4)
#define S3C2410_GPF2_EINT2 (0x02 << 4)
-#define S3C2410_GPF3 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 3)
#define S3C2410_GPF3_INP (0x00 << 6)
#define S3C2410_GPF3_OUTP (0x01 << 6)
#define S3C2410_GPF3_EINT3 (0x02 << 6)
-#define S3C2410_GPF4 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 4)
#define S3C2410_GPF4_INP (0x00 << 8)
#define S3C2410_GPF4_OUTP (0x01 << 8)
#define S3C2410_GPF4_EINT4 (0x02 << 8)
-#define S3C2410_GPF5 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 5)
#define S3C2410_GPF5_INP (0x00 << 10)
#define S3C2410_GPF5_OUTP (0x01 << 10)
#define S3C2410_GPF5_EINT5 (0x02 << 10)
-#define S3C2410_GPF6 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 6)
#define S3C2410_GPF6_INP (0x00 << 12)
#define S3C2410_GPF6_OUTP (0x01 << 12)
#define S3C2410_GPF6_EINT6 (0x02 << 12)
-#define S3C2410_GPF7 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 7)
#define S3C2410_GPF7_INP (0x00 << 14)
#define S3C2410_GPF7_OUTP (0x01 << 14)
#define S3C2410_GPF7_EINT7 (0x02 << 14)
#define S3C2410_GPGDAT S3C2410_GPIOREG(0x64)
#define S3C2410_GPGUP S3C2410_GPIOREG(0x68)
-#define S3C2410_GPG0 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 0)
#define S3C2410_GPG0_INP (0x00 << 0)
#define S3C2410_GPG0_OUTP (0x01 << 0)
#define S3C2410_GPG0_EINT8 (0x02 << 0)
-#define S3C2410_GPG1 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 1)
#define S3C2410_GPG1_INP (0x00 << 2)
#define S3C2410_GPG1_OUTP (0x01 << 2)
#define S3C2410_GPG1_EINT9 (0x02 << 2)
-#define S3C2410_GPG2 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 2)
#define S3C2410_GPG2_INP (0x00 << 4)
#define S3C2410_GPG2_OUTP (0x01 << 4)
#define S3C2410_GPG2_EINT10 (0x02 << 4)
-#define S3C2410_GPG3 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 3)
#define S3C2410_GPG3_INP (0x00 << 6)
#define S3C2410_GPG3_OUTP (0x01 << 6)
#define S3C2410_GPG3_EINT11 (0x02 << 6)
-#define S3C2410_GPG4 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 4)
#define S3C2410_GPG4_INP (0x00 << 8)
#define S3C2410_GPG4_OUTP (0x01 << 8)
#define S3C2410_GPG4_EINT12 (0x02 << 8)
#define S3C2410_GPG4_LCDPWREN (0x03 << 8)
-#define S3C2410_GPG5 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 5)
#define S3C2410_GPG5_INP (0x00 << 10)
#define S3C2410_GPG5_OUTP (0x01 << 10)
#define S3C2410_GPG5_EINT13 (0x02 << 10)
#define S3C2410_GPG5_SPIMISO1 (0x03 << 10)
-#define S3C2410_GPG6 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 6)
#define S3C2410_GPG6_INP (0x00 << 12)
#define S3C2410_GPG6_OUTP (0x01 << 12)
#define S3C2410_GPG6_EINT14 (0x02 << 12)
#define S3C2410_GPG6_SPIMOSI1 (0x03 << 12)
-#define S3C2410_GPG7 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 7)
#define S3C2410_GPG7_INP (0x00 << 14)
#define S3C2410_GPG7_OUTP (0x01 << 14)
#define S3C2410_GPG7_EINT15 (0x02 << 14)
#define S3C2410_GPG7_SPICLK1 (0x03 << 14)
-#define S3C2410_GPG8 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 8)
#define S3C2410_GPG8_INP (0x00 << 16)
#define S3C2410_GPG8_OUTP (0x01 << 16)
#define S3C2410_GPG8_EINT16 (0x02 << 16)
-#define S3C2410_GPG9 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 9)
#define S3C2410_GPG9_INP (0x00 << 18)
#define S3C2410_GPG9_OUTP (0x01 << 18)
#define S3C2410_GPG9_EINT17 (0x02 << 18)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG10_INP (0x00 << 20)
#define S3C2410_GPG10_OUTP (0x01 << 20)
#define S3C2410_GPG10_EINT18 (0x02 << 20)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG11_INP (0x00 << 22)
#define S3C2410_GPG11_OUTP (0x01 << 22)
#define S3C2410_GPG11_EINT19 (0x02 << 22)
#define S3C2410_GPG11_TCLK1 (0x03 << 22)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG12_INP (0x00 << 24)
#define S3C2410_GPG12_OUTP (0x01 << 24)
#define S3C2410_GPG12_EINT18 (0x02 << 24)
#define S3C2410_GPG12_XMON (0x03 << 24)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG13_INP (0x00 << 26)
#define S3C2410_GPG13_OUTP (0x01 << 26)
#define S3C2410_GPG13_EINT18 (0x02 << 26)
#define S3C2410_GPG13_nXPON (0x03 << 26)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG14_INP (0x00 << 28)
#define S3C2410_GPG14_OUTP (0x01 << 28)
#define S3C2410_GPG14_EINT18 (0x02 << 28)
#define S3C2410_GPG14_YMON (0x03 << 28)
-#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG15_INP (0x00 << 30)
#define S3C2410_GPG15_OUTP (0x01 << 30)
#define S3C2410_GPG15_EINT18 (0x02 << 30)
#define S3C2410_GPHDAT S3C2410_GPIOREG(0x74)
#define S3C2410_GPHUP S3C2410_GPIOREG(0x78)
-#define S3C2410_GPH0 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 0)
#define S3C2410_GPH0_INP (0x00 << 0)
#define S3C2410_GPH0_OUTP (0x01 << 0)
#define S3C2410_GPH0_nCTS0 (0x02 << 0)
-#define S3C2410_GPH1 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 1)
#define S3C2410_GPH1_INP (0x00 << 2)
#define S3C2410_GPH1_OUTP (0x01 << 2)
#define S3C2410_GPH1_nRTS0 (0x02 << 2)
-#define S3C2410_GPH2 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 2)
#define S3C2410_GPH2_INP (0x00 << 4)
#define S3C2410_GPH2_OUTP (0x01 << 4)
#define S3C2410_GPH2_TXD0 (0x02 << 4)
-#define S3C2410_GPH3 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 3)
#define S3C2410_GPH3_INP (0x00 << 6)
#define S3C2410_GPH3_OUTP (0x01 << 6)
#define S3C2410_GPH3_RXD0 (0x02 << 6)
-#define S3C2410_GPH4 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 4)
#define S3C2410_GPH4_INP (0x00 << 8)
#define S3C2410_GPH4_OUTP (0x01 << 8)
#define S3C2410_GPH4_TXD1 (0x02 << 8)
-#define S3C2410_GPH5 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 5)
#define S3C2410_GPH5_INP (0x00 << 10)
#define S3C2410_GPH5_OUTP (0x01 << 10)
#define S3C2410_GPH5_RXD1 (0x02 << 10)
-#define S3C2410_GPH6 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 6)
#define S3C2410_GPH6_INP (0x00 << 12)
#define S3C2410_GPH6_OUTP (0x01 << 12)
#define S3C2410_GPH6_TXD2 (0x02 << 12)
#define S3C2410_GPH6_nRTS1 (0x03 << 12)
-#define S3C2410_GPH7 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 7)
#define S3C2410_GPH7_INP (0x00 << 14)
#define S3C2410_GPH7_OUTP (0x01 << 14)
#define S3C2410_GPH7_RXD2 (0x02 << 14)
#define S3C2410_GPH7_nCTS1 (0x03 << 14)
-#define S3C2410_GPH8 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 8)
#define S3C2410_GPH8_INP (0x00 << 16)
#define S3C2410_GPH8_OUTP (0x01 << 16)
#define S3C2410_GPH8_UCLK (0x02 << 16)
-#define S3C2410_GPH9 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 9)
-#define S3C2410_GPH9_INP (0x00 << 18)
-#define S3C2410_GPH9_OUTP (0x01 << 18)
-#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
+#define S3C2410_GPH9_INP (0x00 << 18)
+#define S3C2410_GPH9_OUTP (0x01 << 18)
+#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
-#define S3C2410_GPH10 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 10)
-#define S3C2410_GPH10_INP (0x00 << 20)
-#define S3C2410_GPH10_OUTP (0x01 << 20)
-#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
+#define S3C2410_GPH10_INP (0x00 << 20)
+#define S3C2410_GPH10_OUTP (0x01 << 20)
+#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
/* miscellaneous control */
extern int _find_first_zero_bit_be(void * p, unsigned size);
extern int _find_next_zero_bit_be(void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+extern int _find_next_bit_be(unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See Documentation/cachetlb.txt for more information.
+ * See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
#define MAX_HWIFS 4
#endif
-#if defined(CONFIG_ARCH_SA1100)
+#if defined(CONFIG_ARCH_LH7A40X) || defined(CONFIG_ARCH_SA1100)
# include <asm/arch/ide.h> /* obsolete + broken */
#endif
-#if !defined(CONFIG_ARCH_L7200)
+#if !defined(CONFIG_ARCH_L7200) && !defined(CONFIG_ARCH_LH7A40X)
# define IDE_ARCH_OBSOLETE_INIT
# ifdef CONFIG_ARCH_CLPS7500
# define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
# else
# define ide_default_io_ctl(base) (0)
# endif
-#endif /* !ARCH_L7200 */
+#endif /* !ARCH_L7200 && !ARCH_LH7A40X */
#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * linux/Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
* See arch/arm/kernel/sys-arm.c for ugly details..
*/
struct ipc_kludge {
- struct msgbuf __user *msgp;
+ struct msgbuf *msgp;
long msgtyp;
};
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See linux/Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
+typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
sigset_t sa_mask; /* mask last for extensibility */
};
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void __user *ss_sp;
+ void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
- __chk_user_ptr(addr); \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
#define get_user(x,p) \
({ \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ const register typeof(*(p)) *__p asm("r0") = (p); \
register typeof(*(p)) __r1 asm("r1"); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
- __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
#define put_user(x,p) \
({ \
const register typeof(*(p)) __r1 asm("r1") = (x); \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ const register typeof(*(p)) *__p asm("r0") = (p); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * linux/Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See Documentation/arm/Setup
+ * hardware it's running on. See linux/Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _CRIS_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define page_test_and_clear_young(page) (0)
#endif
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
#endif /* _ASM_GENERIC_PGTABLE_H */
#endif /* __ASSEMBLY__ */
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _H8300_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
+/* child inherits the personality of the parent */
#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec_binary(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
-
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
return order;
}
-extern int devmem_is_allowed(unsigned long pagenr);
-
#endif /* __ASSEMBLY__ */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define VM_DATA_DEFAULT_FLAGS \
- (VM_READ | VM_WRITE | \
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#define check_pgt_cache() do { } while (0)
+#define HAVE_ARCH_UNMAPPED_AREA 1
+
#endif /* _I386_PGALLOC_H */
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
+#define SHLIB_BASE 0x00111000
+
#define __HAVE_ARCH_ALIGN_STACK
extern unsigned long arch_align_stack(unsigned long sp);
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#define __HAVE_ARCH_MMAP_TOP
+extern unsigned long mmap_top(void);
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
*/
#if !defined(IN_STRING_C)
-#define __HAVE_ARCH_STRCPY
static inline char * strcpy(char * dest,const char *src)
{
int d0, d1, d2;
return dest;
}
-#define __HAVE_ARCH_STRNCPY
static inline char * strncpy(char * dest,const char *src,size_t count)
{
int d0, d1, d2, d3;
return count;
}
-#define __HAVE_ARCH_STRCAT
static inline char * strcat(char * dest,const char * src)
{
int d0, d1, d2, d3;
return dest;
}
-#define __HAVE_ARCH_STRNCAT
static inline char * strncat(char * dest,const char * src,size_t count)
{
int d0, d1, d2, d3;
return dest;
}
-#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char * cs,const char * ct)
{
int d0, d1;
return __res;
}
-#define __HAVE_ARCH_STRNCMP
static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
return __res;
}
-#define __HAVE_ARCH_STRCHR
static inline char * strchr(const char * s, int c)
{
int d0;
return __res;
}
-#define __HAVE_ARCH_STRRCHR
static inline char * strrchr(const char * s, int c)
{
int d0, d1;
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
+#include <linux/compiler.h>
#include <asm/page.h>
#define VERIFY_READ 0
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
might_sleep(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
: "m"(__m(addr)), "i"(errret), "0"(err))
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
-unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long
+static inline unsigned long __must_check
__direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static inline unsigned long __must_check
__direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long
+static inline unsigned long __must_check
direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static inline unsigned long __must_check
direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
#define __NR_sys_kexec_load 283
-#define __NR_ioprio_set 284
-#define __NR_ioprio_get 285
-#define NR_syscalls 286
+#define NR_syscalls 284
-#ifndef __KERNEL_SYSCALLS_NO_ERRNO__
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
#define __syscall_return(type, res) \
return (type) (res); \
} while (0)
-#else
-# define __syscall_return(type, res) return (type) (res)
-#endif
-
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
#define _syscall0(type,name) \
type name(void) \
* won't be any messing with the stack from main(), but we define
* some others too.
*/
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifdef CONFIG_IA64_CYCLONE
extern int use_cyclone;
-extern void __init cyclone_setup(void);
+extern int __init cyclone_setup(char*);
#else /* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
-static inline void cyclone_setup(void)
+static inline void cyclone_setup(char* s)
{
printk(KERN_ERR "Cyclone Counter: System not configured"
" w/ CONFIG_IA64_CYCLONE.\n");
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
-#define elf_read_implies_exec(ex, have_pt_gnu_stack) \
- (!(have_pt_gnu_stack) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+struct elf64_hdr;
+extern void ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter);
+#define SET_PERSONALITY(ex, ibcs2) ia64_set_personality(&(ex), ibcs2)
struct task_struct;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
-typedef u8 ia64_mv_irq_to_vector (unsigned int);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef u8 ia64_mv_irq_to_vector (u8);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
-#elif defined(CONFIG_IA64_HP_ZX1)
-/* Max 32 Nodes */
-#define NODES_SHIFT 5
#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
/* Max 256 Nodes */
#define NODES_SHIFT 8
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
- (((current->personality & READ_IMPLIES_EXEC) != 0) \
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
? VM_EXEC : 0))
-#define devmem_is_allowed(x) 1
-
#endif /* _ASM_IA64_PAGE_H */
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
- unsigned long *ret = NULL;
+ unsigned long *ret = pgd_quicklist;
- preempt_disable();
-
- ret = pgd_quicklist;
if (likely(ret != NULL)) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
} else
ret = NULL;
-
- preempt_enable();
-
return (pgd_t *) ret;
}
static inline void
pgd_free (pgd_t *pgd)
{
- preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
++pgtable_cache_size;
- preempt_enable();
}
static inline void
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
- unsigned long *ret = NULL;
+ unsigned long *ret = (unsigned long *)pmd_quicklist;
- preempt_disable();
-
- ret = (unsigned long *)pmd_quicklist;
if (likely(ret != NULL)) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
}
-
- preempt_enable();
-
return (pmd_t *)ret;
}
static inline void
pmd_free (pmd_t *pmd)
{
- preempt_disable();
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
++pgtable_cache_size;
- preempt_enable();
}
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
-/* Look up a pgd entry in the gate area. On IA-64, the gate-area
- resides in the kernel-mapped segment, hence we use pgd_offset_k()
- here. */
-#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PGD_OFFSET_GATE
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
#define _ASM_IA64_PROCESSOR_H
/*
- * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
+#define IA64_THREAD_XSTACK (__IA64_UL(1) << 8) /* stack executable by default? */
#define IA64_THREAD_UAC_SHIFT 3
#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
-#include <linux/compiler.h>
-#include <asm/intrinsics.h>
-extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void * sn_io_addr(unsigned long port); /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
+#include <asm/intrinsics.h>
#define __sn_mf_a() ia64_mfa()
* Specify the minimum PROM revsion required for this kernel.
* Note that they're stored in hex format...
*/
-#define SN_SAL_MIN_MAJOR 0x3 /* SN2 kernels need at least PROM 3.40 */
-#define SN_SAL_MIN_MINOR 0x40
+#define SN_SAL_MIN_MAJOR 0x1 /* SN2 kernels need at least PROM 1.0 */
+#define SN_SAL_MIN_MINOR 0x0
u64 ia64_sn_probe_io_slot(long paddr, long size, void *data_ptr);
#define __NR_syslog 1117
#define __NR_setitimer 1118
#define __NR_getitimer 1119
-#define __NR_tux 1120 /* was __NR_old_stat */
+/* 1120 was __NR_old_stat */
/* 1121 was __NR_old_lstat */
/* 1122 was __NR_old_fstat */
#define __NR_vhangup 1123
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
+static inline void __constant_set_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
+static inline void __generic_set_bit(int nr, unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
+static inline void __constant_clear_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
+static inline void __generic_clear_bit(int nr, unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
-#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
struct fp_ext temp[2];
};
-#ifdef FPU_EMU_DEBUG
+#if FPU_EMU_DEBUG
extern unsigned int fp_debugprint;
#define dprint(bit, fmt, args...) ({ \
#define _MOTOROLA_PGALLOC_H
#include <asm/tlb.h>
-#include <asm/tlbflush.h>
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _M68K_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
atomic_t count;
atomic_t waking;
wait_queue_head_t wait;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
long __magic;
#endif
};
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
, (long)&(name).__magic
#else
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#ifdef WAITQUEUE_DEBUG
+#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
__free_page(page);
}
-#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+{
+ tlb_remove_page(tlb, page);
+}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
#endif /* __ASSEMBLY__ */
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _M68KNOMMU_PAGE_H */
#define WANT_PAGE_VIRTUAL
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* _ASM_PAGE_H */
#ifdef __LP64__
#define LDREG ldd
#define STREG std
-#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define RP_OFFSET 16
#else
#define LDREG ldw
#define STREG stw
-#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define RP_OFFSET 20
#define FRAME_SIZE 64
#endif
-#ifdef CONFIG_PA20
-#define BL b,l
-#else
-#define BL bl
-#endif
-
#ifdef __ASSEMBLY__
#ifdef __LP64__
depd,z \r, 63-\sa, 64-\sa, \t
.endm
- /* Shift Right - note the r and t can NOT be the same! */
- .macro shr r, sa, t
- extru \r, 31-\sa, 32-\sa, \t
- .endm
-
- /* pa20w version of shift right */
- .macro shrd r, sa, t
- extrd,u \r, 63-\sa, 64-\sa, \t
- .endm
-
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+static __inline__ void set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr |= mask;
}
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr &= ~mask;
}
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
}
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
*addr ^= mask;
}
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int __test_and_change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
return oldbit;
}
-static __inline__ int test_bit(int nr, const volatile unsigned long *address)
+static __inline__ int test_bit(int nr, const void *address)
{
unsigned long mask;
- const unsigned long *addr = (const unsigned long *)address;
+ unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
-static inline int sched_find_first_bit(const unsigned long *b)
+static inline int sched_find_first_bit(unsigned long *b)
{
#ifndef __LP64__
if (unlikely(b[0]))
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
-static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
- const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
+ unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
return result + ffz(tmp);
}
-static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
- const unsigned long *p = addr + (offset >> 6);
+ unsigned long *p = addr + (offset >> 6);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
* disabling interrupts.
*/
#ifdef __LP64__
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, addr)
#else
-#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
+#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
+#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, addr)
#endif
#endif /* __KERNEL__ */
#endif
}
-extern void flush_dcache_page(struct page *page);
+extern void __flush_dcache_page(struct page *page);
+
+static inline void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping)) {
+ set_bit(PG_dcache_dirty, &page->flags);
+ } else {
+ __flush_dcache_page(page);
+ }
+}
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
- unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+ unsigned long addr)
{
- pgd_t *pgd = pgd_offset(mm, addr);
+ pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
pte_t *pte;
if(pgd_none(*pgd))
- return NULL;
+ return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
- return NULL;
+ return 0;
pte = pte_offset_map(pmd, addr);
/* The PA flush mappings show up as pte_none, but they're
* valid none the less */
if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
- return NULL;
- return pte;
+ return 0;
+ return 1;
}
-#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
/* Private function to flush a page from the cache of a non-current
#include <linux/mm.h>
#include <linux/config.h>
#include <asm/cacheflush.h>
-#include <asm/scatterlist.h>
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
#define HPHW_IOA 12
#define HPHW_BRIDGE 13
#define HPHW_FABRIC 14
-#define HPHW_MC 15
#define HPHW_FAULTY 31
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
-#ifdef CONFIG_DISCONTIGMEM
-
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
-extern int npmem_ranges;
-
struct node_map_data {
pg_data_t pg_data;
+ struct page *adj_node_mem_map;
};
extern struct node_map_data node_data[];
+extern unsigned char *chunkmap;
-#define NODE_DATA(nid) (&node_data[nid].pg_data)
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
-#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
-})
-#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
-
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
-#define pfn_to_page(pfn) \
-({ \
- unsigned long __pfn = (pfn); \
- int __node = pfn_to_nid(__pfn); \
- &node_mem_map(__node)[node_localnr(__pfn,__node)]; \
-})
-
-#define page_to_pfn(pg) \
-({ \
- struct page *__page = pg; \
- struct zone *__zone = page_zone(__page); \
- BUG_ON(__zone == NULL); \
- (unsigned long)(__page - __zone->zone_mem_map) \
- + __zone->zone_start_pfn; \
-})
+#define BADCHUNK ((unsigned char)0xff)
+#define CHUNKSZ (256*1024*1024)
+#define CHUNKSHIFT 28
+#define CHUNKMASK (~(CHUNKSZ - 1))
+#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX 512 /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef __LP64__
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- unsigned int i;
- unsigned char r;
-
- if (unlikely(pfn_is_io(pfn)))
- return 0;
-
- i = pfn >> PFNNID_SHIFT;
- BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
- r = pfnnid_map[i];
- BUG_ON(r == 0xff);
+#define NODE_DATA(nid) (&node_data[nid].pg_data)
+#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
+#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
- return (int)r;
-}
+#define phys_to_page(paddr) \
+ (ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
+ + ((paddr) >> PAGE_SHIFT))
-static inline int pfn_valid(int pfn)
-{
- int nid = pfn_to_nid(pfn);
+#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
- if (nid >= 0)
- return (pfn < node_end_pfn(nid));
- return 0;
-}
+/* This is kind of bogus, need to investigate performance of doing it right */
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#else /* !CONFIG_DISCONTIGMEM */
-#define MAX_PHYSMEM_RANGES 1
-#endif
-#endif /* _PARISC_MMZONE_H */
+#endif /* !_PARISC_MMZONE_H */
+++ /dev/null
-#ifndef _ASM_MAX_NUMNODES_H
-#define _ASM_MAX_NUMNODES_H
-
-#include <linux/config.h>
-
-/* Max 8 Nodes */
-#define NODES_SHIFT 3
-
-#endif /* _ASM_MAX_NUMNODES_H */
#else
#define pte_flags(x) ((x).flags)
#endif
-
-/* These do not work lvalues, so make sure we don't use them as such. */
-#define pmd_val(x) ((x).pmd + 0)
-#define pgd_val(x) ((x).pgd + 0)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
-#define __pmd_val_set(x,n) (x).pmd = (n)
-#define __pgd_val_set(x,n) (x).pgd = (n)
-
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
return order;
}
+#ifdef __LP64__
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#else
+#define MAX_PHYSMEM_RANGES 1 /* First range is only range that fits in 32 bits */
+#endif
+
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
-
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PARISC_PAGE_H */
*/
#define PCI_MAX_BUSSES 256
+/* [soapbox on]
+** Who the hell can develop stuff without ASSERT or VASSERT?
+** No one understands all the modules across all platforms.
+** For linux add another dimension - processor architectures.
+**
+** This should be a standard/global macro used liberally
+** in all code. Every respectable engineer I know in HP
+** would support this argument. - grant
+** [soapbox off]
+*/
+#ifdef PCI_DEBUG
+#define ASSERT(expr) \
+	do { if(!(expr)) { \
+		printk("\n%s:%d: Assertion " #expr " failed!\n", \
+			__FILE__, __LINE__); \
+		panic(#expr); \
+	} } while (0)
+#else
+#define ASSERT(expr)
+#endif
+
+
/*
** pci_hba_data (aka H2P_OBJECT in HP/UX)
**
#define OSTAT_RUN 6
#define OSTAT_ON 7
+#ifdef __LP64__
+/* PDC PAT CELL */
+#define PDC_PAT_CELL 64L /* Interface for gaining and
+ * manipulating cell state within PD */
+#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
+#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
+#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
+#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
+#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
+#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
+#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
+#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
+#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size*/
+#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
+#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
+#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
+#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
+#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
+
+/*
+** Arg to PDC_PAT_CELL_MODULE memaddr[4]
+**
+** Addresses on the Merced Bus != all Runway Bus addresses.
+** This is intended for programming SBA/LBA chips range registers.
+*/
+#define IO_VIEW 0UL
+#define PA_VIEW 1UL
+
+/* PDC_PAT_CELL_MODULE entity type values */
+#define PAT_ENTITY_CA 0 /* central agent */
+#define PAT_ENTITY_PROC 1 /* processor */
+#define PAT_ENTITY_MEM 2 /* memory controller */
+#define PAT_ENTITY_SBA 3 /* system bus adapter */
+#define PAT_ENTITY_LBA 4 /* local bus adapter */
+#define PAT_ENTITY_PBC 5 /* processor bus converter */
+#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
+#define PAT_ENTITY_RC 7 /* fabric interconnect */
+
+/* PDC_PAT_CELL_MODULE address range type values */
+#define PAT_PBNUM 0 /* PCI Bus Number */
+#define PAT_LMMIO 1 /* < 4G MMIO Space */
+#define PAT_GMMIO 2 /* > 4G MMIO Space */
+#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
+#define PAT_PIOP 4 /* Postable I/O Port Space */
+#define PAT_AHPA 5 /* Additional HPA Space */
+#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
+#define PAT_GNIP 7 /* GNI Reserved Space */
+
+
+/* PDC PAT CHASSIS LOG */
+#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
+ ** progress functions */
+#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
+#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+
+
+/* PDC PAT CPU */
+#define PDC_PAT_CPU 67L /* Interface to CPU configuration
+ * within the protection domain */
+#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
+#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
+#define PDC_PAT_CPU_ADD 2L /* Add CPU */
+#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
+#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
+#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
+#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
+#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
+#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
+#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
+ * Cleansing Mode */
+
+/* PDC PAT EVENT */
+#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
+#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
+#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
+#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
+#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
+#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args*/
+
+/* PDC PAT HPMC */
+#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
+ ** loop, and wait for wake up from
+ ** Monarch Processor */
+#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
+#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
+ * will use to interrupt OS during machine
+ * check rendezvous */
+
+/* parameters for PDC_PAT_HPMC_SET_PARAMS */
+#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
+#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
+
+/* PDC PAT IO */
+#define PDC_PAT_IO 71L /* On-line services for I/O modules */
+#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info */
+#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
+ /* Hardware Path */
+#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
+ * Physical Location */
+#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
+ * Address from Hardware Path */
+#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
+ * from PCI Configuration Address */
+#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
+#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
+ * Size */
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
+#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
+#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
+#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
+#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
+#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
+ * Cabinet */
+#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
+ /* Bay Slots in Cabinet */
+#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
+#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
+#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
+
+/* PDC PAT MEM */
+#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
+#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
+#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
+#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
+#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
+#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
+#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
+#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
+#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
+#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
+#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
+#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From*/
+ /* Memory Address */
+#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
+#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
+#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
+#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
+#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
+#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
+#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
+#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
+
+/* PDC PAT NVOLATILE */
+#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory*/
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+
+/* PDC PAT PD */
+#define PDC_PAT_PD 74L /* Protection Domain Info */
+#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+#define PAT_MEMORY_DESCRIPTOR 1
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+#define PAT_MEMTYPE_MEMORY 0
+#define PAT_MEMTYPE_FIRMWARE 4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+#define PAT_MEMUSE_GENERAL 0
+#define PAT_MEMUSE_GI 128
+#define PAT_MEMUSE_GNI 129
+#endif /* __LP64__ */
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
+#ifdef CONFIG_PARISC64
+#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
+#else
+#define is_pdc_pat() (0)
+#endif
+
struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
unsigned long actcnt; /* actual number of bytes returned */
unsigned long maxcnt; /* maximum number of bytes that could be returned */
#ifdef __LP64__
cc_padW:32,
#endif
- cc_alias: 4, /* alias boundaries for virtual addresses */
+ cc_alias:4, /* alias boundaries for virtual addresses */
cc_block: 4, /* to determine most efficient stride */
cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
- cc_shift: 2, /* how much to shift cc_block left */
+ cc_pad0 : 2, /* reserved */
cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
unsigned long tod_usec;
};
+#ifdef __LP64__
+struct pdc_pat_cell_num {
+ unsigned long cell_num;
+ unsigned long cell_loc;
+};
+
+struct pdc_pat_cpu_num {
+ unsigned long cpu_num;
+ unsigned long cpu_loc;
+};
+
+struct pdc_pat_pd_addr_map_entry {
+ unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
+ unsigned char reserve1[5];
+ unsigned char memory_type;
+ unsigned char memory_usage;
+ unsigned long paddr;
+ unsigned int pages; /* Length in 4K pages */
+ unsigned int reserve2;
+ unsigned long cell_map;
+};
+
+/* FIXME: mod[508] should really be a union of the various mod components */
+struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
+ unsigned long cba; /* function 0 configuration space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ struct hardware_path mod_path; /* hardware path */
+ unsigned long mod[508]; /* PAT cell module components */
+};
+
+typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
+#endif /* __LP64__ */
+
/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
struct pdc_hpmc_pim_11 { /* PDC_PIM */
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg);
+#ifdef __LP64__
+int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
+int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
+int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
+ unsigned long view_type, void *mem_addr);
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
+int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
+int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
+ unsigned long count, unsigned long offset);
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
+* ----------------------------------------------------------
+* Bit 0 to 51 - conf_base_addr
+* Bit 52 to 62 - reserved
+* Bit 63 - endianness bit
+********************************************************************/
+#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
+* ----------------------------------------------------
+* Bit 0 to 7 - entity type
+* 0 = central agent, 1 = processor,
+* 2 = memory controller, 3 = system bus adapter,
+* 4 = local bus adapter, 5 = processor bus converter,
+* 6 = crossbar fabric connect, 7 = fabric interconnect,
+* 8 to 254 reserved, 255 = unknown.
+* Bit 8 to 15 - DVI
+* Bit 16 to 23 - IOC functions
+* Bit 24 to 39 - reserved
+* Bit 40 to 63 - mod_pages
+* number of 4K pages a module occupies starting at conf_base_addr
+********************************************************************/
+#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
+#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
+#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
+#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
+
+#else /* !__LP64__ */
+/* No PAT support for 32-bit kernels...sorry */
+#define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC
+#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
+#endif /* !__LP64__ */
+
extern void pdc_init(void);
#endif /* __ASSEMBLY__ */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
- * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
+ * Copyright (c) Hewlett Packard (Paul Bame <bame@puffin.external.hp.com>)
+ * Copyright 2000 (c) Grant Grundler <grundler@puffin.external.hp.com>
*/
+/* PDC PAT CELL */
#define PDC_PAT_CELL 64L /* Interface for gaining and
* manipulatin g cell state within PD */
#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
#define PAT_GNIP 7 /* GNI Reserved Space */
+/* PDC PAT CHASSIS LOG */
-/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
-
-#define PDC_PAT_CHASSIS_LOG 65L
+#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
+ ** progress functions */
#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+/* PDC PAT CPU */
-/* PDC PAT CPU -- CPU configuration within the protection domain */
-
-#define PDC_PAT_CPU 67L
+#define PDC_PAT_CPU 67L /* Interface to CPU configuration
+ * within the protection domain */
#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
#define PDC_PAT_CPU_ADD 2L /* Add CPU */
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
-/* PDC PAT EVENT -- Platform Events */
+/* PDC PAT EVENT */
-#define PDC_PAT_EVENT 68L
+#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */
-/* PDC PAT HPMC -- Cause processor to go into spin loop, and wait
- * for wake up from Monarch Processor.
- */
+/* PDC PAT HPMC */
-#define PDC_PAT_HPMC 70L
+#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
+ ** loop, and wait for wake up from
+ ** Monarch Processor */
#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
- * will use to interrupt OS during
- * machine check rendezvous */
+ * will use to interrupt OS during machine
+ * check rendezvous */
/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
+/* PDC PAT IO */
-/* PDC PAT IO -- On-line services for I/O modules */
-
-#define PDC_PAT_IO 71L
+#define PDC_PAT_IO 71L /* On-line services for I/O modules */
#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/
#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
/* Hardware Path */
#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
+/* PDC PAT MEM */
-/* PDC PAT MEM -- Manage memory page deallocation */
-
-#define PDC_PAT_MEM 72L
+#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
+/* PDC PAT NVOLATILE */
-/* PDC PAT NVOLATILE -- Access Non-Volatile Memory */
-
-#define PDC_PAT_NVOLATILE 73L
-#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
-#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
-#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
-#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
-
-/* PDC PAT PD */
-#define PDC_PAT_PD 74L /* Protection Domain Info */
-#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
-
-/* PDC_PAT_PD_GET_ADDR_MAP entry types */
-#define PAT_MEMORY_DESCRIPTOR 1
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory types */
-#define PAT_MEMTYPE_MEMORY 0
-#define PAT_MEMTYPE_FIRMWARE 4
-
-/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
-#define PAT_MEMUSE_GENERAL 0
-#define PAT_MEMUSE_GI 128
-#define PAT_MEMUSE_GNI 129
-
+#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
#ifndef __ASSEMBLY__
#include <linux/types.h>
-#ifdef CONFIG_PARISC64
-#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
-extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
-extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
-#else /* ! CONFIG_PARISC64 */
-/* No PAT support for 32-bit kernels...sorry */
-#define is_pdc_pat() (0)
-#define pdc_pat_get_irt_size(num_entries, cell_numn) PDC_BAD_PROC
-#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
-#endif /* ! CONFIG_PARISC64 */
-
-
-struct pdc_pat_cell_num {
- unsigned long cell_num;
- unsigned long cell_loc;
-};
-
-struct pdc_pat_cpu_num {
- unsigned long cpu_num;
- unsigned long cpu_loc;
-};
-
-struct pdc_pat_pd_addr_map_entry {
- unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
- unsigned char reserve1[5];
- unsigned char memory_type;
- unsigned char memory_usage;
- unsigned long paddr;
- unsigned int pages; /* Length in 4K pages */
- unsigned int reserve2;
- unsigned long cell_map;
-};
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
-* ----------------------------------------------------------
-* Bit 0 to 51 - conf_base_addr
-* Bit 52 to 62 - reserved
-* Bit 63 - endianess bit
-********************************************************************/
-#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
-
-/********************************************************************
-* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
-* ----------------------------------------------------
-* Bit 0 to 7 - entity type
-* 0 = central agent, 1 = processor,
-* 2 = memory controller, 3 = system bus adapter,
-* 4 = local bus adapter, 5 = processor bus converter,
-* 6 = crossbar fabric connect, 7 = fabric interconnect,
-* 8 to 254 reserved, 255 = unknown.
-* Bit 8 to 15 - DVI
-* Bit 16 to 23 - IOC functions
-* Bit 24 to 39 - reserved
-* Bit 40 to 63 - mod_pages
-* number of 4K pages a module occupies starting at conf_base_addr
-********************************************************************/
-#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
-#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
-#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
-#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
-
-
/*
** PDC_PAT_CELL_GET_INFO return block
*/
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
- unsigned long cba; /* func 0 cfg space address */
- unsigned long mod_info; /* module information */
- unsigned long mod_location; /* physical location of the module */
- struct hardware_path mod_path; /* module path (device path - layers) */
+ unsigned long cba; /* function 0 configuration space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ unsigned long mod_path; /* module path (device path - layers) */
unsigned long mod[508]; /* PAT cell module components */
} __attribute__((aligned(8))) ;
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
-extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
-extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
-extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
+extern int pdc_pat_cell_get_number(void *);
+extern int pdc_pat_cell_module(void *, unsigned long, unsigned long, unsigned long, void *);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
-extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
-
-extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
-
-
-extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
-extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
-
-
/* Flag to indicate this is a PAT box...don't use this unless you
** really have to...it might go away some day.
*/
+#ifdef __LP64__
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
+#endif
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
* kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|GFP_DMA,
PGD_ALLOC_ORDER);
pgd_t *actual_pgd = pgd;
#ifdef __LP64__
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
- * with PxD_FLAG_ATTACHED as a signal to the system that this
+ * with _PAGE_GATEWAY as a signal to the system that this
* pmd entry may not be cleared. */
- __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+ pgd_val(*actual_pgd) = (_PAGE_TABLE | _PAGE_GATEWAY) +
+ (__u32)__pa((unsigned long)pgd);
/* The first pmd entry also is marked with _PAGE_GATEWAY as
* a signal that this pmd may not be freed */
- __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+ pgd_val(*pgd) = _PAGE_GATEWAY;
#endif
}
return actual_pgd;
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
- __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
- (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ pgd_val(*pgd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pmd);
}
+/* NOTE: pmd must be in ZONE_DMA (<4GB) so the pgd pointer can be
+ * housed in 32 bits */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT|GFP_DMA,
PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
static inline void pmd_free(pmd_t *pmd)
{
#ifdef __LP64__
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
return;
#ifdef __LP64__
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
+ pmd_val(*pmd) = (_PAGE_TABLE | _PAGE_GATEWAY)
+ + (__u32)__pa((unsigned long)pte);
else
#endif
- __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+ pmd_val(*pmd) = _PAGE_TABLE + (__u32)__pa((unsigned long)pte);
}
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+/* NOTE: pte must be in ZONE_DMA (<4GB) so that the pmd pointer
+ * can be housed in 32 bits */
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
if (likely(page != NULL))
clear_page(page_address(page));
return page;
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|GFP_DMA);
if (likely(pte != NULL))
clear_page(pte);
return pte;
/* This is the size of the initially mapped kernel memory (i.e. currently
* 0 to 1<<23 == 8MB */
-#ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER 24
-#else
#define KERNEL_INITIAL_ORDER 23
-#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
-#ifdef CONFIG_64BIT
+#ifdef __LP64__
#define PT_NLEVELS 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
-/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
- * are page-aligned, we don't care about the PAGE_OFFSET bits, except
- * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
-#define _PxD_PRESENT_BIT 31
-#define _PxD_ATTACHED_BIT 30
-#define _PxD_VALID_BIT 29
-
-#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
-#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
-#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
-#define PxD_FLAG_MASK (0xf)
-#define PxD_FLAG_SHIFT (4)
-#define PxD_VALUE_SHIFT (8)
-
#ifndef __ASSEMBLY__
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
-#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
-#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
-#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-
-#ifdef CONFIG_64BIT
+#ifdef __LP64__
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
-#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
+#define pmd_none(x) (!pmd_val(x) || pmd_val(x) == _PAGE_GATEWAY)
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pmd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
#else
#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
#endif
-#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
-#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
- if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+#ifdef __LP64__
+ if(pmd_val(*pmd) & _PAGE_GATEWAY)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
- __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
+ pmd_val(*pmd) = _PAGE_GATEWAY;
else
#endif
- __pmd_val_set(*pmd, 0);
+ pmd_val(*pmd) = 0;
}
#if PT_NLEVELS == 3
-#define pgd_page(pgd) ((unsigned long) __va(pgd_address(pgd)))
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
/* For 64 bit we have three level tables */
#define pgd_none(x) (!pgd_val(x))
-#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
-#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
+#ifdef __LP64__
+#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE && (pgd_val(x) & ~PAGE_MASK) != (_PAGE_TABLE | _PAGE_GATEWAY))
+#else
+#define pgd_bad(x) ((pgd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
+#endif
+#define pgd_present(x) (pgd_val(x) & _PAGE_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
- if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
+#ifdef __LP64__
+ if(pgd_val(*pgd) & _PAGE_GATEWAY)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
return;
#endif
- __pgd_val_set(*pgd, 0);
+ pgd_val(*pgd) = 0;
}
#else
/*
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+#ifdef CONFIG_DISCONTIGMEM
+#define pte_page(x) (phys_to_page(pte_val(x)))
+#else
+#define pte_page(x) (mem_map+(pte_val(x) >> PAGE_SHIFT))
+#endif
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define __pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#ifdef CONFIG_SMP
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
+ return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_young(pte))
#ifdef CONFIG_SMP
if (!pte_dirty(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
+ return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_dirty(pte))
#endif
}
+#ifdef CONFIG_SMP
extern spinlock_t pa_dbit_lock;
+#else
+static int pa_dbit_lock; /* dummy to keep the compilers happy */
+#endif
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
static inline void ptep_mkdirty(pte_t *ptep)
{
#ifdef CONFIG_SMP
- set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
+ set_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
extern unsigned long cpu_present_mask;
#define smp_processor_id() (current_thread_info()->cpu)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#endif /* CONFIG_SMP */
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
-static inline int __cpu_disable (void) {
- return 0;
-}
-static inline void __cpu_die (unsigned int cpu) {
- while(1)
- ;
-}
-extern int __cpu_up (unsigned int cpu);
#endif /* __ASM_SMP_H */
* the semaphore address has to be 16-byte aligned.
*/
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { { 1, 1, 1, 1 } }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
return __ldcw(a) != 0;
}
-#define spin_lock_own(LOCK, LOCATION) ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC 0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x) \
- do { \
- if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \
- printk(KERN_ERR "%s:%d: spin_is_locked" \
- " on uninitialized spinlock %p.\n", \
- __FILE__, __LINE__, (x)); \
- } \
- } while(0)
-
-#define spin_is_locked(x) \
- ({ \
- CHECK_LOCK(x); \
- volatile unsigned int *a = __ldcw_align(x); \
- if (unlikely((*a == 0) && (x)->babble)) { \
- (x)->babble--; \
- printk("KERN_WARNING \
- %s:%d: spin_is_locked(%s/%p) already" \
- " locked by %s:%d in %s at %p(%d)\n", \
- __FILE__,__LINE__, (x)->module, (x), \
- (x)->bfile, (x)->bline, (x)->task->comm,\
- (x)->previous, (x)->oncpu); \
- } \
- *a == 0; \
- })
-
-#define spin_unlock_wait(x) \
- do { \
- CHECK_LOCK(x); \
- volatile unsigned int *a = __ldcw_align(x); \
- if (unlikely((*a == 0) && (x)->babble)) { \
- (x)->babble--; \
- printk("KERN_WARNING \
- %s:%d: spin_unlock_wait(%s/%p)" \
- " owned by %s:%d in %s at %p(%d)\n", \
- __FILE__,__LINE__, (x)->module, (x), \
- (x)->bfile, (x)->bline, (x)->task->comm,\
- (x)->previous, (x)->oncpu); \
- } \
- barrier(); \
- } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION) \
-do { \
- volatile unsigned int *a = __ldcw_align(LOCK); \
- if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
- printk("KERN_WARNING \
- %s: called on %d from %p but lock %s on %d\n", \
- LOCATION, smp_processor_id(), \
- __builtin_return_address(0), \
- (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
volatile int counter;
} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { { { 1, 1, 1, 1 } }, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
/* read_lock, read_unlock are pretty straightforward. Of course it somehow
* sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
unsigned long flags;
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
-#endif /* CONFIG_DEBUG_RWLOCK */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
* writers) in interrupt handlers someone fucked up and we'd dead-lock
* sooner or later anyway. prumpf */
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
}
-#endif /* CONFIG_DEBUG_RWLOCK */
/* write_unlock is absolutely trivial - we don't have to wait for anything */
typedef struct {
volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned long magic;
- volatile unsigned int babble;
- const char *module;
- char *bfile;
- int bline;
- int oncpu;
- void *previous;
- struct task_struct * task;
-#endif
} spinlock_t;
#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain;/* execution domain */
- unsigned long flags; /* thread_info flags (see TIF_*) */
- mm_segment_t addr_limit; /* user-level address space limit */
+ __u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
- __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
+ mm_segment_t addr_limit; /* user-level address space limit */
struct restart_block restart_block;
+	__s32 preempt_count;	/* 0=preemptable, <0=BUG; will also serve as bh-counter */
};
#define INIT_THREAD_INFO(tsk) \
return sys_close(fd);
}
-static inline void _exit(int exitcode)
+static inline int _exit(int exitcode)
{
- sys_exit(exitcode);
+ return sys_exit(exitcode);
}
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
};
struct unwind_frame_info {
+ unsigned long sp;
+ unsigned long ip;
struct task_struct *t;
/* Eventually we would like to be able to get at any of the registers
available; but for now we only try to get the sp and ip for each
frame */
/* struct pt_regs regs; */
- unsigned long sp, ip, rp;
unsigned long prev_sp, prev_ip;
};
void * unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
- void *start, void *end);
+ const void *start, const void *end);
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
- unsigned long sp, unsigned long ip, unsigned long rp);
+ struct pt_regs *regs);
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
-void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
int unwind_once(struct unwind_frame_info *info);
int unwind_to_user(struct unwind_frame_info *info);
int *src_err, int *dst_err);
#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
- csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
+ csum_partial_copy_generic((src), (dst), (len), (sum), (errp), 0)
/* FIXME: this needs to be written to really do no check -- Cort */
#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+ csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
/*
* turns a 32-bit partial checksum (e.g. from csum_partial) into a
#define CPM_DATAONLY_SIZE ((uint)0x0700)
#define CPM_DP_NOSPACE ((uint)0x7fffffff)
-static inline long IS_DPERR(const uint offset)
-{
- return (uint)offset > (uint)-1000L;
-}
-
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm8xx_t *cpmp; /* Pointer to comm processor */
-extern uint cpm_dpalloc(uint size, uint align);
-extern int cpm_dpfree(uint offset);
-extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
-extern void cpm_dpdump(void);
-extern void *cpm_dpram_addr(uint offset);
-extern void cpm_setbrg(uint brg, uint rate);
-
+extern void *m8xx_cpm_dpalloc(int size);
+extern int m8xx_cpm_dpfree(void *addr);
+extern void *m8xx_cpm_dpalloc_fixed(void *addr, int size);
+extern void m8xx_cpm_dpdump(void);
+extern int m8xx_cpm_dpram_offset(void *addr);
+extern void *m8xx_cpm_dpram_addr(int offset);
uint m8xx_cpm_hostalloc(uint size);
+void m8xx_cpm_setbrg(uint brg, uint rate);
/* Buffer descriptors used by many of the CPM protocols.
*/
*/
#define NUM_CPM_HOST_PAGES 2
-static inline long IS_DPERR(const uint offset)
-{
- return (uint)offset > (uint)-1000L;
-}
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm_cpm2_t *cpmp; /* Pointer to comm processor */
-extern uint cpm_dpalloc(uint size, uint align);
-extern int cpm_dpfree(uint offset);
-extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
-extern void cpm_dpdump(void);
-extern void *cpm_dpram_addr(uint offset);
-extern void cpm_setbrg(uint brg, uint rate);
+extern void *cpm2_dpalloc(uint size, uint align);
+extern int cpm2_dpfree(void *addr);
+extern void *cpm2_dpalloc_fixed(void *addr, uint size, uint align);
+extern void cpm2_dpdump(void);
+extern unsigned int cpm2_dpram_offset(void *addr);
+extern void *cpm2_dpram_addr(int offset);
+extern void cpm2_setbrg(uint brg, uint rate);
extern void cpm2_fastbrg(uint brg, uint rate, int div16);
/* Buffer descriptors used by many of the CPM protocols.
#define CPU_FTR_NO_DPM 0x00008000
#define CPU_FTR_HAS_HIGH_BATS 0x00010000
#define CPU_FTR_NEED_COHERENT 0x00020000
-#define CPU_FTR_NO_BTIC 0x00040000
#ifdef __ASSEMBLY__
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 01000000 /* tux hack */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
+++ /dev/null
-/*
- * include/asm-ppc/gt64260.h
- *
- * Prototypes, etc. for the Marvell/Galileo GT64260 host bridge routines.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASMPPC_GT64260_H
-#define __ASMPPC_GT64260_H
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/gt64260_defs.h>
-
-
-extern u32 gt64260_base;
-extern u32 gt64260_irq_base; /* We handle the next 96 IRQs from here */
-extern u32 gt64260_revision;
-extern u8 gt64260_pci_exclude_bridge;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/* IRQs defined by the 64260 */
-#define GT64260_IRQ_MPSC0 40
-#define GT64260_IRQ_MPSC1 42
-#define GT64260_IRQ_SDMA 36
-
-/*
- * Define a default physical memory map to be set up on the bridge.
- * Also define a struct to pass that info from board-specific routines to
- * GT64260 generic set up routines. By passing this info in, the board
- * support developer can modify it at will.
- */
-
-/*
- * This is the default memory map:
- * CPU PCI
- * --- ---
- * PCI 0 I/O: 0xfa000000-0xfaffffff 0x00000000-0x00ffffff
- * PCI 1 I/O: 0xfb000000-0xfbffffff 0x01000000-0x01ffffff
- * PCI 0 MEM: 0x80000000-0x8fffffff 0x80000000-0x8fffffff
- * PCI 1 MEM: 0x90000000-0x9fffffff 0x90000000-0x9fffffff
- */
-
-/* Default physical memory map for the GT64260 bridge */
-
-/*
- * PCI Bus 0 Definitions
- */
-#define GT64260_PCI_0_IO_SIZE 0x01000000U
-#define GT64260_PCI_0_MEM_SIZE 0x10000000U
-
-/* Processor Physical addresses */
-#define GT64260_PCI_0_IO_START_PROC 0xfa000000U
-#define GT64260_PCI_0_IO_END_PROC (GT64260_PCI_0_IO_START_PROC + \
- GT64260_PCI_0_IO_SIZE - 1)
-
-/* PCI 0 addresses */
-#define GT64260_PCI_0_IO_START 0x00000000U
-#define GT64260_PCI_0_IO_END (GT64260_PCI_0_IO_START + \
- GT64260_PCI_0_IO_SIZE - 1)
-
-/* Processor Physical addresses */
-#define GT64260_PCI_0_MEM_START_PROC 0x80000000U
-#define GT64260_PCI_0_MEM_END_PROC (GT64260_PCI_0_MEM_START_PROC + \
- GT64260_PCI_0_MEM_SIZE - 1)
-
-/* PCI 0 addresses */
-#define GT64260_PCI_0_MEM_START 0x80000000U
-#define GT64260_PCI_0_MEM_END (GT64260_PCI_0_MEM_START + \
- GT64260_PCI_0_MEM_SIZE - 1)
-
-/*
- * PCI Bus 1 Definitions
- */
-#define GT64260_PCI_1_IO_SIZE 0x01000000U
-#define GT64260_PCI_1_MEM_SIZE 0x10000000U
-
-/* PCI 1 addresses */
-#define GT64260_PCI_1_IO_START 0x01000000U
-#define GT64260_PCI_1_IO_END (GT64260_PCI_1_IO_START + \
- GT64260_PCI_1_IO_SIZE - 1)
-
-/* Processor Physical addresses */
-#define GT64260_PCI_1_IO_START_PROC 0xfb000000U
-#define GT64260_PCI_1_IO_END_PROC (GT64260_PCI_1_IO_START_PROC + \
- GT64260_PCI_1_IO_SIZE - 1)
-
-/* PCI 1 addresses */
-#define GT64260_PCI_1_MEM_START 0x90000000U
-#define GT64260_PCI_1_MEM_END (GT64260_PCI_1_MEM_START + \
- GT64260_PCI_1_MEM_SIZE - 1)
-
-/* Processor Physical addresses */
-#define GT64260_PCI_1_MEM_START_PROC 0x90000000U
-#define GT64260_PCI_1_MEM_END_PROC (GT64260_PCI_1_MEM_START_PROC + \
- GT64260_PCI_1_MEM_SIZE - 1)
-
-/* Define struct to pass mem-map info into gt64260_common.c code */
-typedef struct {
- struct pci_controller *hose_a;
- struct pci_controller *hose_b;
-
- u32 mem_size;
-
- u32 pci_0_io_start_proc;
- u32 pci_0_io_start_pci;
- u32 pci_0_io_size;
- u32 pci_0_io_swap;
-
- u32 pci_0_mem_start_proc;
- u32 pci_0_mem_start_pci_hi;
- u32 pci_0_mem_start_pci_lo;
- u32 pci_0_mem_size;
- u32 pci_0_mem_swap;
-
- u32 pci_1_io_start_proc;
- u32 pci_1_io_start_pci;
- u32 pci_1_io_size;
- u32 pci_1_io_swap;
-
- u32 pci_1_mem_start_proc;
- u32 pci_1_mem_start_pci_hi;
- u32 pci_1_mem_start_pci_lo;
- u32 pci_1_mem_size;
- u32 pci_1_mem_swap;
-} gt64260_bridge_info_t;
-
-#define GT64260_BRIDGE_INFO_DEFAULT(ip, ms) { \
- (ip)->mem_size = (ms); \
- \
- (ip)->pci_0_io_start_proc = GT64260_PCI_0_IO_START_PROC; \
- (ip)->pci_0_io_start_pci = GT64260_PCI_0_IO_START; \
- (ip)->pci_0_io_size = GT64260_PCI_0_IO_SIZE; \
- (ip)->pci_0_io_swap = GT64260_CPU_PCI_SWAP_NONE; \
- \
- (ip)->pci_0_mem_start_proc = GT64260_PCI_0_MEM_START_PROC; \
- (ip)->pci_0_mem_start_pci_hi = 0x00000000; \
- (ip)->pci_0_mem_start_pci_lo = GT64260_PCI_0_MEM_START; \
- (ip)->pci_0_mem_size = GT64260_PCI_0_MEM_SIZE; \
- (ip)->pci_0_mem_swap = GT64260_CPU_PCI_SWAP_NONE; \
- \
- (ip)->pci_1_io_start_proc = GT64260_PCI_1_IO_START_PROC; \
- (ip)->pci_1_io_start_pci = GT64260_PCI_1_IO_START; \
- (ip)->pci_1_io_size = GT64260_PCI_1_IO_SIZE; \
- (ip)->pci_1_io_swap = GT64260_CPU_PCI_SWAP_NONE; \
- \
- (ip)->pci_1_mem_start_proc = GT64260_PCI_1_MEM_START_PROC; \
- (ip)->pci_1_mem_start_pci_hi = 0x00000000; \
- (ip)->pci_1_mem_start_pci_lo = GT64260_PCI_1_MEM_START; \
- (ip)->pci_1_mem_size = GT64260_PCI_1_MEM_SIZE; \
- (ip)->pci_1_mem_swap = GT64260_CPU_PCI_SWAP_NONE; \
-}
-
-/*
- *****************************************************************************
- *
- * I/O macros to access the 64260's registers
- *
- *****************************************************************************
- */
-
-extern inline uint32_t gt_read(uint32_t offs){
- return (in_le32((volatile uint *)(gt64260_base + offs)));
-}
-extern inline void gt_write(uint32_t offs, uint32_t d){
- out_le32((volatile uint *)(gt64260_base + offs), d);
-}
-
-#if 0 /* paranoid SMP version */
-extern inline void gt_modify(u32 offs, u32 data, u32 mask) \
-{
- uint32_t reg;
- spin_lock(>64260_lock);
- reg = gt_read(offs) & (~mask); /* zero any bits we care about*/
- reg |= data & mask; /* set bits from the data */
- gt_write(offs, reg);
- spin_unlock(>64260_lock);
-}
-#else
-extern inline void gt_modify(uint32_t offs, uint32_t data, uint32_t mask)
-{
- uint32_t reg;
- reg = gt_read(offs) & (~(mask)); /* zero any bits we care about*/
- reg |= (data) & (mask); /* set bits from the data */
- gt_write(offs, reg);
-}
-#endif
-#define gt_set_bits(offs, bits) gt_modify(offs, ~0, bits)
-
-#define gt_clr_bits(offs, bits) gt_modify(offs, 0, bits)
-
-
-/*
- *****************************************************************************
- *
- * Function Prototypes
- *
- *****************************************************************************
- */
-
-int gt64260_find_bridges(u32 phys_base_addr, gt64260_bridge_info_t *info,
- int ((*map_irq)(struct pci_dev *, unsigned char, unsigned char)));
-int gt64260_bridge_init(gt64260_bridge_info_t *info);
-int gt64260_cpu_scs_set_window(u32 window,
- u32 base_addr,
- u32 size);
-int gt64260_cpu_cs_set_window(u32 window,
- u32 base_addr,
- u32 size);
-int gt64260_cpu_boot_set_window(u32 base_addr,
- u32 size);
-int gt64260_cpu_set_pci_io_window(u32 pci_bus,
- u32 cpu_base_addr,
- u32 pci_base_addr,
- u32 size,
- u32 swap);
-int gt64260_cpu_set_pci_mem_window(u32 pci_bus,
- u32 window,
- u32 cpu_base_addr,
- u32 pci_base_addr_hi,
- u32 pci_base_addr_lo,
- u32 size,
- u32 swap_64bit);
-int gt64260_cpu_prot_set_window(u32 window,
- u32 base_addr,
- u32 size,
- u32 access_bits);
-int gt64260_cpu_snoop_set_window(u32 window,
- u32 base_addr,
- u32 size,
- u32 snoop_type);
-void gt64260_cpu_disable_all_windows(void);
-int gt64260_pci_bar_enable(u32 pci_bus, u32 enable_bits);
-int gt64260_pci_slave_scs_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_cs_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_boot_set_window(struct pci_controller *hose,
- u32 pci_base_addr,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_p2p_mem_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr,
- u32 other_bus_base_addr,
- u32 size);
-int gt64260_pci_slave_p2p_io_set_window(struct pci_controller *hose,
- u32 pci_base_addr,
- u32 other_bus_base_addr,
- u32 size);
-int gt64260_pci_slave_dac_scs_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr_hi,
- u32 pci_base_addr_lo,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_dac_cs_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr_hi,
- u32 pci_base_addr_lo,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_dac_boot_set_window(struct pci_controller *hose,
- u32 pci_base_addr_hi,
- u32 pci_base_addr_lo,
- u32 cpu_base_addr,
- u32 size);
-int gt64260_pci_slave_dac_p2p_mem_set_window(struct pci_controller *hose,
- u32 window,
- u32 pci_base_addr_hi,
- u32 pci_base_addr_lo,
- u32 other_bus_base_addr,
- u32 size);
-int gt64260_pci_acc_cntl_set_window(u32 pci_bus,
- u32 window,
- u32 base_addr_hi,
- u32 base_addr_lo,
- u32 size,
- u32 features);
-int gt64260_pci_snoop_set_window(u32 pci_bus,
- u32 window,
- u32 base_addr_hi,
- u32 base_addr_lo,
- u32 size,
- u32 snoop_type);
-int gt64260_set_base(u32 new_base);
-int gt64260_get_base(u32 *base);
-int gt64260_pci_exclude_device(u8 bus, u8 devfn);
-
-void gt64260_init_irq(void);
-int gt64260_get_irq(struct pt_regs *regs);
-
-void gt64260_mpsc_progress(char *s, unsigned short hex);
-
-#endif /* __ASMPPC_GT64260_H */
+++ /dev/null
-/*
- * include/asm-ppc/gt64260_defs.h
- *
- * Register definitions for the Marvell/Galileo GT64260 host bridge.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASMPPC_GT64260_DEFS_H
-#define __ASMPPC_GT64260_DEFS_H
-
-/*
- * Define a macro to represent the supported version of the 64260.
- */
-#define GT64260 0x01
-#define GT64260A 0x10
-
-/*
- *****************************************************************************
- *
- * CPU Interface Registers
- *
- *****************************************************************************
- */
-
-/* CPU physical address of 64260's registers */
-#define GT64260_INTERNAL_SPACE_DECODE 0x0068
-#define GT64260_INTERNAL_SPACE_SIZE 0x10000
-#define GT64260_INTERNAL_SPACE_DEFAULT_ADDR 0x14000000
-
-/* CPU Memory Controller Window Registers (4 windows) */
-#define GT64260_CPU_SCS_DECODE_WINDOWS 4
-
-#define GT64260_CPU_SCS_DECODE_0_BOT 0x0008
-#define GT64260_CPU_SCS_DECODE_0_TOP 0x0010
-#define GT64260_CPU_SCS_DECODE_1_BOT 0x0208
-#define GT64260_CPU_SCS_DECODE_1_TOP 0x0210
-#define GT64260_CPU_SCS_DECODE_2_BOT 0x0018
-#define GT64260_CPU_SCS_DECODE_2_TOP 0x0020
-#define GT64260_CPU_SCS_DECODE_3_BOT 0x0218
-#define GT64260_CPU_SCS_DECODE_3_TOP 0x0220
-
-/* CPU Device Controller Window Registers (4 windows) */
-#define GT64260_CPU_CS_DECODE_WINDOWS 4
-
-#define GT64260_CPU_CS_DECODE_0_BOT 0x0028
-#define GT64260_CPU_CS_DECODE_0_TOP 0x0030
-#define GT64260_CPU_CS_DECODE_1_BOT 0x0228
-#define GT64260_CPU_CS_DECODE_1_TOP 0x0230
-#define GT64260_CPU_CS_DECODE_2_BOT 0x0248
-#define GT64260_CPU_CS_DECODE_2_TOP 0x0250
-#define GT64260_CPU_CS_DECODE_3_BOT 0x0038
-#define GT64260_CPU_CS_DECODE_3_TOP 0x0040
-
-#define GT64260_CPU_BOOT_CS_DECODE_0_BOT 0x0238
-#define GT64260_CPU_BOOT_CS_DECODE_0_TOP 0x0240
-
-/* CPU Windows to PCI space (2 PCI buses each w/ 1 I/O & 4 MEM windows) */
-#define GT64260_PCI_BUSES 2
-#define GT64260_PCI_IO_WINDOWS_PER_BUS 1
-#define GT64260_PCI_MEM_WINDOWS_PER_BUS 4
-
-#define GT64260_CPU_PCI_SWAP_BYTE 0x00000000
-#define GT64260_CPU_PCI_SWAP_NONE 0x01000000
-#define GT64260_CPU_PCI_SWAP_BYTE_WORD 0x02000000
-#define GT64260_CPU_PCI_SWAP_WORD 0x03000000
-#define GT64260_CPU_PCI_SWAP_MASK 0x07000000
-
-#define GT64260_CPU_PCI_MEM_REQ64 (1<<27)
-
-#define GT64260_CPU_PCI_0_IO_DECODE_BOT 0x0048
-#define GT64260_CPU_PCI_0_IO_DECODE_TOP 0x0050
-#define GT64260_CPU_PCI_0_MEM_0_DECODE_BOT 0x0058
-#define GT64260_CPU_PCI_0_MEM_0_DECODE_TOP 0x0060
-#define GT64260_CPU_PCI_0_MEM_1_DECODE_BOT 0x0080
-#define GT64260_CPU_PCI_0_MEM_1_DECODE_TOP 0x0088
-#define GT64260_CPU_PCI_0_MEM_2_DECODE_BOT 0x0258
-#define GT64260_CPU_PCI_0_MEM_2_DECODE_TOP 0x0260
-#define GT64260_CPU_PCI_0_MEM_3_DECODE_BOT 0x0280
-#define GT64260_CPU_PCI_0_MEM_3_DECODE_TOP 0x0288
-
-#define GT64260_CPU_PCI_0_IO_REMAP 0x00f0
-#define GT64260_CPU_PCI_0_MEM_0_REMAP_LO 0x00f8
-#define GT64260_CPU_PCI_0_MEM_0_REMAP_HI 0x0320
-#define GT64260_CPU_PCI_0_MEM_1_REMAP_LO 0x0100
-#define GT64260_CPU_PCI_0_MEM_1_REMAP_HI 0x0328
-#define GT64260_CPU_PCI_0_MEM_2_REMAP_LO 0x02f8
-#define GT64260_CPU_PCI_0_MEM_2_REMAP_HI 0x0330
-#define GT64260_CPU_PCI_0_MEM_3_REMAP_LO 0x0300
-#define GT64260_CPU_PCI_0_MEM_3_REMAP_HI 0x0338
-
-#define GT64260_CPU_PCI_1_IO_DECODE_BOT 0x0090
-#define GT64260_CPU_PCI_1_IO_DECODE_TOP 0x0098
-#define GT64260_CPU_PCI_1_MEM_0_DECODE_BOT 0x00a0
-#define GT64260_CPU_PCI_1_MEM_0_DECODE_TOP 0x00a8
-#define GT64260_CPU_PCI_1_MEM_1_DECODE_BOT 0x00b0
-#define GT64260_CPU_PCI_1_MEM_1_DECODE_TOP 0x00b8
-#define GT64260_CPU_PCI_1_MEM_2_DECODE_BOT 0x02a0
-#define GT64260_CPU_PCI_1_MEM_2_DECODE_TOP 0x02a8
-#define GT64260_CPU_PCI_1_MEM_3_DECODE_BOT 0x02b0
-#define GT64260_CPU_PCI_1_MEM_3_DECODE_TOP 0x02b8
-
-#define GT64260_CPU_PCI_1_IO_REMAP 0x0108
-#define GT64260_CPU_PCI_1_MEM_0_REMAP_LO 0x0110
-#define GT64260_CPU_PCI_1_MEM_0_REMAP_HI 0x0340
-#define GT64260_CPU_PCI_1_MEM_1_REMAP_LO 0x0118
-#define GT64260_CPU_PCI_1_MEM_1_REMAP_HI 0x0348
-#define GT64260_CPU_PCI_1_MEM_2_REMAP_LO 0x0310
-#define GT64260_CPU_PCI_1_MEM_2_REMAP_HI 0x0350
-#define GT64260_CPU_PCI_1_MEM_3_REMAP_LO 0x0318
-#define GT64260_CPU_PCI_1_MEM_3_REMAP_HI 0x0358
-
-/* CPU Control Registers */
-#define GT64260_CPU_CONFIG 0x0000
-#define GT64260_CPU_MODE 0x0120
-#define GT64260_CPU_MASTER_CNTL 0x0160
-#define GT64260_CPU_XBAR_CNTL_LO 0x0150
-#define GT64260_CPU_XBAR_CNTL_HI 0x0158
-#define GT64260_CPU_XBAR_TO 0x0168
-#define GT64260_CPU_RR_XBAR_CNTL_LO 0x0170
-#define GT64260_CPU_RR_XBAR_CNTL_HI 0x0178
-
-/* CPU Sync Barrier Registers */
-#define GT64260_CPU_SYNC_BARRIER_PCI_0 0x00c0
-#define GT64260_CPU_SYNC_BARRIER_PCI_1 0x00c8
-
-/* CPU Access Protection Registers */
-#define GT64260_CPU_PROT_WINDOWS 8
-
-#define GT64260_CPU_PROT_ACCPROTECT (1<<16)
-#define GT64260_CPU_PROT_WRPROTECT (1<<17)
-#define GT64260_CPU_PROT_CACHEPROTECT (1<<18)
-
-#define GT64260_CPU_PROT_BASE_0 0x0180
-#define GT64260_CPU_PROT_TOP_0 0x0188
-#define GT64260_CPU_PROT_BASE_1 0x0190
-#define GT64260_CPU_PROT_TOP_1 0x0198
-#define GT64260_CPU_PROT_BASE_2 0x01a0
-#define GT64260_CPU_PROT_TOP_2 0x01a8
-#define GT64260_CPU_PROT_BASE_3 0x01b0
-#define GT64260_CPU_PROT_TOP_3 0x01b8
-#define GT64260_CPU_PROT_BASE_4 0x01c0
-#define GT64260_CPU_PROT_TOP_4 0x01c8
-#define GT64260_CPU_PROT_BASE_5 0x01d0
-#define GT64260_CPU_PROT_TOP_5 0x01d8
-#define GT64260_CPU_PROT_BASE_6 0x01e0
-#define GT64260_CPU_PROT_TOP_6 0x01e8
-#define GT64260_CPU_PROT_BASE_7 0x01f0
-#define GT64260_CPU_PROT_TOP_7 0x01f8
-
-/* CPU Snoop Control Registers */
-#define GT64260_CPU_SNOOP_WINDOWS 4
-
-#define GT64260_CPU_SNOOP_NONE 0x00000000
-#define GT64260_CPU_SNOOP_WT 0x00010000
-#define GT64260_CPU_SNOOP_WB 0x00020000
-#define GT64260_CPU_SNOOP_MASK 0x00030000
-#define GT64260_CPU_SNOOP_ALL_BITS GT64260_CPU_SNOOP_MASK
-
-#define GT64260_CPU_SNOOP_BASE_0 0x0380
-#define GT64260_CPU_SNOOP_TOP_0 0x0388
-#define GT64260_CPU_SNOOP_BASE_1 0x0390
-#define GT64260_CPU_SNOOP_TOP_1 0x0398
-#define GT64260_CPU_SNOOP_BASE_2 0x03a0
-#define GT64260_CPU_SNOOP_TOP_2 0x03a8
-#define GT64260_CPU_SNOOP_BASE_3 0x03b0
-#define GT64260_CPU_SNOOP_TOP_3 0x03b8
-
-/* CPU Error Report Registers */
-#define GT64260_CPU_ERR_ADDR_LO 0x0070
-#define GT64260_CPU_ERR_ADDR_HI 0x0078
-#define GT64260_CPU_ERR_DATA_LO 0x0128
-#define GT64260_CPU_ERR_DATA_HI 0x0130
-#define GT64260_CPU_ERR_PARITY 0x0138
-#define GT64260_CPU_ERR_CAUSE 0x0140
-#define GT64260_CPU_ERR_MASK 0x0148
-
-
-/*
- *****************************************************************************
- *
- * SDRAM Cotnroller Registers
- *
- *****************************************************************************
- */
-
-/* SDRAM Config Registers */
-#define GT64260_SDRAM_CONFIG 0x0448
-#define GT64260_SDRAM_OPERATION_MODE 0x0474
-#define GT64260_SDRAM_ADDR_CNTL 0x047c
-#define GT64260_SDRAM_TIMING_PARAMS 0x04b4
-#define GT64260_SDRAM_UMA_CNTL 0x04a4
-#define GT64260_SDRAM_XBAR_CNTL_LO 0x04a8
-#define GT64260_SDRAM_XBAR_CNTL_HI 0x04ac
-#define GT64260_SDRAM_XBAR_CNTL_TO 0x04b0
-
-/* SDRAM Banks Parameters Registers */
-#define GT64260_SDRAM_BANK_PARAMS_0 0x044c
-#define GT64260_SDRAM_BANK_PARAMS_1 0x0450
-#define GT64260_SDRAM_BANK_PARAMS_2 0x0454
-#define GT64260_SDRAM_BANK_PARAMS_3 0x0458
-
-/* SDRAM Error Report Registers */
-#define GT64260_SDRAM_ERR_DATA_LO 0x0484
-#define GT64260_SDRAM_ERR_DATA_HI 0x0480
-#define GT64260_SDRAM_ERR_ADDR 0x0490
-#define GT64260_SDRAM_ERR_ECC_RCVD 0x0488
-#define GT64260_SDRAM_ERR_ECC_CALC 0x048c
-#define GT64260_SDRAM_ERR_ECC_CNTL 0x0494
-#define GT64260_SDRAM_ERR_ECC_ERR_CNT 0x0498
-
-
-/*
- *****************************************************************************
- *
- * Device/BOOT Cotnroller Registers
- *
- *****************************************************************************
- */
-
-/* Device Control Registers */
-#define GT64260_DEV_BANK_PARAMS_0 0x045c
-#define GT64260_DEV_BANK_PARAMS_1 0x0460
-#define GT64260_DEV_BANK_PARAMS_2 0x0464
-#define GT64260_DEV_BANK_PARAMS_3 0x0468
-#define GT64260_DEV_BOOT_PARAMS 0x046c
-#define GT64260_DEV_IF_CNTL 0x04c0
-#define GT64260_DEV_IF_XBAR_CNTL_LO 0x04c8
-#define GT64260_DEV_IF_XBAR_CNTL_HI 0x04cc
-#define GT64260_DEV_IF_XBAR_CNTL_TO 0x04c4
-
-/* Device Interrupt Registers */
-#define GT64260_DEV_INTR_CAUSE 0x04d0
-#define GT64260_DEV_INTR_MASK 0x04d4
-#define GT64260_DEV_INTR_ERR_ADDR 0x04d8
-
-
-/*
- *****************************************************************************
- *
- * PCI Bridge Interface Registers
- *
- *****************************************************************************
- */
-
-/* PCI Configuration Access Registers */
-#define GT64260_PCI_0_CONFIG_ADDR 0x0cf8
-#define GT64260_PCI_0_CONFIG_DATA 0x0cfc
-#define GT64260_PCI_0_IACK 0x0c34
-
-#define GT64260_PCI_1_CONFIG_ADDR 0x0c78
-#define GT64260_PCI_1_CONFIG_DATA 0x0c7c
-#define GT64260_PCI_1_IACK 0x0cb4
-
-/* PCI Control Registers */
-#define GT64260_PCI_0_CMD 0x0c00
-#define GT64260_PCI_0_MODE 0x0d00
-#define GT64260_PCI_0_TO_RETRY 0x0c04
-#define GT64260_PCI_0_RD_BUF_DISCARD_TIMER 0x0d04
-#define GT64260_PCI_0_MSI_TRIGGER_TIMER 0x0c38
-#define GT64260_PCI_0_ARBITER_CNTL 0x1d00
-#define GT64260_PCI_0_XBAR_CNTL_LO 0x1d08
-#define GT64260_PCI_0_XBAR_CNTL_HI 0x1d0c
-#define GT64260_PCI_0_XBAR_CNTL_TO 0x1d04
-#define GT64260_PCI_0_RD_RESP_XBAR_CNTL_LO 0x1d18
-#define GT64260_PCI_0_RD_RESP_XBAR_CNTL_HI 0x1d1c
-#define GT64260_PCI_0_SYNC_BARRIER 0x1d10
-#define GT64260_PCI_0_P2P_CONFIG 0x1d14
-#define GT64260_PCI_0_P2P_SWAP_CNTL 0x1d54
-
-#define GT64260_PCI_1_CMD 0x0c80
-#define GT64260_PCI_1_MODE 0x0d80
-#define GT64260_PCI_1_TO_RETRY 0x0c84
-#define GT64260_PCI_1_RD_BUF_DISCARD_TIMER 0x0d84
-#define GT64260_PCI_1_MSI_TRIGGER_TIMER 0x0cb8
-#define GT64260_PCI_1_ARBITER_CNTL 0x1d80
-#define GT64260_PCI_1_XBAR_CNTL_LO 0x1d88
-#define GT64260_PCI_1_XBAR_CNTL_HI 0x1d8c
-#define GT64260_PCI_1_XBAR_CNTL_TO 0x1d84
-#define GT64260_PCI_1_RD_RESP_XBAR_CNTL_LO 0x1d98
-#define GT64260_PCI_1_RD_RESP_XBAR_CNTL_HI 0x1d9c
-#define GT64260_PCI_1_SYNC_BARRIER 0x1d90
-#define GT64260_PCI_1_P2P_CONFIG 0x1d94
-#define GT64260_PCI_1_P2P_SWAP_CNTL 0x1dd4
-
-/* PCI Access Control Regions Registers */
-#define GT64260_PCI_ACC_CNTL_WINDOWS 8
-
-#define GT64260_PCI_ACC_CNTL_PREFETCHEN (1<<12)
-#define GT64260_PCI_ACC_CNTL_DREADEN (1<<13)
-#define GT64260_PCI_ACC_CNTL_RDPREFETCH (1<<16)
-#define GT64260_PCI_ACC_CNTL_RDLINEPREFETCH (1<<17)
-#define GT64260_PCI_ACC_CNTL_RDMULPREFETCH (1<<18)
-#define GT64260_PCI_ACC_CNTL_MBURST_4_WORDS 0x00000000
-#define GT64260_PCI_ACC_CNTL_MBURST_8_WORDS 0x00100000
-#define GT64260_PCI_ACC_CNTL_MBURST_16_WORDS 0x00200000
-#define GT64260_PCI_ACC_CNTL_MBURST_MASK 0x00300000
-#define GT64260_PCI_ACC_CNTL_SWAP_BYTE 0x00000000
-#define GT64260_PCI_ACC_CNTL_SWAP_NONE 0x01000000
-#define GT64260_PCI_ACC_CNTL_SWAP_BYTE_WORD 0x02000000
-#define GT64260_PCI_ACC_CNTL_SWAP_WORD 0x03000000
-#define GT64260_PCI_ACC_CNTL_SWAP_MASK 0x03000000
-#define GT64260_PCI_ACC_CNTL_ACCPROT (1<<28)
-#define GT64260_PCI_ACC_CNTL_WRPROT (1<<29)
-
-#define GT64260_PCI_ACC_CNTL_ALL_BITS (GT64260_PCI_ACC_CNTL_PREFETCHEN | \
- GT64260_PCI_ACC_CNTL_DREADEN | \
- GT64260_PCI_ACC_CNTL_RDPREFETCH | \
- GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |\
- GT64260_PCI_ACC_CNTL_RDMULPREFETCH | \
- GT64260_PCI_ACC_CNTL_MBURST_MASK | \
- GT64260_PCI_ACC_CNTL_SWAP_MASK | \
- GT64260_PCI_ACC_CNTL_ACCPROT| \
- GT64260_PCI_ACC_CNTL_WRPROT)
-
-#define GT64260_PCI_0_ACC_CNTL_0_BASE_LO 0x1e00
-#define GT64260_PCI_0_ACC_CNTL_0_BASE_HI 0x1e04
-#define GT64260_PCI_0_ACC_CNTL_0_TOP 0x1e08
-#define GT64260_PCI_0_ACC_CNTL_1_BASE_LO 0x1e10
-#define GT64260_PCI_0_ACC_CNTL_1_BASE_HI 0x1e14
-#define GT64260_PCI_0_ACC_CNTL_1_TOP 0x1e18
-#define GT64260_PCI_0_ACC_CNTL_2_BASE_LO 0x1e20
-#define GT64260_PCI_0_ACC_CNTL_2_BASE_HI 0x1e24
-#define GT64260_PCI_0_ACC_CNTL_2_TOP 0x1e28
-#define GT64260_PCI_0_ACC_CNTL_3_BASE_LO 0x1e30
-#define GT64260_PCI_0_ACC_CNTL_3_BASE_HI 0x1e34
-#define GT64260_PCI_0_ACC_CNTL_3_TOP 0x1e38
-#define GT64260_PCI_0_ACC_CNTL_4_BASE_LO 0x1e40
-#define GT64260_PCI_0_ACC_CNTL_4_BASE_HI 0x1e44
-#define GT64260_PCI_0_ACC_CNTL_4_TOP 0x1e48
-#define GT64260_PCI_0_ACC_CNTL_5_BASE_LO 0x1e50
-#define GT64260_PCI_0_ACC_CNTL_5_BASE_HI 0x1e54
-#define GT64260_PCI_0_ACC_CNTL_5_TOP 0x1e58
-#define GT64260_PCI_0_ACC_CNTL_6_BASE_LO 0x1e60
-#define GT64260_PCI_0_ACC_CNTL_6_BASE_HI 0x1e64
-#define GT64260_PCI_0_ACC_CNTL_6_TOP 0x1e68
-#define GT64260_PCI_0_ACC_CNTL_7_BASE_LO 0x1e70
-#define GT64260_PCI_0_ACC_CNTL_7_BASE_HI 0x1e74
-#define GT64260_PCI_0_ACC_CNTL_7_TOP 0x1e78
-
-#define GT64260_PCI_1_ACC_CNTL_0_BASE_LO 0x1e80
-#define GT64260_PCI_1_ACC_CNTL_0_BASE_HI 0x1e84
-#define GT64260_PCI_1_ACC_CNTL_0_TOP 0x1e88
-#define GT64260_PCI_1_ACC_CNTL_1_BASE_LO 0x1e90
-#define GT64260_PCI_1_ACC_CNTL_1_BASE_HI 0x1e94
-#define GT64260_PCI_1_ACC_CNTL_1_TOP 0x1e98
-#define GT64260_PCI_1_ACC_CNTL_2_BASE_LO 0x1ea0
-#define GT64260_PCI_1_ACC_CNTL_2_BASE_HI 0x1ea4
-#define GT64260_PCI_1_ACC_CNTL_2_TOP 0x1ea8
-#define GT64260_PCI_1_ACC_CNTL_3_BASE_LO 0x1eb0
-#define GT64260_PCI_1_ACC_CNTL_3_BASE_HI 0x1eb4
-#define GT64260_PCI_1_ACC_CNTL_3_TOP 0x1eb8
-#define GT64260_PCI_1_ACC_CNTL_4_BASE_LO 0x1ec0
-#define GT64260_PCI_1_ACC_CNTL_4_BASE_HI 0x1ec4
-#define GT64260_PCI_1_ACC_CNTL_4_TOP 0x1ec8
-#define GT64260_PCI_1_ACC_CNTL_5_BASE_LO 0x1ed0
-#define GT64260_PCI_1_ACC_CNTL_5_BASE_HI 0x1ed4
-#define GT64260_PCI_1_ACC_CNTL_5_TOP 0x1ed8
-#define GT64260_PCI_1_ACC_CNTL_6_BASE_LO 0x1ee0
-#define GT64260_PCI_1_ACC_CNTL_6_BASE_HI 0x1ee4
-#define GT64260_PCI_1_ACC_CNTL_6_TOP 0x1ee8
-#define GT64260_PCI_1_ACC_CNTL_7_BASE_LO 0x1ef0
-#define GT64260_PCI_1_ACC_CNTL_7_BASE_HI 0x1ef4
-#define GT64260_PCI_1_ACC_CNTL_7_TOP 0x1ef8
-
-/* PCI Snoop Control Registers */
-#define GT64260_PCI_SNOOP_WINDOWS 4
-
-#define GT64260_PCI_SNOOP_NONE 0x00000000
-#define GT64260_PCI_SNOOP_WT 0x00001000
-#define GT64260_PCI_SNOOP_WB 0x00002000
-
-#define GT64260_PCI_0_SNOOP_0_BASE_LO 0x1f00
-#define GT64260_PCI_0_SNOOP_0_BASE_HI 0x1f04
-#define GT64260_PCI_0_SNOOP_0_TOP 0x1f08
-#define GT64260_PCI_0_SNOOP_1_BASE_LO 0x1f10
-#define GT64260_PCI_0_SNOOP_1_BASE_HI 0x1f14
-#define GT64260_PCI_0_SNOOP_1_TOP 0x1f18
-#define GT64260_PCI_0_SNOOP_2_BASE_LO 0x1f20
-#define GT64260_PCI_0_SNOOP_2_BASE_HI 0x1f24
-#define GT64260_PCI_0_SNOOP_2_TOP 0x1f28
-#define GT64260_PCI_0_SNOOP_3_BASE_LO 0x1f30
-#define GT64260_PCI_0_SNOOP_3_BASE_HI 0x1f34
-#define GT64260_PCI_0_SNOOP_3_TOP 0x1f38
-
-#define GT64260_PCI_1_SNOOP_0_BASE_LO 0x1f80
-#define GT64260_PCI_1_SNOOP_0_BASE_HI 0x1f84
-#define GT64260_PCI_1_SNOOP_0_TOP 0x1f88
-#define GT64260_PCI_1_SNOOP_1_BASE_LO 0x1f90
-#define GT64260_PCI_1_SNOOP_1_BASE_HI 0x1f94
-#define GT64260_PCI_1_SNOOP_1_TOP 0x1f98
-#define GT64260_PCI_1_SNOOP_2_BASE_LO 0x1fa0
-#define GT64260_PCI_1_SNOOP_2_BASE_HI 0x1fa4
-#define GT64260_PCI_1_SNOOP_2_TOP 0x1fa8
-#define GT64260_PCI_1_SNOOP_3_BASE_LO 0x1fb0
-#define GT64260_PCI_1_SNOOP_3_BASE_HI 0x1fb4
-#define GT64260_PCI_1_SNOOP_3_TOP 0x1fb8
-
-/* PCI Error Report Registers */
-#define GT64260_PCI_0_ERR_SERR_MASK 0x0c28
-#define GT64260_PCI_0_ERR_ADDR_LO 0x1d40
-#define GT64260_PCI_0_ERR_ADDR_HI 0x1d44
-#define GT64260_PCI_0_ERR_DATA_LO 0x1d48
-#define GT64260_PCI_0_ERR_DATA_HI 0x1d4c
-#define GT64260_PCI_0_ERR_CMD 0x1d50
-#define GT64260_PCI_0_ERR_CAUSE 0x1d58
-#define GT64260_PCI_0_ERR_MASK 0x1d5c
-
-#define GT64260_PCI_1_ERR_SERR_MASK 0x0ca8
-#define GT64260_PCI_1_ERR_ADDR_LO 0x1dc0
-#define GT64260_PCI_1_ERR_ADDR_HI 0x1dc4
-#define GT64260_PCI_1_ERR_DATA_LO 0x1dc8
-#define GT64260_PCI_1_ERR_DATA_HI 0x1dcc
-#define GT64260_PCI_1_ERR_CMD 0x1dd0
-#define GT64260_PCI_1_ERR_CAUSE 0x1dd8
-#define GT64260_PCI_1_ERR_MASK 0x1ddc
-
-/* PCI Slave Address Decoding Registers */
-#define GT64260_PCI_SCS_WINDOWS 4
-#define GT64260_PCI_CS_WINDOWS 4
-#define GT64260_PCI_BOOT_WINDOWS 1
-#define GT64260_PCI_P2P_MEM_WINDOWS 2
-#define GT64260_PCI_P2P_IO_WINDOWS 1
-#define GT64260_PCI_DAC_SCS_WINDOWS 4
-#define GT64260_PCI_DAC_CS_WINDOWS 4
-#define GT64260_PCI_DAC_BOOT_WINDOWS 1
-#define GT64260_PCI_DAC_P2P_MEM_WINDOWS 2
-
-#define GT64260_PCI_0_SLAVE_SCS_0_SIZE 0x0c08
-#define GT64260_PCI_0_SLAVE_SCS_1_SIZE 0x0d08
-#define GT64260_PCI_0_SLAVE_SCS_2_SIZE 0x0c0c
-#define GT64260_PCI_0_SLAVE_SCS_3_SIZE 0x0d0c
-#define GT64260_PCI_0_SLAVE_CS_0_SIZE 0x0c10
-#define GT64260_PCI_0_SLAVE_CS_1_SIZE 0x0d10
-#define GT64260_PCI_0_SLAVE_CS_2_SIZE 0x0d18
-#define GT64260_PCI_0_SLAVE_CS_3_SIZE 0x0c14
-#define GT64260_PCI_0_SLAVE_BOOT_SIZE 0x0d14
-#define GT64260_PCI_0_SLAVE_P2P_MEM_0_SIZE 0x0d1c
-#define GT64260_PCI_0_SLAVE_P2P_MEM_1_SIZE 0x0d20
-#define GT64260_PCI_0_SLAVE_P2P_IO_SIZE 0x0d24
-#define GT64260_PCI_0_SLAVE_CPU_SIZE 0x0d28
-
-#define GT64260_PCI_0_SLAVE_DAC_SCS_0_SIZE 0x0e00
-#define GT64260_PCI_0_SLAVE_DAC_SCS_1_SIZE 0x0e04
-#define GT64260_PCI_0_SLAVE_DAC_SCS_2_SIZE 0x0e08
-#define GT64260_PCI_0_SLAVE_DAC_SCS_3_SIZE 0x0e0c
-#define GT64260_PCI_0_SLAVE_DAC_CS_0_SIZE 0x0e10
-#define GT64260_PCI_0_SLAVE_DAC_CS_1_SIZE 0x0e14
-#define GT64260_PCI_0_SLAVE_DAC_CS_2_SIZE 0x0e18
-#define GT64260_PCI_0_SLAVE_DAC_CS_3_SIZE 0x0e1c
-#define GT64260_PCI_0_SLAVE_DAC_BOOT_SIZE 0x0e20
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_0_SIZE 0x0e24
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_1_SIZE 0x0e28
-#define GT64260_PCI_0_SLAVE_DAC_CPU_SIZE 0x0e2c
-
-#define GT64260_PCI_0_SLAVE_EXP_ROM_SIZE 0x0d2c
-
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_SCS_0 (1<<0)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_SCS_1 (1<<1)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_SCS_2 (1<<2)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_SCS_3 (1<<3)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_CS_0 (1<<4)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_CS_1 (1<<5)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_CS_2 (1<<6)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_CS_3 (1<<7)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_BOOT (1<<8)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_REG_MEM (1<<9)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_REG_IO (1<<10)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_P2P_MEM_0 (1<<11)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_P2P_MEM_1 (1<<12)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_P2P_IO (1<<13)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_CPU (1<<14)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_SCS_0 (1<<15)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_SCS_1 (1<<16)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_SCS_2 (1<<17)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_SCS_3 (1<<18)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_CS_0 (1<<19)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_CS_1 (1<<20)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_CS_2 (1<<21)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_CS_3 (1<<22)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_BOOT (1<<23)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_P2P_MEM_0 (1<<24)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_P2P_MEM_1 (1<<25)
-#define GT64260_PCI_SLAVE_BAR_REG_ENABLES_DAC_CPU (1<<26)
-
-#define GT64260_PCI_0_SLAVE_BAR_REG_ENABLES 0x0c3c
-#define GT64260_PCI_0_SLAVE_SCS_0_REMAP 0x0c48
-#define GT64260_PCI_0_SLAVE_SCS_1_REMAP 0x0d48
-#define GT64260_PCI_0_SLAVE_SCS_2_REMAP 0x0c4c
-#define GT64260_PCI_0_SLAVE_SCS_3_REMAP 0x0d4c
-#define GT64260_PCI_0_SLAVE_CS_0_REMAP 0x0c50
-#define GT64260_PCI_0_SLAVE_CS_1_REMAP 0x0d50
-#define GT64260_PCI_0_SLAVE_CS_2_REMAP 0x0d58
-#define GT64260_PCI_0_SLAVE_CS_3_REMAP 0x0c54
-#define GT64260_PCI_0_SLAVE_BOOT_REMAP 0x0d54
-#define GT64260_PCI_0_SLAVE_P2P_MEM_0_REMAP_LO 0x0d5c
-#define GT64260_PCI_0_SLAVE_P2P_MEM_0_REMAP_HI 0x0d60
-#define GT64260_PCI_0_SLAVE_P2P_MEM_1_REMAP_LO 0x0d64
-#define GT64260_PCI_0_SLAVE_P2P_MEM_1_REMAP_HI 0x0d68
-#define GT64260_PCI_0_SLAVE_P2P_IO_REMAP 0x0d6c
-#define GT64260_PCI_0_SLAVE_CPU_REMAP 0x0d70
-
-#define GT64260_PCI_0_SLAVE_DAC_SCS_0_REMAP 0x0f00
-#define GT64260_PCI_0_SLAVE_DAC_SCS_1_REMAP 0x0f04
-#define GT64260_PCI_0_SLAVE_DAC_SCS_2_REMAP 0x0f08
-#define GT64260_PCI_0_SLAVE_DAC_SCS_3_REMAP 0x0f0c
-#define GT64260_PCI_0_SLAVE_DAC_CS_0_REMAP 0x0f10
-#define GT64260_PCI_0_SLAVE_DAC_CS_1_REMAP 0x0f14
-#define GT64260_PCI_0_SLAVE_DAC_CS_2_REMAP 0x0f18
-#define GT64260_PCI_0_SLAVE_DAC_CS_3_REMAP 0x0f1c
-#define GT64260_PCI_0_SLAVE_DAC_BOOT_REMAP 0x0f20
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_0_REMAP_LO 0x0f24
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_0_REMAP_HI 0x0f28
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_1_REMAP_LO 0x0f2c
-#define GT64260_PCI_0_SLAVE_DAC_P2P_MEM_1_REMAP_HI 0x0f30
-#define GT64260_PCI_0_SLAVE_DAC_CPU_REMAP 0x0f34
-
-#define GT64260_PCI_0_SLAVE_EXP_ROM_REMAP 0x0f38
-#define GT64260_PCI_0_SLAVE_PCI_DECODE_CNTL 0x0d3c
-
-#define GT64260_PCI_1_SLAVE_SCS_0_SIZE 0x0c88
-#define GT64260_PCI_1_SLAVE_SCS_1_SIZE 0x0d88
-#define GT64260_PCI_1_SLAVE_SCS_2_SIZE 0x0c8c
-#define GT64260_PCI_1_SLAVE_SCS_3_SIZE 0x0d8c
-#define GT64260_PCI_1_SLAVE_CS_0_SIZE 0x0c90
-#define GT64260_PCI_1_SLAVE_CS_1_SIZE 0x0d90
-#define GT64260_PCI_1_SLAVE_CS_2_SIZE 0x0d98
-#define GT64260_PCI_1_SLAVE_CS_3_SIZE 0x0c94
-#define GT64260_PCI_1_SLAVE_BOOT_SIZE 0x0d94
-#define GT64260_PCI_1_SLAVE_P2P_MEM_0_SIZE 0x0d9c
-#define GT64260_PCI_1_SLAVE_P2P_MEM_1_SIZE 0x0da0
-#define GT64260_PCI_1_SLAVE_P2P_IO_SIZE 0x0da4
-#define GT64260_PCI_1_SLAVE_CPU_SIZE 0x0da8
-
-#define GT64260_PCI_1_SLAVE_DAC_SCS_0_SIZE 0x0e80
-#define GT64260_PCI_1_SLAVE_DAC_SCS_1_SIZE 0x0e84
-#define GT64260_PCI_1_SLAVE_DAC_SCS_2_SIZE 0x0e88
-#define GT64260_PCI_1_SLAVE_DAC_SCS_3_SIZE 0x0e8c
-#define GT64260_PCI_1_SLAVE_DAC_CS_0_SIZE 0x0e90
-#define GT64260_PCI_1_SLAVE_DAC_CS_1_SIZE 0x0e94
-#define GT64260_PCI_1_SLAVE_DAC_CS_2_SIZE 0x0e98
-#define GT64260_PCI_1_SLAVE_DAC_CS_3_SIZE 0x0e9c
-#define GT64260_PCI_1_SLAVE_DAC_BOOT_SIZE 0x0ea0
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_0_SIZE 0x0ea4
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_1_SIZE 0x0ea8
-#define GT64260_PCI_1_SLAVE_DAC_CPU_SIZE 0x0eac
-
-#define GT64260_PCI_1_SLAVE_EXP_ROM_SIZE 0x0dac
-
-#define GT64260_PCI_1_SLAVE_BAR_REG_ENABLES 0x0cbc
-#define GT64260_PCI_1_SLAVE_SCS_0_REMAP 0x0cc8
-#define GT64260_PCI_1_SLAVE_SCS_1_REMAP 0x0dc8
-#define GT64260_PCI_1_SLAVE_SCS_2_REMAP 0x0ccc
-#define GT64260_PCI_1_SLAVE_SCS_3_REMAP 0x0dcc
-#define GT64260_PCI_1_SLAVE_CS_0_REMAP 0x0cd0
-#define GT64260_PCI_1_SLAVE_CS_1_REMAP 0x0dd0
-#define GT64260_PCI_1_SLAVE_CS_2_REMAP 0x0dd8
-#define GT64260_PCI_1_SLAVE_CS_3_REMAP 0x0cd4
-#define GT64260_PCI_1_SLAVE_BOOT_REMAP 0x0dd4
-#define GT64260_PCI_1_SLAVE_P2P_MEM_0_REMAP_LO 0x0ddc
-#define GT64260_PCI_1_SLAVE_P2P_MEM_0_REMAP_HI 0x0de0
-#define GT64260_PCI_1_SLAVE_P2P_MEM_1_REMAP_LO 0x0de4
-#define GT64260_PCI_1_SLAVE_P2P_MEM_1_REMAP_HI 0x0de8
-#define GT64260_PCI_1_SLAVE_P2P_IO_REMAP 0x0dec
-#define GT64260_PCI_1_SLAVE_CPU_REMAP 0x0df0
-
-#define GT64260_PCI_1_SLAVE_DAC_SCS_0_REMAP 0x0f80
-#define GT64260_PCI_1_SLAVE_DAC_SCS_1_REMAP 0x0f84
-#define GT64260_PCI_1_SLAVE_DAC_SCS_2_REMAP 0x0f88
-#define GT64260_PCI_1_SLAVE_DAC_SCS_3_REMAP 0x0f8c
-#define GT64260_PCI_1_SLAVE_DAC_CS_0_REMAP 0x0f90
-#define GT64260_PCI_1_SLAVE_DAC_CS_1_REMAP 0x0f94
-#define GT64260_PCI_1_SLAVE_DAC_CS_2_REMAP 0x0f98
-#define GT64260_PCI_1_SLAVE_DAC_CS_3_REMAP 0x0f9c
-#define GT64260_PCI_1_SLAVE_DAC_BOOT_REMAP 0x0fa0
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_0_REMAP_LO 0x0fa4
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_0_REMAP_HI 0x0fa8
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_1_REMAP_LO 0x0fac
-#define GT64260_PCI_1_SLAVE_DAC_P2P_MEM_1_REMAP_HI 0x0fb0
-#define GT64260_PCI_1_SLAVE_DAC_CPU_REMAP 0x0fb4
-
-#define GT64260_PCI_1_SLAVE_EXP_ROM_REMAP 0x0fb8
-#define GT64260_PCI_1_SLAVE_PCI_DECODE_CNTL 0x0dbc
-
-
-/*
- *****************************************************************************
- *
- * I2O Controller Interface Registers
- *
- *****************************************************************************
- */
-
-/* FIXME: fill in */
-
-
-
-/*
- *****************************************************************************
- *
- * DMA Controller Interface Registers
- *
- *****************************************************************************
- */
-
-/* FIXME: fill in */
-
-
-/*
- *****************************************************************************
- *
- * Timer/Counter Interface Registers
- *
- *****************************************************************************
- */
-
-/* FIXME: fill in */
-
-
-/*
- *****************************************************************************
- *
- * Communications Controller (Enet, Serial, etc.) Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_ENET_0_CNTL_LO 0xf200
-#define GT64260_ENET_0_CNTL_HI 0xf204
-#define GT64260_ENET_0_RX_BUF_PCI_ADDR_HI 0xf208
-#define GT64260_ENET_0_TX_BUF_PCI_ADDR_HI 0xf20c
-#define GT64260_ENET_0_RX_DESC_ADDR_HI 0xf210
-#define GT64260_ENET_0_TX_DESC_ADDR_HI 0xf214
-#define GT64260_ENET_0_HASH_TAB_PCI_ADDR_HI 0xf218
-#define GT64260_ENET_1_CNTL_LO 0xf220
-#define GT64260_ENET_1_CNTL_HI 0xf224
-#define GT64260_ENET_1_RX_BUF_PCI_ADDR_HI 0xf228
-#define GT64260_ENET_1_TX_BUF_PCI_ADDR_HI 0xf22c
-#define GT64260_ENET_1_RX_DESC_ADDR_HI 0xf230
-#define GT64260_ENET_1_TX_DESC_ADDR_HI 0xf234
-#define GT64260_ENET_1_HASH_TAB_PCI_ADDR_HI 0xf238
-#define GT64260_ENET_2_CNTL_LO 0xf240
-#define GT64260_ENET_2_CNTL_HI 0xf244
-#define GT64260_ENET_2_RX_BUF_PCI_ADDR_HI 0xf248
-#define GT64260_ENET_2_TX_BUF_PCI_ADDR_HI 0xf24c
-#define GT64260_ENET_2_RX_DESC_ADDR_HI 0xf250
-#define GT64260_ENET_2_TX_DESC_ADDR_HI 0xf254
-#define GT64260_ENET_2_HASH_TAB_PCI_ADDR_HI 0xf258
-
-#define GT64260_MPSC_0_CNTL_LO 0xf280
-#define GT64260_MPSC_0_CNTL_HI 0xf284
-#define GT64260_MPSC_0_RX_BUF_PCI_ADDR_HI 0xf288
-#define GT64260_MPSC_0_TX_BUF_PCI_ADDR_HI 0xf28c
-#define GT64260_MPSC_0_RX_DESC_ADDR_HI 0xf290
-#define GT64260_MPSC_0_TX_DESC_ADDR_HI 0xf294
-#define GT64260_MPSC_1_CNTL_LO 0xf2c0
-#define GT64260_MPSC_1_CNTL_HI 0xf2c4
-#define GT64260_MPSC_1_RX_BUF_PCI_ADDR_HI 0xf2c8
-#define GT64260_MPSC_1_TX_BUF_PCI_ADDR_HI 0xf2cc
-#define GT64260_MPSC_1_RX_DESC_ADDR_HI 0xf2d0
-#define GT64260_MPSC_1_TX_DESC_ADDR_HI 0xf2d4
-
-#define GT64260_SER_INIT_PCI_ADDR_HI 0xf320
-#define GT64260_SER_INIT_LAST_DATA 0xf324
-#define GT64260_SER_INIT_CONTROL 0xf328
-#define GT64260_SER_INIT_STATUS 0xf32c
-
-#define GT64260_COMM_ARBITER_CNTL 0xf300
-#define GT64260_COMM_CONFIG 0xb40c
-#define GT64260_COMM_XBAR_TO 0xf304
-#define GT64260_COMM_INTR_CAUSE 0xf310
-#define GT64260_COMM_INTR_MASK 0xf314
-#define GT64260_COMM_ERR_ADDR 0xf318
-
-
-/*
- *****************************************************************************
- *
- * Fast Ethernet Controller Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_ENET_PHY_ADDR 0x2000
-#define GT64260_ENET_ESMIR 0x2010
-
-#define GT64260_ENET_E0PCR 0x2400
-#define GT64260_ENET_E0PCXR 0x2408
-#define GT64260_ENET_E0PCMR 0x2410
-#define GT64260_ENET_E0PSR 0x2418
-#define GT64260_ENET_E0SPR 0x2420
-#define GT64260_ENET_E0HTPR 0x2428
-#define GT64260_ENET_E0FCSAL 0x2430
-#define GT64260_ENET_E0FCSAH 0x2438
-#define GT64260_ENET_E0SDCR 0x2440
-#define GT64260_ENET_E0SDCMR 0x2448
-#define GT64260_ENET_E0ICR 0x2450
-#define GT64260_ENET_E0IMR 0x2458
-#define GT64260_ENET_E0FRDP0 0x2480
-#define GT64260_ENET_E0FRDP1 0x2484
-#define GT64260_ENET_E0FRDP2 0x2488
-#define GT64260_ENET_E0FRDP3 0x248c
-#define GT64260_ENET_E0CRDP0 0x24a0
-#define GT64260_ENET_E0CRDP1 0x24a4
-#define GT64260_ENET_E0CRDP2 0x24a8
-#define GT64260_ENET_E0CRDP3 0x24ac
-#define GT64260_ENET_E0CTDP0 0x24e0
-#define GT64260_ENET_E0CTDP1 0x24e4
-#define GT64260_ENET_0_DSCP2P0L 0x2460
-#define GT64260_ENET_0_DSCP2P0H 0x2464
-#define GT64260_ENET_0_DSCP2P1L 0x2468
-#define GT64260_ENET_0_DSCP2P1H 0x246c
-#define GT64260_ENET_0_VPT2P 0x2470
-#define GT64260_ENET_0_MIB_CTRS 0x2500
-
-#define GT64260_ENET_E1PCR 0x2800
-#define GT64260_ENET_E1PCXR 0x2808
-#define GT64260_ENET_E1PCMR 0x2810
-#define GT64260_ENET_E1PSR 0x2818
-#define GT64260_ENET_E1SPR 0x2820
-#define GT64260_ENET_E1HTPR 0x2828
-#define GT64260_ENET_E1FCSAL 0x2830
-#define GT64260_ENET_E1FCSAH 0x2838
-#define GT64260_ENET_E1SDCR 0x2840
-#define GT64260_ENET_E1SDCMR 0x2848
-#define GT64260_ENET_E1ICR 0x2850
-#define GT64260_ENET_E1IMR 0x2858
-#define GT64260_ENET_E1FRDP0 0x2880
-#define GT64260_ENET_E1FRDP1 0x2884
-#define GT64260_ENET_E1FRDP2 0x2888
-#define GT64260_ENET_E1FRDP3 0x288c
-#define GT64260_ENET_E1CRDP0 0x28a0
-#define GT64260_ENET_E1CRDP1 0x28a4
-#define GT64260_ENET_E1CRDP2 0x28a8
-#define GT64260_ENET_E1CRDP3 0x28ac
-#define GT64260_ENET_E1CTDP0 0x28e0
-#define GT64260_ENET_E1CTDP1 0x28e4
-#define GT64260_ENET_1_DSCP2P0L 0x2860
-#define GT64260_ENET_1_DSCP2P0H 0x2864
-#define GT64260_ENET_1_DSCP2P1L 0x2868
-#define GT64260_ENET_1_DSCP2P1H 0x286c
-#define GT64260_ENET_1_VPT2P 0x2870
-#define GT64260_ENET_1_MIB_CTRS 0x2900
-
-#define GT64260_ENET_E2PCR 0x2c00
-#define GT64260_ENET_E2PCXR 0x2c08
-#define GT64260_ENET_E2PCMR 0x2c10
-#define GT64260_ENET_E2PSR 0x2c18
-#define GT64260_ENET_E2SPR 0x2c20
-#define GT64260_ENET_E2HTPR 0x2c28
-#define GT64260_ENET_E2FCSAL 0x2c30
-#define GT64260_ENET_E2FCSAH 0x2c38
-#define GT64260_ENET_E2SDCR 0x2c40
-#define GT64260_ENET_E2SDCMR 0x2c48
-#define GT64260_ENET_E2ICR 0x2c50
-#define GT64260_ENET_E2IMR 0x2c58
-#define GT64260_ENET_E2FRDP0 0x2c80
-#define GT64260_ENET_E2FRDP1 0x2c84
-#define GT64260_ENET_E2FRDP2 0x2c88
-#define GT64260_ENET_E2FRDP3 0x2c8c
-#define GT64260_ENET_E2CRDP0 0x2ca0
-#define GT64260_ENET_E2CRDP1 0x2ca4
-#define GT64260_ENET_E2CRDP2 0x2ca8
-#define GT64260_ENET_E2CRDP3 0x2cac
-#define GT64260_ENET_E2CTDP0 0x2ce0
-#define GT64260_ENET_E2CTDP1 0x2ce4
-#define GT64260_ENET_2_DSCP2P0L 0x2c60
-#define GT64260_ENET_2_DSCP2P0H 0x2c64
-#define GT64260_ENET_2_DSCP2P1L 0x2c68
-#define GT64260_ENET_2_DSCP2P1H 0x2c6c
-#define GT64260_ENET_2_VPT2P 0x2c70
-#define GT64260_ENET_2_MIB_CTRS 0x2d00
-
-
-/*
- *****************************************************************************
- *
- * Multi-Protocol Serial Controller Interface Registers
- *
- *****************************************************************************
- */
-
-/* Signal Routing */
-#define GT64260_MPSC_MRR 0xb400
-#define GT64260_MPSC_RCRR 0xb404
-#define GT64260_MPSC_TCRR 0xb408
-
-/* Main Configuratino Registers */
-#define GT64260_MPSC_0_MMCRL 0x8000
-#define GT64260_MPSC_0_MMCRH 0x8004
-#define GT64260_MPSC_0_MPCR 0x8008
-#define GT64260_MPSC_0_CHR_1 0x800c
-#define GT64260_MPSC_0_CHR_2 0x8010
-#define GT64260_MPSC_0_CHR_3 0x8014
-#define GT64260_MPSC_0_CHR_4 0x8018
-#define GT64260_MPSC_0_CHR_5 0x801c
-#define GT64260_MPSC_0_CHR_6 0x8020
-#define GT64260_MPSC_0_CHR_7 0x8024
-#define GT64260_MPSC_0_CHR_8 0x8028
-#define GT64260_MPSC_0_CHR_9 0x802c
-#define GT64260_MPSC_0_CHR_10 0x8030
-#define GT64260_MPSC_0_CHR_11 0x8034
-
-#define GT64260_MPSC_1_MMCRL 0x9000
-#define GT64260_MPSC_1_MMCRH 0x9004
-#define GT64260_MPSC_1_MPCR 0x9008
-#define GT64260_MPSC_1_CHR_1 0x900c
-#define GT64260_MPSC_1_CHR_2 0x9010
-#define GT64260_MPSC_1_CHR_3 0x9014
-#define GT64260_MPSC_1_CHR_4 0x9018
-#define GT64260_MPSC_1_CHR_5 0x901c
-#define GT64260_MPSC_1_CHR_6 0x9020
-#define GT64260_MPSC_1_CHR_7 0x9024
-#define GT64260_MPSC_1_CHR_8 0x9028
-#define GT64260_MPSC_1_CHR_9 0x902c
-#define GT64260_MPSC_1_CHR_10 0x9030
-#define GT64260_MPSC_1_CHR_11 0x9034
-
-#define GT64260_MPSC_0_INTR_CAUSE 0xb804
-#define GT64260_MPSC_0_INTR_MASK 0xb884
-#define GT64260_MPSC_1_INTR_CAUSE 0xb80c
-#define GT64260_MPSC_1_INTR_MASK 0xb88c
-
-#define GT64260_MPSC_UART_CR_TEV (1<<1)
-#define GT64260_MPSC_UART_CR_TA (1<<7)
-#define GT64260_MPSC_UART_CR_TTCS (1<<9)
-#define GT64260_MPSC_UART_CR_REV (1<<17)
-#define GT64260_MPSC_UART_CR_RA (1<<23)
-#define GT64260_MPSC_UART_CR_CRD (1<<25)
-#define GT64260_MPSC_UART_CR_EH (1<<31)
-
-#define GT64260_MPSC_UART_ESR_CTS (1<<0)
-#define GT64260_MPSC_UART_ESR_CD (1<<1)
-#define GT64260_MPSC_UART_ESR_TIDLE (1<<3)
-#define GT64260_MPSC_UART_ESR_RHS (1<<5)
-#define GT64260_MPSC_UART_ESR_RLS (1<<7)
-#define GT64260_MPSC_UART_ESR_RLIDL (1<<11)
-
-
-/*
- *****************************************************************************
- *
- * Serial DMA Controller Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_SDMA_0_SDC 0x4000
-#define GT64260_SDMA_0_SDCM 0x4008
-#define GT64260_SDMA_0_RX_DESC 0x4800
-#define GT64260_SDMA_0_RX_BUF_PTR 0x4808
-#define GT64260_SDMA_0_SCRDP 0x4810
-#define GT64260_SDMA_0_TX_DESC 0x4c00
-#define GT64260_SDMA_0_SCTDP 0x4c10
-#define GT64260_SDMA_0_SFTDP 0x4c14
-
-#define GT64260_SDMA_1_SDC 0x6000
-#define GT64260_SDMA_1_SDCM 0x6008
-#define GT64260_SDMA_1_RX_DESC 0x6800
-#define GT64260_SDMA_1_RX_BUF_PTR 0x6808
-#define GT64260_SDMA_1_SCRDP 0x6810
-#define GT64260_SDMA_1_TX_DESC 0x6c00
-#define GT64260_SDMA_1_SCTDP 0x6c10
-#define GT64260_SDMA_1_SFTDP 0x6c14
-
-#define GT64260_SDMA_INTR_CAUSE 0xb800
-#define GT64260_SDMA_INTR_MASK 0xb880
-
-#define GT64260_SDMA_DESC_CMDSTAT_PE (1<<0)
-#define GT64260_SDMA_DESC_CMDSTAT_CDL (1<<1)
-#define GT64260_SDMA_DESC_CMDSTAT_FR (1<<3)
-#define GT64260_SDMA_DESC_CMDSTAT_OR (1<<6)
-#define GT64260_SDMA_DESC_CMDSTAT_BR (1<<9)
-#define GT64260_SDMA_DESC_CMDSTAT_MI (1<<10)
-#define GT64260_SDMA_DESC_CMDSTAT_A (1<<11)
-#define GT64260_SDMA_DESC_CMDSTAT_AM (1<<12)
-#define GT64260_SDMA_DESC_CMDSTAT_CT (1<<13)
-#define GT64260_SDMA_DESC_CMDSTAT_C (1<<14)
-#define GT64260_SDMA_DESC_CMDSTAT_ES (1<<15)
-#define GT64260_SDMA_DESC_CMDSTAT_L (1<<16)
-#define GT64260_SDMA_DESC_CMDSTAT_F (1<<17)
-#define GT64260_SDMA_DESC_CMDSTAT_P (1<<18)
-#define GT64260_SDMA_DESC_CMDSTAT_EI (1<<23)
-#define GT64260_SDMA_DESC_CMDSTAT_O (1<<31)
-
-#define GT64260_SDMA_SDC_RFT (1<<0)
-#define GT64260_SDMA_SDC_SFM (1<<1)
-#define GT64260_SDMA_SDC_BLMR (1<<6)
-#define GT64260_SDMA_SDC_BLMT (1<<7)
-#define GT64260_SDMA_SDC_POVR (1<<8)
-#define GT64260_SDMA_SDC_RIFB (1<<9)
-
-#define GT64260_SDMA_SDCM_ERD (1<<7)
-#define GT64260_SDMA_SDCM_AR (1<<15)
-#define GT64260_SDMA_SDCM_STD (1<<16)
-#define GT64260_SDMA_SDCM_TXD (1<<23)
-#define GT64260_SDMA_SDCM_AT (1<<31)
-
-#define GT64260_SDMA_0_CAUSE_RXBUF (1<<0)
-#define GT64260_SDMA_0_CAUSE_RXERR (1<<1)
-#define GT64260_SDMA_0_CAUSE_TXBUF (1<<2)
-#define GT64260_SDMA_0_CAUSE_TXEND (1<<3)
-#define GT64260_SDMA_1_CAUSE_RXBUF (1<<8)
-#define GT64260_SDMA_1_CAUSE_RXERR (1<<9)
-#define GT64260_SDMA_1_CAUSE_TXBUF (1<<10)
-#define GT64260_SDMA_1_CAUSE_TXEND (1<<11)
-
-
-/*
- *****************************************************************************
- *
- * Baud Rate Generator Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_BRG_0_BCR 0xb200
-#define GT64260_BRG_0_BTR 0xb204
-#define GT64260_BRG_1_BCR 0xb208
-#define GT64260_BRG_1_BTR 0xb20c
-#define GT64260_BRG_2_BCR 0xb210
-#define GT64260_BRG_2_BTR 0xb214
-
-#define GT64260_BRG_INTR_CAUSE 0xb834
-#define GT64260_BRG_INTR_MASK 0xb8b4
-
-
-/*
- *****************************************************************************
- *
- * Watchdog Timer Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_WDT_WDC 0xb410
-#define GT64260_WDT_WDV 0xb414
-
-
-/*
- *****************************************************************************
- *
- * General Purpose Pins Controller Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_GPP_IO_CNTL 0xf100
-#define GT64260_GPP_LEVEL_CNTL 0xf110
-#define GT64260_GPP_VALUE 0xf104
-#define GT64260_GPP_INTR_CAUSE 0xf108
-#define GT64260_GPP_INTR_MASK 0xf10c
-
-
-/*
- *****************************************************************************
- *
- * Multi-Purpose Pins Controller Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_MPP_CNTL_0 0xf000
-#define GT64260_MPP_CNTL_1 0xf004
-#define GT64260_MPP_CNTL_2 0xf008
-#define GT64260_MPP_CNTL_3 0xf00c
-#define GT64260_MPP_SERIAL_PORTS_MULTIPLEX 0xf010
-
-
-/*
- *****************************************************************************
- *
- * I2C Controller Interface Registers
- *
- *****************************************************************************
- */
-
-/* FIXME: fill in */
-
-
-/*
- *****************************************************************************
- *
- * Interrupt Controller Interface Registers
- *
- *****************************************************************************
- */
-
-#define GT64260_IC_MAIN_CAUSE_LO 0x0c18
-#define GT64260_IC_MAIN_CAUSE_HI 0x0c68
-#define GT64260_IC_CPU_INTR_MASK_LO 0x0c1c
-#define GT64260_IC_CPU_INTR_MASK_HI 0x0c6c
-#define GT64260_IC_CPU_SELECT_CAUSE 0x0c70
-#define GT64260_IC_PCI_0_INTR_MASK_LO 0x0c24
-#define GT64260_IC_PCI_0_INTR_MASK_HI 0x0c64
-#define GT64260_IC_PCI_0_SELECT_CAUSE 0x0c74
-#define GT64260_IC_PCI_1_INTR_MASK_LO 0x0ca4
-#define GT64260_IC_PCI_1_INTR_MASK_HI 0x0ce4
-#define GT64260_IC_PCI_1_SELECT_CAUSE 0x0cf4
-#define GT64260_IC_CPU_INT_0_MASK 0x0e60
-#define GT64260_IC_CPU_INT_1_MASK 0x0e64
-#define GT64260_IC_CPU_INT_2_MASK 0x0e68
-#define GT64260_IC_CPU_INT_3_MASK 0x0e6c
-
-
-#endif /* __ASMPPC_GT64260_DEFS_H */
BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
- flush_tlb_page(NULL, vaddr);
+ flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
- flush_tlb_page(NULL, vaddr);
+ flush_tlb_page(NULL, vaddr);
#endif
dec_preempt_count();
preempt_check_resched();
{
#ifndef CONFIG_APUS
if (address == 0)
- return NULL;
+ return NULL;
return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#else
return (void*) mm_ptov (address);
return irq;
}
-#elif defined(CONFIG_CPM2) && defined(CONFIG_85xx)
-/* Now include the board configuration specific associations.
-*/
-#include <asm/mpc85xx.h>
-
-/* The MPC8560 openpic has 32 internal interrupts and 12 external
- * interrupts.
- *
- * We are "flattening" the interrupt vectors of the cascaded CPM
- * so that we can uniquely identify any interrupt source with a
- * single integer.
- */
-#define NR_CPM_INTS 64
-#define NR_EPIC_INTS 44
-#ifndef NR_8259_INTS
-#define NR_8259_INTS 0
-#endif
-#define NUM_8259_INTERRUPTS NR_8259_INTS
-
-#ifndef CPM_IRQ_OFFSET
-#define CPM_IRQ_OFFSET 0
-#endif
-
-#define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS)
-
-/* These values must be zero-based and map 1:1 with the EPIC configuration.
- * They are used throughout the 8560 I/O subsystem to generate
- * interrupt masks, flags, and other control patterns. This is why the
- * current kernel assumption of the 8259 as the base controller is such
- * a pain in the butt.
- */
-
-#define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET)
-#define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET)
-#define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET)
-#define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET)
-#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
-#define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET)
-#define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET)
-#define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET)
-#define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET)
-#define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
-#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
-#define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET)
-#define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET)
-#define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET)
-#define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET)
-#define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET)
-#define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET)
-#define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET)
-#define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET)
-#define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET)
-#define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET)
-#define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET)
-#define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET)
-#define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET)
-#define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET)
-#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
-#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
-
-static __inline__ int irq_canonicalize(int irq)
-{
- return irq;
-}
-
#else /* CONFIG_40x + CONFIG_8xx */
/*
* this is the # irq's for all ppc arch's (pmac/chrp/prep)
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
-#elif defined(CONFIG_E500)
+#elif defined(CONFIG_E500)
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
+++ /dev/null
-/*
- * include/asm-ppc/mpc52xx.h
- *
- * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
- * May need to be cleaned as the port goes on ...
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Originally written by Dale Farnsworth <dfarnsworth@mvista.com>
- * for the 2.4 kernel.
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ASM_MPC52xx_H__
-#define __ASM_MPC52xx_H__
-
-#ifndef __ASSEMBLY__
-#include <asm/ppcboot.h>
-#include <asm/types.h>
-
-struct pt_regs;
-struct ocp_def;
-#endif /* __ASSEMBLY__ */
-
-
-/* ======================================================================== */
-/* Main registers/struct addresses */
-/* ======================================================================== */
-/* Theses are PHYSICAL addresses ! */
-/* TODO : There should be no static mapping, but it's not yet the case, so */
-/* we require a 1:1 mapping */
-
-#define MPC52xx_MBAR 0xf0000000 /* Phys address */
-#define MPC52xx_MBAR_SIZE 0x00010000
-#define MPC52xx_MBAR_VIRT 0xf0000000 /* Virt address */
-
-#define MPC52xx_MMAP_CTL (MPC52xx_MBAR + 0x0000)
-#define MPC52xx_CDM (MPC52xx_MBAR + 0x0200)
-#define MPC52xx_SFTRST (MPC52xx_MBAR + 0x0220)
-#define MPC52xx_SFTRST_BIT 0x01000000
-#define MPC52xx_INTR (MPC52xx_MBAR + 0x0500)
-#define MPC52xx_GPTx(x) (MPC52xx_MBAR + 0x0600 + ((x)<<4))
-#define MPC52xx_RTC (MPC52xx_MBAR + 0x0800)
-#define MPC52xx_MSCAN1 (MPC52xx_MBAR + 0x0900)
-#define MPC52xx_MSCAN2 (MPC52xx_MBAR + 0x0980)
-#define MPC52xx_GPIO (MPC52xx_MBAR + 0x0b00)
-#define MPC52xx_PCI (MPC52xx_MBAR + 0x0d00)
-#define MPC52xx_USB_OHCI (MPC52xx_MBAR + 0x1000)
-#define MPC52xx_SDMA (MPC52xx_MBAR + 0x1200)
-#define MPC52xx_XLB (MPC52xx_MBAR + 0x1f00)
-#define MPC52xx_PSCx(x) (MPC52xx_MBAR + 0x2000 + ((x)<<9))
-#define MPC52xx_PSC1 (MPC52xx_MBAR + 0x2000)
-#define MPC52xx_PSC2 (MPC52xx_MBAR + 0x2200)
-#define MPC52xx_PSC3 (MPC52xx_MBAR + 0x2400)
-#define MPC52xx_PSC4 (MPC52xx_MBAR + 0x2600)
-#define MPC52xx_PSC5 (MPC52xx_MBAR + 0x2800)
-#define MPC52xx_PSC6 (MPC52xx_MBAR + 0x2C00)
-#define MPC52xx_FEC (MPC52xx_MBAR + 0x3000)
-#define MPC52xx_ATA (MPC52xx_MBAR + 0x3a00)
-#define MPC52xx_I2C1 (MPC52xx_MBAR + 0x3d00)
-#define MPC52xx_I2C_MICR (MPC52xx_MBAR + 0x3d20)
-#define MPC52xx_I2C2 (MPC52xx_MBAR + 0x3d40)
-
-/* SRAM used for SDMA */
-#define MPC52xx_SRAM (MPC52xx_MBAR + 0x8000)
-#define MPC52xx_SRAM_SIZE (16*1024)
-#define MPC52xx_SDMA_MAX_TASKS 16
-
- /* Memory allocation block size */
-#define MPC52xx_SDRAM_UNIT 0x8000 /* 32K byte */
-
-
-/* ======================================================================== */
-/* IRQ mapping */
-/* ======================================================================== */
-/* Be sure to look at mpc52xx_pic.h if you wish for whatever reason to change
- * this
- */
-
-#define MPC52xx_CRIT_IRQ_NUM 4
-#define MPC52xx_MAIN_IRQ_NUM 17
-#define MPC52xx_SDMA_IRQ_NUM 17
-#define MPC52xx_PERP_IRQ_NUM 23
-
-#define MPC52xx_CRIT_IRQ_BASE 0
-#define MPC52xx_MAIN_IRQ_BASE (MPC52xx_CRIT_IRQ_BASE + MPC52xx_CRIT_IRQ_NUM)
-#define MPC52xx_SDMA_IRQ_BASE (MPC52xx_MAIN_IRQ_BASE + MPC52xx_MAIN_IRQ_NUM)
-#define MPC52xx_PERP_IRQ_BASE (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)
-
-#define MPC52xx_IRQ0 (MPC52xx_CRIT_IRQ_BASE + 0)
-#define MPC52xx_SLICE_TIMER_0_IRQ (MPC52xx_CRIT_IRQ_BASE + 1)
-#define MPC52xx_HI_INT_IRQ (MPC52xx_CRIT_IRQ_BASE + 2)
-#define MPC52xx_CCS_IRQ (MPC52xx_CRIT_IRQ_BASE + 3)
-
-#define MPC52xx_IRQ1 (MPC52xx_MAIN_IRQ_BASE + 1)
-#define MPC52xx_IRQ2 (MPC52xx_MAIN_IRQ_BASE + 2)
-#define MPC52xx_IRQ3 (MPC52xx_MAIN_IRQ_BASE + 3)
-
-#define MPC52xx_SDMA_IRQ (MPC52xx_PERP_IRQ_BASE + 0)
-#define MPC52xx_PSC1_IRQ (MPC52xx_PERP_IRQ_BASE + 1)
-#define MPC52xx_PSC2_IRQ (MPC52xx_PERP_IRQ_BASE + 2)
-#define MPC52xx_PSC3_IRQ (MPC52xx_PERP_IRQ_BASE + 3)
-#define MPC52xx_PSC6_IRQ (MPC52xx_PERP_IRQ_BASE + 4)
-#define MPC52xx_IRDA_IRQ (MPC52xx_PERP_IRQ_BASE + 4)
-#define MPC52xx_FEC_IRQ (MPC52xx_PERP_IRQ_BASE + 5)
-#define MPC52xx_USB_IRQ (MPC52xx_PERP_IRQ_BASE + 6)
-#define MPC52xx_ATA_IRQ (MPC52xx_PERP_IRQ_BASE + 7)
-#define MPC52xx_PCI_CNTRL_IRQ (MPC52xx_PERP_IRQ_BASE + 8)
-#define MPC52xx_PCI_SCIRX_IRQ (MPC52xx_PERP_IRQ_BASE + 9)
-#define MPC52xx_PCI_SCITX_IRQ (MPC52xx_PERP_IRQ_BASE + 10)
-#define MPC52xx_PSC4_IRQ (MPC52xx_PERP_IRQ_BASE + 11)
-#define MPC52xx_PSC5_IRQ (MPC52xx_PERP_IRQ_BASE + 12)
-#define MPC52xx_SPI_MODF_IRQ (MPC52xx_PERP_IRQ_BASE + 13)
-#define MPC52xx_SPI_SPIF_IRQ (MPC52xx_PERP_IRQ_BASE + 14)
-#define MPC52xx_I2C1_IRQ (MPC52xx_PERP_IRQ_BASE + 15)
-#define MPC52xx_I2C2_IRQ (MPC52xx_PERP_IRQ_BASE + 16)
-#define MPC52xx_CAN1_IRQ (MPC52xx_PERP_IRQ_BASE + 17)
-#define MPC52xx_CAN2_IRQ (MPC52xx_PERP_IRQ_BASE + 18)
-#define MPC52xx_IR_RX_IRQ (MPC52xx_PERP_IRQ_BASE + 19)
-#define MPC52xx_IR_TX_IRQ (MPC52xx_PERP_IRQ_BASE + 20)
-#define MPC52xx_XLB_ARB_IRQ (MPC52xx_PERP_IRQ_BASE + 21)
-
-
-
-/* ======================================================================== */
-/* Structures mapping of some unit register set */
-/* ======================================================================== */
-
-#ifndef __ASSEMBLY__
-
-/* Memory Mapping Control */
-struct mpc52xx_mmap_ctl {
- volatile u32 mbar; /* MMAP_CTRL + 0x00 */
-
- volatile u32 cs0_start; /* MMAP_CTRL + 0x04 */
- volatile u32 cs0_stop; /* MMAP_CTRL + 0x08 */
- volatile u32 cs1_start; /* MMAP_CTRL + 0x0c */
- volatile u32 cs1_stop; /* MMAP_CTRL + 0x10 */
- volatile u32 cs2_start; /* MMAP_CTRL + 0x14 */
- volatile u32 cs2_stop; /* MMAP_CTRL + 0x18 */
- volatile u32 cs3_start; /* MMAP_CTRL + 0x1c */
- volatile u32 cs3_stop; /* MMAP_CTRL + 0x20 */
- volatile u32 cs4_start; /* MMAP_CTRL + 0x24 */
- volatile u32 cs4_stop; /* MMAP_CTRL + 0x28 */
- volatile u32 cs5_start; /* MMAP_CTRL + 0x2c */
- volatile u32 cs5_stop; /* MMAP_CTRL + 0x30 */
-
- volatile u32 sdram0; /* MMAP_CTRL + 0x34 */
- volatile u32 sdram1; /* MMAP_CTRL + 0X38 */
-
- volatile u32 reserved[4]; /* MMAP_CTRL + 0x3c .. 0x48 */
-
- volatile u32 boot_start; /* MMAP_CTRL + 0x4c */
- volatile u32 boot_stop; /* MMAP_CTRL + 0x50 */
-
- volatile u32 ipbi_ws_ctrl; /* MMAP_CTRL + 0x54 */
-
- volatile u32 cs6_start; /* MMAP_CTRL + 0x58 */
- volatile u32 cs6_stop; /* MMAP_CTRL + 0x5c */
- volatile u32 cs7_start; /* MMAP_CTRL + 0x60 */
- volatile u32 cs7_stop; /* MMAP_CTRL + 0x60 */
-};
-
-/* Interrupt controller */
-struct mpc52xx_intr {
- volatile u32 per_mask; /* INTR + 0x00 */
- volatile u32 per_pri1; /* INTR + 0x04 */
- volatile u32 per_pri2; /* INTR + 0x08 */
- volatile u32 per_pri3; /* INTR + 0x0c */
- volatile u32 ctrl; /* INTR + 0x10 */
- volatile u32 main_mask; /* INTR + 0x14 */
- volatile u32 main_pri1; /* INTR + 0x18 */
- volatile u32 main_pri2; /* INTR + 0x1c */
- volatile u32 reserved1; /* INTR + 0x20 */
- volatile u32 enc_status; /* INTR + 0x24 */
- volatile u32 crit_status; /* INTR + 0x28 */
- volatile u32 main_status; /* INTR + 0x2c */
- volatile u32 per_status; /* INTR + 0x30 */
- volatile u32 reserved2; /* INTR + 0x34 */
- volatile u32 per_error; /* INTR + 0x38 */
-};
-
-/* SDMA */
-struct mpc52xx_sdma {
- volatile u32 taskBar; /* SDMA + 0x00 */
- volatile u32 currentPointer; /* SDMA + 0x04 */
- volatile u32 endPointer; /* SDMA + 0x08 */
- volatile u32 variablePointer;/* SDMA + 0x0c */
-
- volatile u8 IntVect1; /* SDMA + 0x10 */
- volatile u8 IntVect2; /* SDMA + 0x11 */
- volatile u16 PtdCntrl; /* SDMA + 0x12 */
-
- volatile u32 IntPend; /* SDMA + 0x14 */
- volatile u32 IntMask; /* SDMA + 0x18 */
-
- volatile u16 tcr[16]; /* SDMA + 0x1c .. 0x3a */
-
- volatile u8 ipr[31]; /* SDMA + 0x3c .. 5b */
-
- volatile u32 res1; /* SDMA + 0x5c */
- volatile u32 task_size0; /* SDMA + 0x60 */
- volatile u32 task_size1; /* SDMA + 0x64 */
- volatile u32 MDEDebug; /* SDMA + 0x68 */
- volatile u32 ADSDebug; /* SDMA + 0x6c */
- volatile u32 Value1; /* SDMA + 0x70 */
- volatile u32 Value2; /* SDMA + 0x74 */
- volatile u32 Control; /* SDMA + 0x78 */
- volatile u32 Status; /* SDMA + 0x7c */
-};
-
-/* GPT */
-struct mpc52xx_gpt {
- volatile u32 mode; /* GPTx + 0x00 */
- volatile u32 count; /* GPTx + 0x04 */
- volatile u32 pwm; /* GPTx + 0x08 */
- volatile u32 status; /* GPTx + 0X0c */
-};
-
-/* RTC */
-struct mpc52xx_rtc {
- volatile u32 time_set; /* RTC + 0x00 */
- volatile u32 date_set; /* RTC + 0x04 */
- volatile u32 stopwatch; /* RTC + 0x08 */
- volatile u32 int_enable; /* RTC + 0x0c */
- volatile u32 time; /* RTC + 0x10 */
- volatile u32 date; /* RTC + 0x14 */
- volatile u32 stopwatch_intr; /* RTC + 0x18 */
- volatile u32 bus_error; /* RTC + 0x1c */
- volatile u32 dividers; /* RTC + 0x20 */
-};
-
-/* GPIO */
-struct mpc52xx_gpio {
- volatile u32 port_config; /* GPIO + 0x00 */
- volatile u32 simple_gpioe; /* GPIO + 0x04 */
- volatile u32 simple_ode; /* GPIO + 0x08 */
- volatile u32 simple_ddr; /* GPIO + 0x0c */
- volatile u32 simple_dvo; /* GPIO + 0x10 */
- volatile u32 simple_ival; /* GPIO + 0x14 */
- volatile u8 outo_gpioe; /* GPIO + 0x18 */
- volatile u8 reserved1[3]; /* GPIO + 0x19 */
- volatile u8 outo_dvo; /* GPIO + 0x1c */
- volatile u8 reserved2[3]; /* GPIO + 0x1d */
- volatile u8 sint_gpioe; /* GPIO + 0x20 */
- volatile u8 reserved3[3]; /* GPIO + 0x21 */
- volatile u8 sint_ode; /* GPIO + 0x24 */
- volatile u8 reserved4[3]; /* GPIO + 0x25 */
- volatile u8 sint_ddr; /* GPIO + 0x28 */
- volatile u8 reserved5[3]; /* GPIO + 0x29 */
- volatile u8 sint_dvo; /* GPIO + 0x2c */
- volatile u8 reserved6[3]; /* GPIO + 0x2d */
- volatile u8 sint_inten; /* GPIO + 0x30 */
- volatile u8 reserved7[3]; /* GPIO + 0x31 */
- volatile u16 sint_itype; /* GPIO + 0x34 */
- volatile u16 reserved8; /* GPIO + 0x36 */
- volatile u8 gpio_control; /* GPIO + 0x38 */
- volatile u8 reserved9[3]; /* GPIO + 0x39 */
- volatile u8 sint_istat; /* GPIO + 0x3c */
- volatile u8 sint_ival; /* GPIO + 0x3d */
- volatile u8 bus_errs; /* GPIO + 0x3e */
- volatile u8 reserved10; /* GPIO + 0x3f */
-};
-
-#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD 4
-#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD 5
-#define MPC52xx_GPIO_PCI_DIS (1<<15)
-
-/* XLB Bus control */
-struct mpc52xx_xlb {
- volatile u8 reserved[0x40];
- volatile u32 config; /* XLB + 0x40 */
- volatile u32 version; /* XLB + 0x44 */
- volatile u32 status; /* XLB + 0x48 */
- volatile u32 int_enable; /* XLB + 0x4c */
- volatile u32 addr_capture; /* XLB + 0x50 */
- volatile u32 bus_sig_capture; /* XLB + 0x54 */
- volatile u32 addr_timeout; /* XLB + 0x58 */
- volatile u32 data_timeout; /* XLB + 0x5c */
- volatile u32 bus_act_timeout; /* XLB + 0x60 */
- volatile u32 master_pri_enable; /* XLB + 0x64 */
- volatile u32 master_priority; /* XLB + 0x68 */
- volatile u32 base_address; /* XLB + 0x6c */
- volatile u32 snoop_window; /* XLB + 0x70 */
-};
-
-
-/* Clock Distribution control */
-struct mpc52xx_cdm {
- volatile u32 jtag_id; /* MBAR_CDM + 0x00 reg0 read only */
- volatile u32 rstcfg; /* MBAR_CDM + 0x04 reg1 read only */
- volatile u32 breadcrumb; /* MBAR_CDM + 0x08 reg2 */
-
- volatile u8 mem_clk_sel; /* MBAR_CDM + 0x0c reg3 byte0 */
- volatile u8 xlb_clk_sel; /* MBAR_CDM + 0x0d reg3 byte1 read only */
- volatile u8 ipb_clk_sel; /* MBAR_CDM + 0x0e reg3 byte2 */
- volatile u8 pci_clk_sel; /* MBAR_CDM + 0x0f reg3 byte3 */
-
- volatile u8 ext_48mhz_en; /* MBAR_CDM + 0x10 reg4 byte0 */
- volatile u8 fd_enable; /* MBAR_CDM + 0x11 reg4 byte1 */
- volatile u16 fd_counters; /* MBAR_CDM + 0x12 reg4 byte2,3 */
-
- volatile u32 clk_enables; /* MBAR_CDM + 0x14 reg5 */
-
- volatile u8 osc_disable; /* MBAR_CDM + 0x18 reg6 byte0 */
- volatile u8 reserved0[3]; /* MBAR_CDM + 0x19 reg6 byte1,2,3 */
-
- volatile u8 ccs_sleep_enable;/* MBAR_CDM + 0x1c reg7 byte0 */
- volatile u8 osc_sleep_enable;/* MBAR_CDM + 0x1d reg7 byte1 */
- volatile u8 reserved1; /* MBAR_CDM + 0x1e reg7 byte2 */
- volatile u8 ccs_qreq_test; /* MBAR_CDM + 0x1f reg7 byte3 */
-
- volatile u8 soft_reset; /* MBAR_CDM + 0x20 u8 byte0 */
- volatile u8 no_ckstp; /* MBAR_CDM + 0x21 u8 byte0 */
- volatile u8 reserved2[2]; /* MBAR_CDM + 0x22 u8 byte1,2,3 */
-
- volatile u8 pll_lock; /* MBAR_CDM + 0x24 reg9 byte0 */
- volatile u8 pll_looselock; /* MBAR_CDM + 0x25 reg9 byte1 */
- volatile u8 pll_sm_lockwin; /* MBAR_CDM + 0x26 reg9 byte2 */
- volatile u8 reserved3; /* MBAR_CDM + 0x27 reg9 byte3 */
-
- volatile u16 reserved4; /* MBAR_CDM + 0x28 reg10 byte0,1 */
- volatile u16 mclken_div_psc1;/* MBAR_CDM + 0x2a reg10 byte2,3 */
-
- volatile u16 reserved5; /* MBAR_CDM + 0x2c reg11 byte0,1 */
- volatile u16 mclken_div_psc2;/* MBAR_CDM + 0x2e reg11 byte2,3 */
-
- volatile u16 reserved6; /* MBAR_CDM + 0x30 reg12 byte0,1 */
- volatile u16 mclken_div_psc3;/* MBAR_CDM + 0x32 reg12 byte2,3 */
-
- volatile u16 reserved7; /* MBAR_CDM + 0x34 reg13 byte0,1 */
- volatile u16 mclken_div_psc6;/* MBAR_CDM + 0x36 reg13 byte2,3 */
-};
-
-#endif /* __ASSEMBLY__ */
-
-
-/* ========================================================================= */
-/* Prototypes for MPC52xx syslib */
-/* ========================================================================= */
-
-#ifndef __ASSEMBLY__
-
-extern void mpc52xx_init_irq(void);
-extern int mpc52xx_get_irq(struct pt_regs *regs);
-
-extern unsigned long mpc52xx_find_end_of_memory(void);
-extern void mpc52xx_set_bat(void);
-extern void mpc52xx_map_io(void);
-extern void mpc52xx_restart(char *cmd);
-extern void mpc52xx_halt(void);
-extern void mpc52xx_power_off(void);
-extern void mpc52xx_progress(char *s, unsigned short hex);
-extern void mpc52xx_calibrate_decr(void);
-extern void mpc52xx_add_board_devices(struct ocp_def board_ocp[]);
-
-#endif /* __ASSEMBLY__ */
-
-
-/* ========================================================================= */
-/* Platform configuration */
-/* ========================================================================= */
-
-/* The U-Boot platform information struct */
-extern bd_t __res;
-
-/* Platform options */
-#if defined(CONFIG_LITE5200)
-#include <platforms/lite5200.h>
-#endif
-
-
-#endif /* __ASM_MPC52xx_H__ */
+++ /dev/null
-/*
- * include/asm-ppc/mpc52xx_psc.h
- *
- * Definitions of consts/structs to drive the Freescale MPC52xx OnChip
- * PSCs. Theses are shared between multiple drivers since a PSC can be
- * UART, AC97, IR, I2S, ... So this header is in asm-ppc.
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Based/Extracted from some header of the 2.4 originally written by
- * Dale Farnsworth <dfarnsworth@mvista.com>
- *
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __MPC52xx_PSC_H__
-#define __MPC52xx_PSC_H__
-
-#include <asm/types.h>
-
-/* Max number of PSCs */
-#define MPC52xx_PSC_MAXNUM 6
-
-/* Programmable Serial Controller (PSC) status register bits */
-#define MPC52xx_PSC_SR_CDE 0x0080
-#define MPC52xx_PSC_SR_RXRDY 0x0100
-#define MPC52xx_PSC_SR_RXFULL 0x0200
-#define MPC52xx_PSC_SR_TXRDY 0x0400
-#define MPC52xx_PSC_SR_TXEMP 0x0800
-#define MPC52xx_PSC_SR_OE 0x1000
-#define MPC52xx_PSC_SR_PE 0x2000
-#define MPC52xx_PSC_SR_FE 0x4000
-#define MPC52xx_PSC_SR_RB 0x8000
-
-/* PSC Command values */
-#define MPC52xx_PSC_RX_ENABLE 0x0001
-#define MPC52xx_PSC_RX_DISABLE 0x0002
-#define MPC52xx_PSC_TX_ENABLE 0x0004
-#define MPC52xx_PSC_TX_DISABLE 0x0008
-#define MPC52xx_PSC_SEL_MODE_REG_1 0x0010
-#define MPC52xx_PSC_RST_RX 0x0020
-#define MPC52xx_PSC_RST_TX 0x0030
-#define MPC52xx_PSC_RST_ERR_STAT 0x0040
-#define MPC52xx_PSC_RST_BRK_CHG_INT 0x0050
-#define MPC52xx_PSC_START_BRK 0x0060
-#define MPC52xx_PSC_STOP_BRK 0x0070
-
-/* PSC TxRx FIFO status bits */
-#define MPC52xx_PSC_RXTX_FIFO_ERR 0x0040
-#define MPC52xx_PSC_RXTX_FIFO_UF 0x0020
-#define MPC52xx_PSC_RXTX_FIFO_OF 0x0010
-#define MPC52xx_PSC_RXTX_FIFO_FR 0x0008
-#define MPC52xx_PSC_RXTX_FIFO_FULL 0x0004
-#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002
-#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001
-
-/* PSC interrupt mask bits */
-#define MPC52xx_PSC_IMR_TXRDY 0x0100
-#define MPC52xx_PSC_IMR_RXRDY 0x0200
-#define MPC52xx_PSC_IMR_DB 0x0400
-#define MPC52xx_PSC_IMR_IPC 0x8000
-
-/* PSC input port change bit */
-#define MPC52xx_PSC_CTS 0x01
-#define MPC52xx_PSC_DCD 0x02
-#define MPC52xx_PSC_D_CTS 0x10
-#define MPC52xx_PSC_D_DCD 0x20
-
-/* PSC mode fields */
-#define MPC52xx_PSC_MODE_5_BITS 0x00
-#define MPC52xx_PSC_MODE_6_BITS 0x01
-#define MPC52xx_PSC_MODE_7_BITS 0x02
-#define MPC52xx_PSC_MODE_8_BITS 0x03
-#define MPC52xx_PSC_MODE_BITS_MASK 0x03
-#define MPC52xx_PSC_MODE_PAREVEN 0x00
-#define MPC52xx_PSC_MODE_PARODD 0x04
-#define MPC52xx_PSC_MODE_PARFORCE 0x08
-#define MPC52xx_PSC_MODE_PARNONE 0x10
-#define MPC52xx_PSC_MODE_ERR 0x20
-#define MPC52xx_PSC_MODE_FFULL 0x40
-#define MPC52xx_PSC_MODE_RXRTS 0x80
-
-#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
-#define MPC52xx_PSC_MODE_ONE_STOP 0x07
-#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
-
-#define MPC52xx_PSC_RFNUM_MASK 0x01ff
-
-
-/* Structure of the hardware registers */
-struct mpc52xx_psc {
- volatile u8 mode; /* PSC + 0x00 */
- volatile u8 reserved0[3];
- union { /* PSC + 0x04 */
- volatile u16 status;
- volatile u16 clock_select;
- } sr_csr;
-#define mpc52xx_psc_status sr_csr.status
-#define mpc52xx_psc_clock_select sr_csr.clock_select
- volatile u16 reserved1;
- volatile u8 command; /* PSC + 0x08 */
-	volatile u8	reserved2[3];
- union { /* PSC + 0x0c */
- volatile u8 buffer_8;
- volatile u16 buffer_16;
- volatile u32 buffer_32;
- } buffer;
-#define mpc52xx_psc_buffer_8 buffer.buffer_8
-#define mpc52xx_psc_buffer_16 buffer.buffer_16
-#define mpc52xx_psc_buffer_32 buffer.buffer_32
- union { /* PSC + 0x10 */
- volatile u8 ipcr;
- volatile u8 acr;
- } ipcr_acr;
-#define mpc52xx_psc_ipcr ipcr_acr.ipcr
-#define mpc52xx_psc_acr ipcr_acr.acr
- volatile u8 reserved3[3];
- union { /* PSC + 0x14 */
- volatile u16 isr;
- volatile u16 imr;
- } isr_imr;
-#define mpc52xx_psc_isr isr_imr.isr
-#define mpc52xx_psc_imr isr_imr.imr
- volatile u16 reserved4;
- volatile u8 ctur; /* PSC + 0x18 */
- volatile u8 reserved5[3];
- volatile u8 ctlr; /* PSC + 0x1c */
- volatile u8 reserved6[3];
- volatile u16 ccr; /* PSC + 0x20 */
- volatile u8 reserved7[14];
- volatile u8 ivr; /* PSC + 0x30 */
- volatile u8 reserved8[3];
- volatile u8 ip; /* PSC + 0x34 */
- volatile u8 reserved9[3];
- volatile u8 op1; /* PSC + 0x38 */
- volatile u8 reserved10[3];
- volatile u8 op0; /* PSC + 0x3c */
- volatile u8 reserved11[3];
- volatile u32 sicr; /* PSC + 0x40 */
- volatile u8 ircr1; /* PSC + 0x44 */
- volatile u8 reserved13[3];
- volatile u8 ircr2; /* PSC + 0x44 */
- volatile u8 reserved14[3];
- volatile u8 irsdr; /* PSC + 0x4c */
- volatile u8 reserved15[3];
- volatile u8 irmdr; /* PSC + 0x50 */
- volatile u8 reserved16[3];
- volatile u8 irfdr; /* PSC + 0x54 */
- volatile u8 reserved17[3];
- volatile u16 rfnum; /* PSC + 0x58 */
- volatile u16 reserved18;
- volatile u16 tfnum; /* PSC + 0x5c */
- volatile u16 reserved19;
- volatile u32 rfdata; /* PSC + 0x60 */
- volatile u16 rfstat; /* PSC + 0x64 */
- volatile u16 reserved20;
- volatile u8 rfcntl; /* PSC + 0x68 */
- volatile u8 reserved21[5];
- volatile u16 rfalarm; /* PSC + 0x6e */
- volatile u16 reserved22;
- volatile u16 rfrptr; /* PSC + 0x72 */
- volatile u16 reserved23;
- volatile u16 rfwptr; /* PSC + 0x76 */
- volatile u16 reserved24;
- volatile u16 rflrfptr; /* PSC + 0x7a */
- volatile u16 reserved25;
- volatile u16 rflwfptr; /* PSC + 0x7e */
- volatile u32 tfdata; /* PSC + 0x80 */
- volatile u16 tfstat; /* PSC + 0x84 */
- volatile u16 reserved26;
- volatile u8 tfcntl; /* PSC + 0x88 */
- volatile u8 reserved27[5];
- volatile u16 tfalarm; /* PSC + 0x8e */
- volatile u16 reserved28;
- volatile u16 tfrptr; /* PSC + 0x92 */
- volatile u16 reserved29;
- volatile u16 tfwptr; /* PSC + 0x96 */
- volatile u16 reserved30;
- volatile u16 tflrfptr; /* PSC + 0x9a */
- volatile u16 reserved31;
- volatile u16 tflwfptr; /* PSC + 0x9e */
-};
-
-
-#endif /* __MPC52xx_PSC_H__ */
#include <platforms/sbs8260.h>
#endif
-#ifdef CONFIG_RPX8260
-#include <platforms/rpx8260.h>
+#ifdef CONFIG_RPX6
+#include <platforms/rpxsuper.h>
#endif
#ifdef CONFIG_WILLOW
#ifdef CONFIG_MPC8540_ADS
#include <platforms/85xx/mpc8540_ads.h>
#endif
-#ifdef CONFIG_MPC8555_CDS
-#include <platforms/85xx/mpc8555_cds.h>
-#endif
-#ifdef CONFIG_MPC8560_ADS
-#include <platforms/85xx/mpc8560_ads.h>
-#endif
#ifdef CONFIG_SBC8560
#include <platforms/85xx/sbc8560.h>
#endif
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
-#ifdef CONFIG_PCI
#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
/*
* The "residual" board information structure the boot loader passes
--- /dev/null
+/*
+ * include/asm-ppc/mv64x60.h
+ *
+ * Prototypes, etc. for the Marvell/Galileo MV64x60 host bridge routines.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASMPPC_MV64x60_H
+#define __ASMPPC_MV64x60_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/config.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/mv64x60_defs.h>
+
+extern u8 mv64x60_pci_exclude_bridge;
+
+extern spinlock_t mv64x60_lock;
+extern spinlock_t mv64x60_rmw_lock;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* 32-bit Window table entry defines */
+#define MV64x60_CPU2MEM_0_WIN 0
+#define MV64x60_CPU2MEM_1_WIN 1
+#define MV64x60_CPU2MEM_2_WIN 2
+#define MV64x60_CPU2MEM_3_WIN 3
+#define MV64x60_CPU2DEV_0_WIN 4
+#define MV64x60_CPU2DEV_1_WIN 5
+#define MV64x60_CPU2DEV_2_WIN 6
+#define MV64x60_CPU2DEV_3_WIN 7
+#define MV64x60_CPU2BOOT_WIN 8
+#define MV64x60_CPU2PCI0_IO_WIN 9
+#define MV64x60_CPU2PCI0_MEM_0_WIN 10
+#define MV64x60_CPU2PCI0_MEM_1_WIN 11
+#define MV64x60_CPU2PCI0_MEM_2_WIN 12
+#define MV64x60_CPU2PCI0_MEM_3_WIN 13
+#define MV64x60_CPU2PCI1_IO_WIN 14
+#define MV64x60_CPU2PCI1_MEM_0_WIN 15
+#define MV64x60_CPU2PCI1_MEM_1_WIN 16
+#define MV64x60_CPU2PCI1_MEM_2_WIN 17
+#define MV64x60_CPU2PCI1_MEM_3_WIN 18
+#define MV64x60_CPU2SRAM_WIN 19
+#define MV64x60_CPU2PCI0_IO_REMAP_WIN 20
+#define MV64x60_CPU2PCI1_IO_REMAP_WIN 21
+#define MV64x60_CPU_PROT_0_WIN 22
+#define MV64x60_CPU_PROT_1_WIN 23
+#define MV64x60_CPU_PROT_2_WIN 24
+#define MV64x60_CPU_PROT_3_WIN 25
+#define MV64x60_CPU_SNOOP_0_WIN 26
+#define MV64x60_CPU_SNOOP_1_WIN 27
+#define MV64x60_CPU_SNOOP_2_WIN 28
+#define MV64x60_CPU_SNOOP_3_WIN 29
+#define MV64x60_PCI02MEM_REMAP_0_WIN 30
+#define MV64x60_PCI02MEM_REMAP_1_WIN 31
+#define MV64x60_PCI02MEM_REMAP_2_WIN 32
+#define MV64x60_PCI02MEM_REMAP_3_WIN 33
+#define MV64x60_PCI12MEM_REMAP_0_WIN 34
+#define MV64x60_PCI12MEM_REMAP_1_WIN 35
+#define MV64x60_PCI12MEM_REMAP_2_WIN 36
+#define MV64x60_PCI12MEM_REMAP_3_WIN 37
+
+#define MV64x60_32BIT_WIN_COUNT 38
+
+/* 64-bit Window table entry defines */
+#define MV64x60_CPU2PCI0_MEM_0_REMAP_WIN 0
+#define MV64x60_CPU2PCI0_MEM_1_REMAP_WIN 1
+#define MV64x60_CPU2PCI0_MEM_2_REMAP_WIN 2
+#define MV64x60_CPU2PCI0_MEM_3_REMAP_WIN 3
+#define MV64x60_CPU2PCI1_MEM_0_REMAP_WIN 4
+#define MV64x60_CPU2PCI1_MEM_1_REMAP_WIN 5
+#define MV64x60_CPU2PCI1_MEM_2_REMAP_WIN 6
+#define MV64x60_CPU2PCI1_MEM_3_REMAP_WIN 7
+#define MV64x60_PCI02MEM_ACC_CNTL_0_WIN 8
+#define MV64x60_PCI02MEM_ACC_CNTL_1_WIN 9
+#define MV64x60_PCI02MEM_ACC_CNTL_2_WIN 10
+#define MV64x60_PCI02MEM_ACC_CNTL_3_WIN 11
+#define MV64x60_PCI12MEM_ACC_CNTL_0_WIN 12
+#define MV64x60_PCI12MEM_ACC_CNTL_1_WIN 13
+#define MV64x60_PCI12MEM_ACC_CNTL_2_WIN 14
+#define MV64x60_PCI12MEM_ACC_CNTL_3_WIN 15
+#define MV64x60_PCI02MEM_SNOOP_0_WIN 16
+#define MV64x60_PCI02MEM_SNOOP_1_WIN 17
+#define MV64x60_PCI02MEM_SNOOP_2_WIN 18
+#define MV64x60_PCI02MEM_SNOOP_3_WIN 19
+#define MV64x60_PCI12MEM_SNOOP_0_WIN 20
+#define MV64x60_PCI12MEM_SNOOP_1_WIN 21
+#define MV64x60_PCI12MEM_SNOOP_2_WIN 22
+#define MV64x60_PCI12MEM_SNOOP_3_WIN 23
+
+#define MV64x60_64BIT_WIN_COUNT 24
+
+
+/*
+ * Define a structure that's used to pass in config information to the
+ * core routines.
+ */
+typedef struct {
+ u32 cpu_base;
+ u32 pci_base_hi;
+ u32 pci_base_lo;
+ u32 size;
+ u32 swap;
+} mv64x60_pci_window_t;
+
+typedef struct {
+ u8 enable_bus; /* allow access to this PCI bus? */
+ u8 enumerate_bus; /* enumerate devices on this bus? */
+
+ mv64x60_pci_window_t pci_io;
+ mv64x60_pci_window_t pci_mem[3];
+
+ u32 acc_cntl_options[MV64x60_CPU2MEM_WINDOWS];
+ u32 snoop_options[MV64x60_CPU2MEM_WINDOWS];
+ u16 pci_cmd_bits;
+ u16 latency_timer;
+} mv64x60_pci_info_t;
+
+typedef struct {
+ u32 phys_reg_base;
+
+ u32 window_preserve_mask_32;
+ u32 window_preserve_mask_64;
+
+ u32 base_irq; /* Starting irq # for this intr ctlr */
+ int ((*map_irq)(struct pci_dev *, unsigned char, unsigned char));
+
+ u32 cpu_prot_options[MV64x60_CPU2MEM_WINDOWS];
+ u32 cpu_snoop_options[MV64x60_CPU2MEM_WINDOWS];
+
+ mv64x60_pci_info_t pci_0;
+ mv64x60_pci_info_t pci_1;
+} mv64x60_setup_info_t;
+
+/*
+ * Define the 'handle' struct that will be passed between the 64x60 core
+ * code and the platform-specific code that will use it. The handle
+ * will contain pointers to chip-specific routines & information.
+ */
+typedef struct {
+ u32 base_reg;
+ u32 size_reg;
+ u8 base_bits;
+ u8 size_bits;
+ u32 (*get_from_field)(u32 val, u32 num_bits);
+ u32 (*map_to_field)(u32 val, u32 num_bits);
+ u32 extra;
+} mv64x60_32bit_window_t;
+
+typedef struct {
+ u32 base_hi_reg;
+ u32 base_lo_reg;
+ u32 size_reg;
+ u8 base_lo_bits;
+ u8 size_bits;
+ u32 (*get_from_field)(u32 val, u32 num_bits);
+ u32 (*map_to_field)(u32 val, u32 num_bits);
+ u32 extra;
+} mv64x60_64bit_window_t;
+
+typedef struct mv64x60_handle mv64x60_handle_t;
+
+typedef struct {
+ u32 (*translate_size)(u32 base, u32 size, u32 num_bits);
+ u32 (*untranslate_size)(u32 base, u32 size, u32 num_bits);
+ void (*set_pci2mem_window)(struct pci_controller *hose, u32 window,
+ u32 base);
+ u32 (*is_enabled_32bit)(mv64x60_handle_t *bh, u32 window);
+ void (*enable_window_32bit)(mv64x60_handle_t *bh, u32 window);
+ void (*disable_window_32bit)(mv64x60_handle_t *bh, u32 window);
+ void (*enable_window_64bit)(mv64x60_handle_t *bh, u32 window);
+ void (*disable_window_64bit)(mv64x60_handle_t *bh, u32 window);
+ void (*disable_all_windows)(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+ void (*chip_specific_init)(mv64x60_handle_t *bh,
+ mv64x60_setup_info_t *si);
+
+ mv64x60_32bit_window_t *window_tab_32bit;
+ mv64x60_64bit_window_t *window_tab_64bit;
+} mv64x60_chip_info_t;
+
+struct mv64x60_handle {
+ u32 type; /* type of bridge */
+ u32 v_base; /* virtual base addr of bridge regs */
+ u32 p_base; /* physical base addr of bridge regs */
+ u32 base_irq; /* Base irq # for intrs on this intr cltr */
+
+ u32 io_base_a; /* vaddr of pci 0's I/O space */
+ u32 io_base_b; /* vaddr of pci 1's I/O space */
+
+ struct pci_controller *hose_a;
+ struct pci_controller *hose_b;
+
+ mv64x60_chip_info_t *ci; /* chip/bridge-specific info */
+};
+
+
+/* Define I/O routines for accessing registers on the 64x60 bridge. */
+extern inline void
+mv64x60_write(mv64x60_handle_t *bh, u32 offset, u32 val) {
+ out_le32((volatile u32 *)(bh->v_base + offset), val);
+}
+
+extern inline u32
+mv64x60_read(mv64x60_handle_t *bh, u32 offset) {
+ return in_le32((volatile u32 *)(bh->v_base + offset));
+}
+
+extern inline void
+mv64x60_modify(mv64x60_handle_t *bh, u32 offs, u32 data, u32 mask)
+{
+ uint32_t reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mv64x60_rmw_lock, flags);
+ reg = mv64x60_read(bh, offs) & (~mask); /* zero any bits we care about*/
+ reg |= data & mask; /* set bits from the data */
+ mv64x60_write(bh, offs, reg);
+ spin_unlock_irqrestore(&mv64x60_rmw_lock, flags);
+}
+
+#define mv64x60_set_bits(bh, offs, bits) mv64x60_modify(bh, offs, ~0, bits)
+#define mv64x60_clr_bits(bh, offs, bits) mv64x60_modify(bh, offs, 0, bits)
+
+
+/* Externally visible function prototypes */
+int mv64x60_init(mv64x60_handle_t *bh, mv64x60_setup_info_t *si);
+u32 mv64x60_get_mem_size(u32 bridge_base, u32 chip_type);
+void mv64x60_get_32bit_window(mv64x60_handle_t *bh, u32 window,
+ u32 *base, u32 *size);
+void mv64x60_set_32bit_window(mv64x60_handle_t *bh, u32 window, u32 base,
+ u32 size, u32 other_bits);
+void mv64x60_get_64bit_window(mv64x60_handle_t *bh, u32 window, u32 *base_hi,
+ u32 *base_lo, u32 *size);
+void mv64x60_set_64bit_window(mv64x60_handle_t *bh, u32 window, u32 base_hi,
+ u32 base_lo, u32 size, u32 other_bits);
+
+
+void gt64260_init_irq(void);
+int gt64260_get_irq(struct pt_regs *regs);
+
+/*
+ * OCP Related Definitions
+ */
+typedef struct {
+ u8 mirror_regs;
+ u8 cache_mgmt;
+ u8 max_idle;
+ int default_baud;
+ int default_bits;
+ int default_parity;
+ int default_flow;
+ u32 chr_1_val;
+ u32 chr_2_val;
+ u32 chr_10_val;
+ u32 mpcr_val;
+ u32 mrr_val;
+ u32 rcrr_val;
+ u32 tcrr_val;
+ u32 intr_mask_val;
+ u32 bcr_val;
+ u32 sdma_irq;
+ u8 brg_can_tune;
+ u8 brg_clk_src;
+ u32 brg_clk_freq;
+} mv64x60_ocp_mpsc_data_t;
+
+#define MV64x60_OCP_SYSFS_MPSC_DATA() \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, mirror_regs) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, cache_mgmt) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, max_idle) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, default_baud) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, default_bits) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%c\n", mpsc, default_parity) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%c\n", mpsc, default_flow) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, chr_1_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, chr_2_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, chr_10_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, mpcr_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, mrr_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, rcrr_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, tcrr_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, intr_mask_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "0x%x\n", mpsc, bcr_val) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, sdma_irq) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, brg_can_tune) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, brg_clk_src) \
+OCP_SYSFS_ADDTL(mv64x60_ocp_mpsc_data_t, "%d\n", mpsc, brg_clk_freq) \
+ \
+void \
+mv64x60_ocp_show_mpsc(struct device *dev) \
+{ \
+ device_create_file(dev, &dev_attr_mpsc_mirror_regs); \
+ device_create_file(dev, &dev_attr_mpsc_cache_mgmt); \
+ device_create_file(dev, &dev_attr_mpsc_max_idle); \
+ device_create_file(dev, &dev_attr_mpsc_default_baud); \
+ device_create_file(dev, &dev_attr_mpsc_default_bits); \
+ device_create_file(dev, &dev_attr_mpsc_default_parity); \
+ device_create_file(dev, &dev_attr_mpsc_default_flow); \
+ device_create_file(dev, &dev_attr_mpsc_chr_1_val); \
+ device_create_file(dev, &dev_attr_mpsc_chr_2_val); \
+ device_create_file(dev, &dev_attr_mpsc_chr_10_val); \
+ device_create_file(dev, &dev_attr_mpsc_mpcr_val); \
+ device_create_file(dev, &dev_attr_mpsc_mrr_val); \
+ device_create_file(dev, &dev_attr_mpsc_rcrr_val); \
+ device_create_file(dev, &dev_attr_mpsc_tcrr_val); \
+ device_create_file(dev, &dev_attr_mpsc_intr_mask_val); \
+ device_create_file(dev, &dev_attr_mpsc_bcr_val); \
+ device_create_file(dev, &dev_attr_mpsc_sdma_irq); \
+ device_create_file(dev, &dev_attr_mpsc_brg_can_tune); \
+ device_create_file(dev, &dev_attr_mpsc_brg_clk_src); \
+ device_create_file(dev, &dev_attr_mpsc_brg_clk_freq); \
+}
+
+#endif /* __ASMPPC_MV64x60_H */
--- /dev/null
+/*
+ * include/asm-ppc/mv64x60_defs.h
+ *
+ * Register definitions for the Marvell/Galileo GT64260, MV64360, etc.
+ * host bridges.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef __ASMPPC_MV64x60_DEFS_H
+#define __ASMPPC_MV64x60_DEFS_H
+
+/*
+ * Define the Marvell bridges that are supported
+ */
+#define MV64x60_TYPE_INVALID 0
+#define MV64x60_TYPE_GT64260A 1
+#define MV64x60_TYPE_GT64260B 2
+#define MV64x60_TYPE_MV64360 3
+#define MV64x60_TYPE_MV64361 4
+#define MV64x60_TYPE_MV64362 5
+#define MV64x60_TYPE_MV64460 6
+
+
+/* Revisions of each supported chip */
+#define GT64260_REV_A 0x10
+#define GT64260_REV_B 0x20
+#define MV64360 0x01
+#define MV64460 0x01
+
+/* Minimum supported window sizes: 1MB on the GT64260, 64KB on the MV64360 */
+#define GT64260_WINDOW_SIZE_MIN 0x00100000
+#define MV64360_WINDOW_SIZE_MIN 0x00010000
+
+/* IRQs for embedded controllers */
+#define MV64x60_IRQ_DEV 1
+#define MV64x60_IRQ_CPU_ERR 3
+#define MV64x60_IRQ_TIMER_0_1 8
+#define MV64x60_IRQ_TIMER_2_3 9
+#define MV64x60_IRQ_TIMER_4_5 10
+#define MV64x60_IRQ_TIMER_6_7 11
+#define MV64x60_IRQ_ETH_0 32
+#define MV64x60_IRQ_ETH_1 33
+#define MV64x60_IRQ_ETH_2 34
+#define MV64x60_IRQ_SDMA_0 36
+#define MV64x60_IRQ_I2C 37
+#define MV64x60_IRQ_SDMA_1 38
+#define MV64x60_IRQ_BRG 39
+#define MV64x60_IRQ_MPSC_0 40
+#define MV64x60_IRQ_MPSC_1 42
+#define MV64x60_IRQ_COMM 43
+
+#define MV64360_IRQ_PCI0 12
+#define MV64360_IRQ_SRAM_PAR_ERR 13
+#define MV64360_IRQ_PCI1 16
+
+/* Offsets for register blocks */
+#define MV64x60_MPSC_0_OFFSET 0x8000
+#define MV64x60_MPSC_1_OFFSET 0x9000
+#define MV64x60_MPSC_ROUTING_OFFSET 0xb400
+#define MV64x60_SDMA_0_OFFSET 0x4000
+#define MV64x60_SDMA_1_OFFSET 0x6000
+#define MV64x60_SDMA_INTR_OFFSET 0xb800
+#define MV64x60_BRG_0_OFFSET 0xb200
+#define MV64x60_BRG_1_OFFSET 0xb208
+
+/*
+ *****************************************************************************
+ *
+ * CPU Interface Registers
+ *
+ *****************************************************************************
+ */
+
+/* CPU physical address of 64260's registers */
+#define MV64x60_INTERNAL_SPACE_DECODE 0x0068
+#define MV64x60_INTERNAL_SPACE_SIZE 0x10000
+#define MV64x60_INTERNAL_SPACE_DEFAULT_ADDR 0x14000000
+
+#define MV64360_CPU_BAR_ENABLE 0x0278
+
+/* CPU Memory Controller Window Registers (4 windows) */
+#define MV64x60_CPU2MEM_WINDOWS 4
+
+#define MV64x60_CPU2MEM_0_BASE 0x0008
+#define MV64x60_CPU2MEM_0_SIZE 0x0010
+#define MV64x60_CPU2MEM_1_BASE 0x0208
+#define MV64x60_CPU2MEM_1_SIZE 0x0210
+#define MV64x60_CPU2MEM_2_BASE 0x0018
+#define MV64x60_CPU2MEM_2_SIZE 0x0020
+#define MV64x60_CPU2MEM_3_BASE 0x0218
+#define MV64x60_CPU2MEM_3_SIZE 0x0220
+
+/* CPU Device Controller Window Registers (4 windows) */
+#define MV64x60_CPU2DEV_CS_WINDOWS 4
+
+#define MV64x60_CPU2DEV_0_BASE 0x0028
+#define MV64x60_CPU2DEV_0_SIZE 0x0030
+#define MV64x60_CPU2DEV_1_BASE 0x0228
+#define MV64x60_CPU2DEV_1_SIZE 0x0230
+#define MV64x60_CPU2DEV_2_BASE 0x0248
+#define MV64x60_CPU2DEV_2_SIZE 0x0250
+#define MV64x60_CPU2DEV_3_BASE 0x0038
+#define MV64x60_CPU2DEV_3_SIZE 0x0040
+
+#define MV64x60_CPU2BOOT_0_BASE 0x0238
+#define MV64x60_CPU2BOOT_0_SIZE 0x0240
+
+/* CPU Windows to PCI space (2 PCI buses each w/ 1 I/O & 4 MEM windows) */
+#define MV64x60_PCI_BUSES 2
+#define MV64x60_PCI_IO_WINDOWS_PER_BUS 1
+#define MV64x60_PCI_MEM_WINDOWS_PER_BUS 4
+
+#define MV64x60_CPU2PCI_SWAP_BYTE 0x00000000
+#define MV64x60_CPU2PCI_SWAP_NONE 0x01000000
+#define MV64x60_CPU2PCI_SWAP_BYTE_WORD 0x02000000
+#define MV64x60_CPU2PCI_SWAP_WORD 0x03000000
+
+#define MV64x60_CPU2PCI_MEM_REQ64 (1<<27)
+
+#define MV64x60_CPU2PCI0_IO_BASE 0x0048
+#define MV64x60_CPU2PCI0_IO_SIZE 0x0050
+#define MV64x60_CPU2PCI0_MEM_0_BASE 0x0058
+#define MV64x60_CPU2PCI0_MEM_0_SIZE 0x0060
+#define MV64x60_CPU2PCI0_MEM_1_BASE 0x0080
+#define MV64x60_CPU2PCI0_MEM_1_SIZE 0x0088
+#define MV64x60_CPU2PCI0_MEM_2_BASE 0x0258
+#define MV64x60_CPU2PCI0_MEM_2_SIZE 0x0260
+#define MV64x60_CPU2PCI0_MEM_3_BASE 0x0280
+#define MV64x60_CPU2PCI0_MEM_3_SIZE 0x0288
+
+#define MV64x60_CPU2PCI0_IO_REMAP 0x00f0
+#define MV64x60_CPU2PCI0_MEM_0_REMAP_LO 0x00f8
+#define MV64x60_CPU2PCI0_MEM_0_REMAP_HI 0x0320
+#define MV64x60_CPU2PCI0_MEM_1_REMAP_LO 0x0100
+#define MV64x60_CPU2PCI0_MEM_1_REMAP_HI 0x0328
+#define MV64x60_CPU2PCI0_MEM_2_REMAP_LO 0x02f8
+#define MV64x60_CPU2PCI0_MEM_2_REMAP_HI 0x0330
+#define MV64x60_CPU2PCI0_MEM_3_REMAP_LO 0x0300
+#define MV64x60_CPU2PCI0_MEM_3_REMAP_HI 0x0338
+
+#define MV64x60_CPU2PCI1_IO_BASE 0x0090
+#define MV64x60_CPU2PCI1_IO_SIZE 0x0098
+#define MV64x60_CPU2PCI1_MEM_0_BASE 0x00a0
+#define MV64x60_CPU2PCI1_MEM_0_SIZE 0x00a8
+#define MV64x60_CPU2PCI1_MEM_1_BASE 0x00b0
+#define MV64x60_CPU2PCI1_MEM_1_SIZE 0x00b8
+#define MV64x60_CPU2PCI1_MEM_2_BASE 0x02a0
+#define MV64x60_CPU2PCI1_MEM_2_SIZE 0x02a8
+#define MV64x60_CPU2PCI1_MEM_3_BASE 0x02b0
+#define MV64x60_CPU2PCI1_MEM_3_SIZE 0x02b8
+
+#define MV64360_CPU2SRAM_BASE 0x0268
+
+#define MV64x60_CPU2PCI1_IO_REMAP 0x0108
+#define MV64x60_CPU2PCI1_MEM_0_REMAP_LO 0x0110
+#define MV64x60_CPU2PCI1_MEM_0_REMAP_HI 0x0340
+#define MV64x60_CPU2PCI1_MEM_1_REMAP_LO 0x0118
+#define MV64x60_CPU2PCI1_MEM_1_REMAP_HI 0x0348
+#define MV64x60_CPU2PCI1_MEM_2_REMAP_LO 0x0310
+#define MV64x60_CPU2PCI1_MEM_2_REMAP_HI 0x0350
+#define MV64x60_CPU2PCI1_MEM_3_REMAP_LO 0x0318
+#define MV64x60_CPU2PCI1_MEM_3_REMAP_HI 0x0358
+
+/* CPU Control Registers */
+#define MV64x60_CPU_CONFIG 0x0000
+#define MV64x60_CPU_MODE 0x0120
+#define MV64x60_CPU_MASTER_CNTL 0x0160
+#define MV64x60_CPU_XBAR_CNTL_LO 0x0150
+#define MV64x60_CPU_XBAR_CNTL_HI 0x0158
+#define MV64x60_CPU_XBAR_TO 0x0168
+
+#define GT64260_CPU_RR_XBAR_CNTL_LO 0x0170
+#define GT64260_CPU_RR_XBAR_CNTL_HI 0x0178
+
+#define MV64360_CPU_PADS_CALIBRATION 0x03b4
+#define MV64360_CPU_RESET_SAMPLE_LO 0x03c4
+#define MV64360_CPU_RESET_SAMPLE_HI 0x03d4
+
+/* SMP Register Map */
+#define MV64360_WHO_AM_I 0x0200
+#define MV64360_CPU0_DOORBELL 0x0214
+#define MV64360_CPU0_DOORBELL_CLR 0x021c
+#define MV64360_CPU0_DOORBELL_MASK 0x0234
+#define MV64360_CPU1_DOORBELL 0x0224
+#define MV64360_CPU1_DOORBELL_CLR 0x022c
+#define MV64360_CPU1_DOORBELL_MASK 0x023c
+#define MV64360_CPUx_DOORBELL(x) (0x0214 + ((x)*0x10))
+#define MV64360_CPUx_DOORBELL_CLR(x) (0x021c + ((x)*0x10))
+#define MV64360_CPUx_DOORBELL_MASK(x) (0x0234 + ((x)*0x08))
+#define MV64360_SEMAPHORE_0 0x0244
+#define MV64360_SEMAPHORE_1 0x024c
+#define MV64360_SEMAPHORE_2 0x0254
+#define MV64360_SEMAPHORE_3 0x025c
+#define MV64360_SEMAPHORE_4 0x0264
+#define MV64360_SEMAPHORE_5 0x026c
+#define MV64360_SEMAPHORE_6 0x0274
+#define MV64360_SEMAPHORE_7 0x027c
+
+/* CPU Sync Barrier Registers */
+#define GT64260_CPU_SYNC_BARRIER_PCI0 0x00c0
+#define GT64260_CPU_SYNC_BARRIER_PCI1 0x00c8
+
+#define MV64360_CPU0_SYNC_BARRIER_TRIG 0x00c0
+#define MV64360_CPU0_SYNC_BARRIER_VIRT 0x00c8
+#define MV64360_CPU1_SYNC_BARRIER_TRIG 0x00d0
+#define MV64360_CPU1_SYNC_BARRIER_VIRT 0x00d8
+
+/* CPU Deadlock and Ordering registers (Rev B part only) */
+#define GT64260_CPU_DEADLOCK_ORDERING 0x02d0
+#define GT64260_CPU_WB_PRIORITY_BUFFER_DEPTH 0x02d8
+#define GT64260_CPU_COUNTERS_SYNC_BARRIER_ATTRIBUTE 0x02e0
+
+/* CPU Access Protection Registers (gt64260 really has 8 but don't need) */
+#define MV64x260_CPU_PROT_WINDOWS 4
+
+#define GT64260_CPU_PROT_ACCPROTECT (1<<16)
+#define GT64260_CPU_PROT_WRPROTECT (1<<17)
+#define GT64260_CPU_PROT_CACHEPROTECT (1<<18)
+
+#define MV64360_CPU_PROT_ACCPROTECT (1<<20)
+#define MV64360_CPU_PROT_WRPROTECT (1<<21)
+#define MV64360_CPU_PROT_CACHEPROTECT (1<<22)
+#define MV64360_CPU_PROT_WIN_ENABLE (1<<31)
+
+#define MV64x60_CPU_PROT_BASE_0 0x0180
+#define MV64x60_CPU_PROT_SIZE_0 0x0188
+#define MV64x60_CPU_PROT_BASE_1 0x0190
+#define MV64x60_CPU_PROT_SIZE_1 0x0198
+#define MV64x60_CPU_PROT_BASE_2 0x01a0
+#define MV64x60_CPU_PROT_SIZE_2 0x01a8
+#define MV64x60_CPU_PROT_BASE_3 0x01b0
+#define MV64x60_CPU_PROT_SIZE_3 0x01b8
+
+#define GT64260_CPU_PROT_BASE_4 0x01c0
+#define GT64260_CPU_PROT_SIZE_4 0x01c8
+#define GT64260_CPU_PROT_BASE_5 0x01d0
+#define GT64260_CPU_PROT_SIZE_5 0x01d8
+#define GT64260_CPU_PROT_BASE_6 0x01e0
+#define GT64260_CPU_PROT_SIZE_6 0x01e8
+#define GT64260_CPU_PROT_BASE_7 0x01f0
+#define GT64260_CPU_PROT_SIZE_7 0x01f8
+
+/* CPU Snoop Control Registers (64260 only) */
+#define GT64260_CPU_SNOOP_WINDOWS 4
+
+#define GT64260_CPU_SNOOP_NONE 0x00000000
+#define GT64260_CPU_SNOOP_WT 0x00010000
+#define GT64260_CPU_SNOOP_WB 0x00020000
+#define GT64260_CPU_SNOOP_MASK 0x00030000
+#define GT64260_CPU_SNOOP_ALL_BITS GT64260_CPU_SNOOP_MASK
+
+#define GT64260_CPU_SNOOP_BASE_0 0x0380
+#define GT64260_CPU_SNOOP_SIZE_0 0x0388
+#define GT64260_CPU_SNOOP_BASE_1 0x0390
+#define GT64260_CPU_SNOOP_SIZE_1 0x0398
+#define GT64260_CPU_SNOOP_BASE_2 0x03a0
+#define GT64260_CPU_SNOOP_SIZE_2 0x03a8
+#define GT64260_CPU_SNOOP_BASE_3 0x03b0
+#define GT64260_CPU_SNOOP_SIZE_3 0x03b8
+
+/* CPU Error Report Registers */
+#define MV64x60_CPU_ERR_ADDR_LO 0x0070
+#define MV64x60_CPU_ERR_ADDR_HI 0x0078
+#define MV64x60_CPU_ERR_DATA_LO 0x0128
+#define MV64x60_CPU_ERR_DATA_HI 0x0130
+#define MV64x60_CPU_ERR_PARITY 0x0138
+#define MV64x60_CPU_ERR_CAUSE 0x0140
+#define MV64x60_CPU_ERR_MASK 0x0148
+
+/*
+ *****************************************************************************
+ *
+ * SRAM Controller Registers
+ *
+ *****************************************************************************
+ */
+
+#define MV64360_SRAM_CONFIG 0x0380
+#define MV64360_SRAM_TEST_MODE 0x03f4
+#define MV64360_SRAM_ERR_CAUSE 0x0388
+#define MV64360_SRAM_ERR_ADDR_LO 0x0390
+#define MV64360_SRAM_ERR_ADDR_HI 0x03f8
+#define MV64360_SRAM_ERR_DATA_LO 0x0398
+#define MV64360_SRAM_ERR_DATA_HI 0x03a0
+#define MV64360_SRAM_ERR_PARITY 0x03a8
+
+
+/*
+ *****************************************************************************
+ *
+ * SDRAM Controller Registers
+ *
+ *****************************************************************************
+ */
+
+/* SDRAM Config Registers (64260) */
+#define GT64260_SDRAM_CONFIG 0x0448
+
+/* SDRAM Error Report Registers (64260) */
+#define GT64260_SDRAM_ERR_DATA_LO 0x0484
+#define GT64260_SDRAM_ERR_DATA_HI 0x0480
+#define GT64260_SDRAM_ERR_ADDR 0x0490
+#define GT64260_SDRAM_ERR_ECC_RCVD 0x0488
+#define GT64260_SDRAM_ERR_ECC_CALC 0x048c
+#define GT64260_SDRAM_ERR_ECC_CNTL 0x0494
+#define GT64260_SDRAM_ERR_ECC_ERR_CNT 0x0498
+
+/* SDRAM Config Registers (64360) */
+#define MV64360_SDRAM_CONFIG 0x1400
+
+/* SDRAM Error Report Registers (64360) */
+#define MV64360_SDRAM_ERR_DATA_LO 0x1444
+#define MV64360_SDRAM_ERR_DATA_HI 0x1440
+#define MV64360_SDRAM_ERR_ADDR 0x1450
+#define MV64360_SDRAM_ERR_ECC_RCVD 0x1448
+#define MV64360_SDRAM_ERR_ECC_CALC 0x144c
+#define MV64360_SDRAM_ERR_ECC_CNTL 0x1454
+#define MV64360_SDRAM_ERR_ECC_ERR_CNT 0x1458
+
+
+/*
+ *****************************************************************************
+ *
+ * Device/BOOT Controller Registers
+ *
+ *****************************************************************************
+ */
+
+/* Device Control Registers */
+#define MV64x60_DEV_BANK_PARAMS_0 0x045c
+#define MV64x60_DEV_BANK_PARAMS_1 0x0460
+#define MV64x60_DEV_BANK_PARAMS_2 0x0464
+#define MV64x60_DEV_BANK_PARAMS_3 0x0468
+#define MV64x60_DEV_BOOT_PARAMS 0x046c
+#define MV64x60_DEV_IF_CNTL 0x04c0
+#define MV64x60_DEV_IF_XBAR_CNTL_LO 0x04c8
+#define MV64x60_DEV_IF_XBAR_CNTL_HI 0x04cc
+#define MV64x60_DEV_IF_XBAR_CNTL_TO 0x04c4
+
+/* Device Interrupt Registers */
+#define MV64x60_DEV_INTR_CAUSE 0x04d0
+#define MV64x60_DEV_INTR_MASK 0x04d4
+#define MV64x60_DEV_INTR_ERR_ADDR 0x04d8
+
+#define MV64360_DEV_INTR_ERR_DATA 0x04dc
+#define MV64360_DEV_INTR_ERR_PAR 0x04e0
+
+
+/*
+ *****************************************************************************
+ *
+ * PCI Bridge Interface Registers
+ *
+ *****************************************************************************
+ */
+
+/* PCI Configuration Access Registers */
+#define MV64x60_PCI0_CONFIG_ADDR 0x0cf8
+#define MV64x60_PCI0_CONFIG_DATA 0x0cfc
+#define MV64x60_PCI0_IACK 0x0c34
+
+#define MV64x60_PCI1_CONFIG_ADDR 0x0c78
+#define MV64x60_PCI1_CONFIG_DATA 0x0c7c
+#define MV64x60_PCI1_IACK 0x0cb4
+
+/* PCI Control Registers */
+#define MV64x60_PCI0_CMD 0x0c00
+#define MV64x60_PCI0_MODE 0x0d00
+#define MV64x60_PCI0_TO_RETRY 0x0c04
+#define MV64x60_PCI0_RD_BUF_DISCARD_TIMER 0x0d04
+#define MV64x60_PCI0_MSI_TRIGGER_TIMER 0x0c38
+#define MV64x60_PCI0_ARBITER_CNTL 0x1d00
+#define MV64x60_PCI0_XBAR_CNTL_LO 0x1d08
+#define MV64x60_PCI0_XBAR_CNTL_HI 0x1d0c
+#define MV64x60_PCI0_XBAR_CNTL_TO 0x1d04
+#define MV64x60_PCI0_RD_RESP_XBAR_CNTL_LO 0x1d18
+#define MV64x60_PCI0_RD_RESP_XBAR_CNTL_HI 0x1d1c
+#define MV64x60_PCI0_SYNC_BARRIER 0x1d10
+#define MV64x60_PCI0_P2P_CONFIG 0x1d14
+#define MV64x60_PCI0_INTR_MASK /* NOTE(review): no value given -- macro expands to nothing; intended register offset missing, TODO confirm against the bridge datasheet */
+
+#define GT64260_PCI0_P2P_SWAP_CNTL 0x1d54
+
+#define MV64x60_PCI1_CMD 0x0c80
+#define MV64x60_PCI1_MODE 0x0d80
+#define MV64x60_PCI1_TO_RETRY 0x0c84
+#define MV64x60_PCI1_RD_BUF_DISCARD_TIMER 0x0d84
+#define MV64x60_PCI1_MSI_TRIGGER_TIMER 0x0cb8
+#define MV64x60_PCI1_ARBITER_CNTL 0x1d80
+#define MV64x60_PCI1_XBAR_CNTL_LO 0x1d88
+#define MV64x60_PCI1_XBAR_CNTL_HI 0x1d8c
+#define MV64x60_PCI1_XBAR_CNTL_TO 0x1d84
+#define MV64x60_PCI1_RD_RESP_XBAR_CNTL_LO 0x1d98
+#define MV64x60_PCI1_RD_RESP_XBAR_CNTL_HI 0x1d9c
+#define MV64x60_PCI1_SYNC_BARRIER 0x1d90
+#define MV64x60_PCI1_P2P_CONFIG 0x1d94
+
+#define GT64260_PCI1_P2P_SWAP_CNTL 0x1dd4
+
+/* PCI Access Control Regions Registers */
+#define GT64260_PCI_ACC_CNTL_PREFETCHEN (1<<12)
+#define GT64260_PCI_ACC_CNTL_DREADEN (1<<13)
+#define GT64260_PCI_ACC_CNTL_RDPREFETCH (1<<16)
+#define GT64260_PCI_ACC_CNTL_RDLINEPREFETCH (1<<17)
+#define GT64260_PCI_ACC_CNTL_RDMULPREFETCH (1<<18)
+#define GT64260_PCI_ACC_CNTL_MBURST_32_BTYES 0x00000000
+#define GT64260_PCI_ACC_CNTL_MBURST_64_BYTES 0x00100000
+#define GT64260_PCI_ACC_CNTL_MBURST_128_BYTES 0x00200000
+#define GT64260_PCI_ACC_CNTL_MBURST_MASK 0x00300000
+#define GT64260_PCI_ACC_CNTL_SWAP_BYTE 0x00000000
+#define GT64260_PCI_ACC_CNTL_SWAP_NONE 0x01000000
+#define GT64260_PCI_ACC_CNTL_SWAP_BYTE_WORD 0x02000000
+#define GT64260_PCI_ACC_CNTL_SWAP_WORD 0x03000000
+#define GT64260_PCI_ACC_CNTL_SWAP_MASK 0x03000000
+#define GT64260_PCI_ACC_CNTL_ACCPROT (1<<28)
+#define GT64260_PCI_ACC_CNTL_WRPROT (1<<29)
+
+#define GT64260_PCI_ACC_CNTL_ALL_BITS (GT64260_PCI_ACC_CNTL_PREFETCHEN | \
+ GT64260_PCI_ACC_CNTL_DREADEN | \
+ GT64260_PCI_ACC_CNTL_RDPREFETCH | \
+ GT64260_PCI_ACC_CNTL_RDLINEPREFETCH |\
+ GT64260_PCI_ACC_CNTL_RDMULPREFETCH | \
+ GT64260_PCI_ACC_CNTL_MBURST_MASK | \
+ GT64260_PCI_ACC_CNTL_SWAP_MASK | \
+ GT64260_PCI_ACC_CNTL_ACCPROT| \
+ GT64260_PCI_ACC_CNTL_WRPROT)
+
+#define MV64360_PCI_ACC_CNTL_ENABLE (1<<0)
+#define MV64360_PCI_ACC_CNTL_REQ64 (1<<1)
+#define MV64360_PCI_ACC_CNTL_SNOOP_NONE 0x00000000
+#define MV64360_PCI_ACC_CNTL_SNOOP_WT 0x00000004
+#define MV64360_PCI_ACC_CNTL_SNOOP_WB 0x00000008
+#define MV64360_PCI_ACC_CNTL_SNOOP_MASK 0x0000000c
+#define MV64360_PCI_ACC_CNTL_ACCPROT (1<<4)
+#define MV64360_PCI_ACC_CNTL_WRPROT (1<<5)
+#define MV64360_PCI_ACC_CNTL_SWAP_BYTE 0x00000000
+#define MV64360_PCI_ACC_CNTL_SWAP_NONE 0x00000040
+#define MV64360_PCI_ACC_CNTL_SWAP_BYTE_WORD 0x00000080
+#define MV64360_PCI_ACC_CNTL_SWAP_WORD 0x000000c0
+#define MV64360_PCI_ACC_CNTL_SWAP_MASK 0x000000c0
+#define MV64360_PCI_ACC_CNTL_MBURST_32_BYTES 0x00000000
+#define MV64360_PCI_ACC_CNTL_MBURST_64_BYTES 0x00000100
+#define MV64360_PCI_ACC_CNTL_MBURST_128_BYTES 0x00000200
+#define MV64360_PCI_ACC_CNTL_MBURST_MASK 0x00000300
+#define MV64360_PCI_ACC_CNTL_RDSIZE_32_BYTES 0x00000000
+#define MV64360_PCI_ACC_CNTL_RDSIZE_64_BYTES 0x00000400
+#define MV64360_PCI_ACC_CNTL_RDSIZE_128_BYTES 0x00000800
+#define MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES 0x00000c00
+#define MV64360_PCI_ACC_CNTL_RDSIZE_MASK 0x00000c00
+
+#define MV64360_PCI_ACC_CNTL_ALL_BITS (MV64360_PCI_ACC_CNTL_ENABLE | \
+ MV64360_PCI_ACC_CNTL_REQ64 | \
+ MV64360_PCI_ACC_CNTL_SNOOP_MASK | \
+ MV64360_PCI_ACC_CNTL_ACCPROT | \
+ MV64360_PCI_ACC_CNTL_WRPROT | \
+ MV64360_PCI_ACC_CNTL_SWAP_MASK | \
+ MV64360_PCI_ACC_CNTL_MBURST_MASK | \
+ MV64360_PCI_ACC_CNTL_RDSIZE_MASK)
+
+#define MV64x60_PCI0_ACC_CNTL_0_BASE_LO 0x1e00
+#define MV64x60_PCI0_ACC_CNTL_0_BASE_HI 0x1e04
+#define MV64x60_PCI0_ACC_CNTL_0_SIZE 0x1e08
+#define MV64x60_PCI0_ACC_CNTL_1_BASE_LO 0x1e10
+#define MV64x60_PCI0_ACC_CNTL_1_BASE_HI 0x1e14
+#define MV64x60_PCI0_ACC_CNTL_1_SIZE 0x1e18
+#define MV64x60_PCI0_ACC_CNTL_2_BASE_LO 0x1e20
+#define MV64x60_PCI0_ACC_CNTL_2_BASE_HI 0x1e24
+#define MV64x60_PCI0_ACC_CNTL_2_SIZE 0x1e28
+#define MV64x60_PCI0_ACC_CNTL_3_BASE_LO 0x1e30
+#define MV64x60_PCI0_ACC_CNTL_3_BASE_HI 0x1e34
+#define MV64x60_PCI0_ACC_CNTL_3_SIZE 0x1e38
+#define MV64x60_PCI0_ACC_CNTL_4_BASE_LO 0x1e40
+#define MV64x60_PCI0_ACC_CNTL_4_BASE_HI 0x1e44
+#define MV64x60_PCI0_ACC_CNTL_4_SIZE 0x1e48
+#define MV64x60_PCI0_ACC_CNTL_5_BASE_LO 0x1e50
+#define MV64x60_PCI0_ACC_CNTL_5_BASE_HI 0x1e54
+#define MV64x60_PCI0_ACC_CNTL_5_SIZE 0x1e58
+
+#define GT64260_PCI0_ACC_CNTL_6_BASE_LO 0x1e60
+#define GT64260_PCI0_ACC_CNTL_6_BASE_HI 0x1e64
+#define GT64260_PCI0_ACC_CNTL_6_SIZE 0x1e68
+#define GT64260_PCI0_ACC_CNTL_7_BASE_LO 0x1e70
+#define GT64260_PCI0_ACC_CNTL_7_BASE_HI 0x1e74
+#define GT64260_PCI0_ACC_CNTL_7_SIZE 0x1e78
+
+#define MV64x60_PCI1_ACC_CNTL_0_BASE_LO 0x1e80
+#define MV64x60_PCI1_ACC_CNTL_0_BASE_HI 0x1e84
+#define MV64x60_PCI1_ACC_CNTL_0_SIZE 0x1e88
+#define MV64x60_PCI1_ACC_CNTL_1_BASE_LO 0x1e90
+#define MV64x60_PCI1_ACC_CNTL_1_BASE_HI 0x1e94
+#define MV64x60_PCI1_ACC_CNTL_1_SIZE 0x1e98
+#define MV64x60_PCI1_ACC_CNTL_2_BASE_LO 0x1ea0
+#define MV64x60_PCI1_ACC_CNTL_2_BASE_HI 0x1ea4
+#define MV64x60_PCI1_ACC_CNTL_2_SIZE 0x1ea8
+#define MV64x60_PCI1_ACC_CNTL_3_BASE_LO 0x1eb0
+#define MV64x60_PCI1_ACC_CNTL_3_BASE_HI 0x1eb4
+#define MV64x60_PCI1_ACC_CNTL_3_SIZE 0x1eb8
+#define MV64x60_PCI1_ACC_CNTL_4_BASE_LO 0x1ec0
+#define MV64x60_PCI1_ACC_CNTL_4_BASE_HI 0x1ec4
+#define MV64x60_PCI1_ACC_CNTL_4_SIZE 0x1ec8
+#define MV64x60_PCI1_ACC_CNTL_5_BASE_LO 0x1ed0
+#define MV64x60_PCI1_ACC_CNTL_5_BASE_HI 0x1ed4
+#define MV64x60_PCI1_ACC_CNTL_5_SIZE 0x1ed8
+
+#define GT64260_PCI1_ACC_CNTL_6_BASE_LO 0x1ee0
+#define GT64260_PCI1_ACC_CNTL_6_BASE_HI 0x1ee4
+#define GT64260_PCI1_ACC_CNTL_6_SIZE 0x1ee8
+#define GT64260_PCI1_ACC_CNTL_7_BASE_LO 0x1ef0
+#define GT64260_PCI1_ACC_CNTL_7_BASE_HI 0x1ef4
+#define GT64260_PCI1_ACC_CNTL_7_SIZE 0x1ef8
+
+/* PCI Snoop Control Registers (64260 only) */
+#define GT64260_PCI_SNOOP_NONE 0x00000000
+#define GT64260_PCI_SNOOP_WT 0x00001000
+#define GT64260_PCI_SNOOP_WB 0x00002000
+
+#define GT64260_PCI0_SNOOP_0_BASE_LO 0x1f00
+#define GT64260_PCI0_SNOOP_0_BASE_HI 0x1f04
+#define GT64260_PCI0_SNOOP_0_SIZE 0x1f08
+#define GT64260_PCI0_SNOOP_1_BASE_LO 0x1f10
+#define GT64260_PCI0_SNOOP_1_BASE_HI 0x1f14
+#define GT64260_PCI0_SNOOP_1_SIZE 0x1f18
+#define GT64260_PCI0_SNOOP_2_BASE_LO 0x1f20
+#define GT64260_PCI0_SNOOP_2_BASE_HI 0x1f24
+#define GT64260_PCI0_SNOOP_2_SIZE 0x1f28
+#define GT64260_PCI0_SNOOP_3_BASE_LO 0x1f30
+#define GT64260_PCI0_SNOOP_3_BASE_HI 0x1f34
+#define GT64260_PCI0_SNOOP_3_SIZE 0x1f38
+
+#define GT64260_PCI1_SNOOP_0_BASE_LO 0x1f80
+#define GT64260_PCI1_SNOOP_0_BASE_HI 0x1f84
+#define GT64260_PCI1_SNOOP_0_SIZE 0x1f88
+#define GT64260_PCI1_SNOOP_1_BASE_LO 0x1f90
+#define GT64260_PCI1_SNOOP_1_BASE_HI 0x1f94
+#define GT64260_PCI1_SNOOP_1_SIZE 0x1f98
+#define GT64260_PCI1_SNOOP_2_BASE_LO 0x1fa0
+#define GT64260_PCI1_SNOOP_2_BASE_HI 0x1fa4
+#define GT64260_PCI1_SNOOP_2_SIZE 0x1fa8
+#define GT64260_PCI1_SNOOP_3_BASE_LO 0x1fb0
+#define GT64260_PCI1_SNOOP_3_BASE_HI 0x1fb4
+#define GT64260_PCI1_SNOOP_3_SIZE 0x1fb8
+
+/* PCI Error Report Registers */
+#define MV64x60_PCI0_ERR_SERR_MASK 0x0c28
+#define MV64x60_PCI0_ERR_ADDR_LO 0x1d40
+#define MV64x60_PCI0_ERR_ADDR_HI 0x1d44
+#define MV64x60_PCI0_ERR_DATA_LO 0x1d48
+#define MV64x60_PCI0_ERR_DATA_HI 0x1d4c
+#define MV64x60_PCI0_ERR_CMD 0x1d50
+#define MV64x60_PCI0_ERR_CAUSE 0x1d58
+#define MV64x60_PCI0_ERR_MASK 0x1d5c
+
+#define MV64x60_PCI1_ERR_SERR_MASK 0x0ca8
+#define MV64x60_PCI1_ERR_ADDR_LO 0x1dc0
+#define MV64x60_PCI1_ERR_ADDR_HI 0x1dc4
+#define MV64x60_PCI1_ERR_DATA_LO 0x1dc8
+#define MV64x60_PCI1_ERR_DATA_HI 0x1dcc
+#define MV64x60_PCI1_ERR_CMD 0x1dd0
+#define MV64x60_PCI1_ERR_CAUSE 0x1dd8
+#define MV64x60_PCI1_ERR_MASK 0x1ddc
+
+/* PCI Slave Address Decoding Registers */
+#define MV64x60_PCI0_MEM_0_SIZE 0x0c08
+#define MV64x60_PCI0_MEM_1_SIZE 0x0d08
+#define MV64x60_PCI0_MEM_2_SIZE 0x0c0c
+#define MV64x60_PCI0_MEM_3_SIZE 0x0d0c
+#define MV64x60_PCI1_MEM_0_SIZE 0x0c88
+#define MV64x60_PCI1_MEM_1_SIZE 0x0d88
+#define MV64x60_PCI1_MEM_2_SIZE 0x0c8c
+#define MV64x60_PCI1_MEM_3_SIZE 0x0d8c
+
+#define MV64x60_PCI0_BAR_ENABLE 0x0c3c
+#define MV64x60_PCI1_BAR_ENABLE 0x0cbc
+
+#define MV64x60_PCI0_PCI_DECODE_CNTL 0x0d3c
+
+
+
+
+
+#define MV64x60_PCI0_SLAVE_BAR_REG_ENABLES 0x0c3c
+#define MV64x60_PCI0_SLAVE_MEM_0_REMAP 0x0c48
+#define MV64x60_PCI0_SLAVE_MEM_1_REMAP 0x0d48
+#define MV64x60_PCI0_SLAVE_MEM_2_REMAP 0x0c4c
+#define MV64x60_PCI0_SLAVE_MEM_3_REMAP 0x0d4c
+#define MV64x60_PCI0_SLAVE_CS_0_REMAP 0x0c50
+#define MV64x60_PCI0_SLAVE_CS_1_REMAP 0x0d50
+#define MV64x60_PCI0_SLAVE_CS_2_REMAP 0x0d58
+#define MV64x60_PCI0_SLAVE_CS_3_REMAP 0x0c54
+#define MV64x60_PCI0_SLAVE_BOOT_REMAP 0x0d54
+#define MV64x60_PCI0_SLAVE_P2P_MEM_0_REMAP_LO 0x0d5c
+#define MV64x60_PCI0_SLAVE_P2P_MEM_0_REMAP_HI 0x0d60
+#define MV64x60_PCI0_SLAVE_P2P_MEM_1_REMAP_LO 0x0d64
+#define MV64x60_PCI0_SLAVE_P2P_MEM_1_REMAP_HI 0x0d68
+#define MV64x60_PCI0_SLAVE_P2P_IO_REMAP 0x0d6c
+#define MV64x60_PCI0_SLAVE_CPU_REMAP 0x0d70
+
+#define GT64260_PCI0_SLAVE_DAC_SCS_0_REMAP 0x0f00
+#define GT64260_PCI0_SLAVE_DAC_SCS_1_REMAP 0x0f04
+#define GT64260_PCI0_SLAVE_DAC_SCS_2_REMAP 0x0f08
+#define GT64260_PCI0_SLAVE_DAC_SCS_3_REMAP 0x0f0c
+#define GT64260_PCI0_SLAVE_DAC_CS_0_REMAP 0x0f10
+#define GT64260_PCI0_SLAVE_DAC_CS_1_REMAP 0x0f14
+#define GT64260_PCI0_SLAVE_DAC_CS_2_REMAP 0x0f18
+#define GT64260_PCI0_SLAVE_DAC_CS_3_REMAP 0x0f1c
+#define GT64260_PCI0_SLAVE_DAC_BOOT_REMAP 0x0f20
+#define GT64260_PCI0_SLAVE_DAC_P2P_MEM_0_REMAP_LO 0x0f24
+#define GT64260_PCI0_SLAVE_DAC_P2P_MEM_0_REMAP_HI 0x0f28
+#define GT64260_PCI0_SLAVE_DAC_P2P_MEM_1_REMAP_LO 0x0f2c
+#define GT64260_PCI0_SLAVE_DAC_P2P_MEM_1_REMAP_HI 0x0f30
+#define GT64260_PCI0_SLAVE_DAC_CPU_REMAP 0x0f34
+
+#define GT64260_PCI0_SLAVE_EXP_ROM_REMAP 0x0f38
+#define GT64260_PCI0_SLAVE_PCI_DECODE_CNTL 0x0d3c
+
+
+
+
+
+/* XXXX BEGIN */
+#define MV64x60_PCI1_PCI_DECODE_CNTL 0x0dbc
+
+#define MV64x60_PCI1_SLAVE_MEM_0_SIZE 0x0c88
+#define MV64x60_PCI1_SLAVE_MEM_1_SIZE 0x0d88
+#define MV64x60_PCI1_SLAVE_MEM_2_SIZE 0x0c8c
+#define MV64x60_PCI1_SLAVE_MEM_3_SIZE 0x0d8c
+#define MV64x60_PCI1_SLAVE_CS_0_SIZE 0x0c90
+#define MV64x60_PCI1_SLAVE_CS_1_SIZE 0x0d90
+#define MV64x60_PCI1_SLAVE_CS_2_SIZE 0x0d98
+#define MV64x60_PCI1_SLAVE_CS_3_SIZE 0x0c94
+#define MV64x60_PCI1_SLAVE_BOOT_SIZE 0x0d94
+#define MV64x60_PCI1_SLAVE_P2P_MEM_0_SIZE 0x0d9c
+#define MV64x60_PCI1_SLAVE_P2P_MEM_1_SIZE 0x0da0
+#define MV64x60_PCI1_SLAVE_P2P_IO_SIZE 0x0da4
+#define MV64x60_PCI1_SLAVE_CPU_SIZE 0x0da8
+
+
+/* XXXXX END */
+
+
+#define GT64260_PCI1_SLAVE_DAC_SCS_0_SIZE 0x0e80
+#define GT64260_PCI1_SLAVE_DAC_SCS_1_SIZE 0x0e84
+#define GT64260_PCI1_SLAVE_DAC_SCS_2_SIZE 0x0e88
+#define GT64260_PCI1_SLAVE_DAC_SCS_3_SIZE 0x0e8c
+#define GT64260_PCI1_SLAVE_DAC_CS_0_SIZE 0x0e90
+#define GT64260_PCI1_SLAVE_DAC_CS_1_SIZE 0x0e94
+#define GT64260_PCI1_SLAVE_DAC_CS_2_SIZE 0x0e98
+#define GT64260_PCI1_SLAVE_DAC_CS_3_SIZE 0x0e9c
+#define GT64260_PCI1_SLAVE_DAC_BOOT_SIZE 0x0ea0
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_0_SIZE 0x0ea4
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_1_SIZE 0x0ea8
+#define GT64260_PCI1_SLAVE_DAC_CPU_SIZE 0x0eac
+
+#define GT64260_PCI1_SLAVE_EXP_ROM_SIZE 0x0dac
+
+
+
+
+/* XXXX BEGIN */
+
+#define MV64x60_PCI1_SLAVE_BAR_REG_ENABLES 0x0cbc
+#define MV64x60_PCI1_SLAVE_MEM_0_REMAP 0x0cc8
+#define MV64x60_PCI1_SLAVE_MEM_1_REMAP 0x0dc8
+#define MV64x60_PCI1_SLAVE_MEM_2_REMAP 0x0ccc
+#define MV64x60_PCI1_SLAVE_MEM_3_REMAP 0x0dcc
+#define MV64x60_PCI1_SLAVE_CS_0_REMAP 0x0cd0
+#define MV64x60_PCI1_SLAVE_CS_1_REMAP 0x0dd0
+#define MV64x60_PCI1_SLAVE_CS_2_REMAP 0x0dd8
+#define MV64x60_PCI1_SLAVE_CS_3_REMAP 0x0cd4
+#define MV64x60_PCI1_SLAVE_BOOT_REMAP 0x0dd4
+#define MV64x60_PCI1_SLAVE_P2P_MEM_0_REMAP_LO 0x0ddc
+#define MV64x60_PCI1_SLAVE_P2P_MEM_0_REMAP_HI 0x0de0
+#define MV64x60_PCI1_SLAVE_P2P_MEM_1_REMAP_LO 0x0de4
+#define MV64x60_PCI1_SLAVE_P2P_MEM_1_REMAP_HI 0x0de8
+#define MV64x60_PCI1_SLAVE_P2P_IO_REMAP 0x0dec
+#define MV64x60_PCI1_SLAVE_CPU_REMAP 0x0df0
+
+/* XXXXX END */
+
+
+
+#define GT64260_PCI1_SLAVE_DAC_SCS_0_REMAP 0x0f80
+#define GT64260_PCI1_SLAVE_DAC_SCS_1_REMAP 0x0f84
+#define GT64260_PCI1_SLAVE_DAC_SCS_2_REMAP 0x0f88
+#define GT64260_PCI1_SLAVE_DAC_SCS_3_REMAP 0x0f8c
+#define GT64260_PCI1_SLAVE_DAC_CS_0_REMAP 0x0f90
+#define GT64260_PCI1_SLAVE_DAC_CS_1_REMAP 0x0f94
+#define GT64260_PCI1_SLAVE_DAC_CS_2_REMAP 0x0f98
+#define GT64260_PCI1_SLAVE_DAC_CS_3_REMAP 0x0f9c
+#define GT64260_PCI1_SLAVE_DAC_BOOT_REMAP 0x0fa0
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_0_REMAP_LO 0x0fa4
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_0_REMAP_HI 0x0fa8
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_1_REMAP_LO 0x0fac
+#define GT64260_PCI1_SLAVE_DAC_P2P_MEM_1_REMAP_HI 0x0fb0
+#define GT64260_PCI1_SLAVE_DAC_CPU_REMAP 0x0fb4
+
+#define GT64260_PCI1_SLAVE_EXP_ROM_REMAP 0x0fb8
+#define GT64260_PCI1_SLAVE_PCI_DECODE_CNTL 0x0dbc
+
+
+/*
+ *****************************************************************************
+ *
+ * Timer/Counter Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define MV64x60_TIMR_CNTR_0 0x0850
+#define MV64x60_TIMR_CNTR_1 0x0854
+#define MV64x60_TIMR_CNTR_2 0x0858
+#define MV64x60_TIMR_CNTR_3 0x085c
+#define MV64x60_TIMR_CNTR_0_3_CNTL 0x0864
+#define MV64x60_TIMR_CNTR_0_3_INTR_CAUSE 0x0868
+#define MV64x60_TIMR_CNTR_0_3_INTR_MASK 0x086c
+
+#define GT64260_TIMR_CNTR_4 0x0950
+#define GT64260_TIMR_CNTR_5 0x0954
+#define GT64260_TIMR_CNTR_6 0x0958
+#define GT64260_TIMR_CNTR_7 0x095c
+#define GT64260_TIMR_CNTR_4_7_CNTL 0x0964
+#define GT64260_TIMR_CNTR_4_7_INTR_CAUSE 0x0968
+#define GT64260_TIMR_CNTR_4_7_INTR_MASK 0x096c
+
+
+/*
+ *****************************************************************************
+ *
+ * Communications Controller (Enet, Serial, etc.) Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define GT64260_COMM_ENET_0_OFFSET 0xf200
+#define GT64260_COMM_ENET_1_OFFSET 0xf220
+#define GT64260_COMM_ENET_2_OFFSET 0xf240
+
+#define GT64260_ENET_CNTL_LO \
+ (0xf200 - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_CNTL_HI \
+ (0xf204 - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_RX_BUF_PCI_ADDR_HI \
+ (0xf208 - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_TX_BUF_PCI_ADDR_HI \
+ (0xf20c - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_RX_DESC_ADDR_HI \
+ (0xf210 - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_TX_DESC_ADDR_HI \
+ (0xf214 - GT64260_COMM_ENET_0_BASE)
+#define GT64260_ENET_HASH_TAB_PCI_ADDR_HI \
+ (0xf218 - GT64260_COMM_ENET_0_BASE)
+
+#define GT64260_COMM_MPSC_0_OFFSET 0xf280
+#define GT64260_COMM_MPSC_1_OFFSET 0xf2c0
+
+#define GT64260_MPSC_CNTL_LO \
+ (0xf280 - GT64260_COMM_MPSC_0_BASE)
+#define GT64260_MPSC_CNTL_HI \
+ (0xf284 - GT64260_COMM_MPSC_0_BASE)
+#define GT64260_MPSC_RX_BUF_PCI_ADDR_HI \
+ (0xf288 - GT64260_COMM_MPSC_0_BASE)
+#define GT64260_MPSC_TX_BUF_PCI_ADDR_HI \
+ (0xf28c - GT64260_COMM_MPSC_0_BASE)
+#define GT64260_MPSC_RX_DESC_ADDR_HI \
+ (0xf290 - GT64260_COMM_MPSC_0_BASE)
+#define GT64260_MPSC_TX_DESC_ADDR_HI \
+ (0xf294 - GT64260_COMM_MPSC_0_BASE)
+
+#define GT64260_SER_INIT_PCI_ADDR_HI 0xf320
+#define GT64260_SER_INIT_LAST_DATA 0xf324
+#define GT64260_SER_INIT_CONTROL 0xf328
+#define GT64260_SER_INIT_STATUS 0xf32c
+
+#define GT64260_COMM_ARBITER_CNTL 0xf300
+#define GT64260_COMM_CONFIG 0xb40c
+#define GT64260_COMM_XBAR_TO 0xf304
+#define GT64260_COMM_INTR_CAUSE 0xf310
+#define GT64260_COMM_INTR_MASK 0xf314
+#define GT64260_COMM_ERR_ADDR 0xf318
+
+
+/*
+ *****************************************************************************
+ *
+ * Fast Ethernet Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define GT64260_ENET_PHY_ADDR 0x2000
+#define GT64260_ENET_ESMIR 0x2010
+
+#define GT64260_ENET_0_OFFSET 0x2400
+#define GT64260_ENET_1_OFFSET 0x2800
+#define GT64260_ENET_2_OFFSET 0x2c00
+
+#define GT64260_ENET_EPCR (0x2400 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EPCXR (0x2408 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EPCMR (0x2410 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EPSR (0x2418 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ESPR (0x2420 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EHTPR (0x2428 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFCSAL (0x2430 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFCSAH (0x2438 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ESDCR (0x2440 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ESDCMR (0x2448 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EICR (0x2450 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EIMR (0x2458 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFRDP0 (0x2480 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFRDP1 (0x2484 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFRDP2 (0x2488 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_EFRDP3 (0x248c - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECRDP0 (0x24a0 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECRDP1 (0x24a4 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECRDP2 (0x24a8 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECRDP3 (0x24ac - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECTDP0 (0x24e0 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_ECTDP1 (0x24e4 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_DSCP2P0L (0x2460 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_DSCP2P0H (0x2464 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_DSCP2P1L (0x2468 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_DSCP2P1H (0x246c - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_VPT2P (0x2470 - GT64260_ENET_0_OFFSET)
+#define GT64260_ENET_MIB_CTRS (0x2500 - GT64260_ENET_0_OFFSET)
+
+/*
+ *****************************************************************************
+ *
+ * IDMA Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+/* Base offsets for the eight IDMA channels (channels 0-3 and 4-7 sit in
+ * two separate register groups). */
+#define GT64260_IDMA_0_OFFSET 0x0800
+#define GT64260_IDMA_1_OFFSET 0x0804
+#define GT64260_IDMA_2_OFFSET 0x0808
+#define GT64260_IDMA_3_OFFSET 0x080c
+#define GT64260_IDMA_4_OFFSET 0x0900
+#define GT64260_IDMA_5_OFFSET 0x0904
+#define GT64260_IDMA_6_OFFSET 0x0908
+#define GT64260_IDMA_7_OFFSET 0x090c
+
+/* Per-channel register offsets, expressed relative to the channel 0 base
+ * (same idiom as the ENET/I2C sections) so they can be added to any of the
+ * GT64260_IDMA_n_OFFSET values above. */
+#define GT64260_IDMA_BYTE_COUNT (0x0800 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_SRC_ADDR (0x0810 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_DST_ADDR (0x0820 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_NEXT_DESC (0x0830 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_CUR_DESC (0x0870 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_SRC_PCI_ADDR_HI (0x0890 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_DST_PCI_ADDR_HI (0x08a0 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_NEXT_DESC_PCI_ADDR_HI (0x08b0 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_CONTROL_LO (0x0840 - GT64260_IDMA_0_OFFSET)
+#define GT64260_IDMA_CONTROL_HI (0x0880 - GT64260_IDMA_0_OFFSET)
+
+/* Shared (not per-channel) registers: absolute offsets. */
+#define GT64260_IDMA_0_3_ARBITER_CNTL 0x0860
+#define GT64260_IDMA_4_7_ARBITER_CNTL 0x0960
+
+#define GT64260_IDMA_0_3_XBAR_TO 0x08d0
+#define GT64260_IDMA_4_7_XBAR_TO 0x09d0
+
+#define GT64260_IDMA_0_3_INTR_CAUSE 0x08c0
+#define GT64260_IDMA_0_3_INTR_MASK 0x08c4
+#define GT64260_IDMA_0_3_ERROR_ADDR 0x08c8
+#define GT64260_IDMA_0_3_ERROR_SELECT 0x08cc
+#define GT64260_IDMA_4_7_INTR_CAUSE 0x09c0
+#define GT64260_IDMA_4_7_INTR_MASK 0x09c4
+#define GT64260_IDMA_4_7_ERROR_ADDR 0x09c8
+#define GT64260_IDMA_4_7_ERROR_SELECT 0x09cc
+
+/*
+ *****************************************************************************
+ *
+ * Watchdog Timer Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define GT64260_WDT_WDC 0xb410
+#define GT64260_WDT_WDV 0xb414
+
+
+/*
+ *****************************************************************************
+ *
+ * General Purpose Pins Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define MV64x60_GPP_IO_CNTL 0xf100
+#define MV64x60_GPP_LEVEL_CNTL 0xf110
+#define MV64x60_GPP_VALUE 0xf104
+#define MV64x60_GPP_INTR_CAUSE 0xf108
+#define MV64x60_GPP_INTR_MASK 0xf10c
+
+
+/*
+ *****************************************************************************
+ *
+ * Multi-Purpose Pins Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define MV64x60_MPP_CNTL_0 0xf000
+#define MV64x60_MPP_CNTL_1 0xf004
+#define MV64x60_MPP_CNTL_2 0xf008
+#define MV64x60_MPP_CNTL_3 0xf00c
+#define GT64260_MPP_SERIAL_PORTS_MULTIPLEX 0xf010
+
+
+/*
+ *****************************************************************************
+ *
+ * I2C Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+#define GT64260_I2C_OFFSET 0xc000
+
+/* Register offsets relative to GT64260_I2C_OFFSET. */
+#define GT64260_I2C_ADDR (0xc000 - GT64260_I2C_OFFSET)
+#define GT64260_I2C_EX_ADDR (0xc010 - GT64260_I2C_OFFSET)
+#define GT64260_I2C_DATA (0xc004 - GT64260_I2C_OFFSET)
+#define GT64260_I2C_CONTROL (0xc008 - GT64260_I2C_OFFSET)
+/* NOTE(review): STATUS and BAUD_RATE share offset 0xc00c (presumably a
+ * read-status / write-baud-rate register pair) -- confirm against the
+ * GT64260 datasheet. */
+#define GT64260_I2C_STATUS (0xc00c - GT64260_I2C_OFFSET)
+#define GT64260_I2C_BAUD_RATE (0xc00c - GT64260_I2C_OFFSET)
+#define GT64260_I2C_RESET (0xc01c - GT64260_I2C_OFFSET)
+
+/* Bit fields of the I2C control register. */
+#define GT64260_I2C_ACK_BIT (1<<2)
+#define GT64260_I2C_IFLG_BIT (1<<3)
+#define GT64260_I2C_STOP_BIT (1<<4)
+#define GT64260_I2C_START_BIT (1<<5)
+#define GT64260_I2C_ENABLE_BIT (1<<6)
+#define GT64260_I2C_INT_ENABLE_BIT (1<<7)
+
+#define GT64260_I2C_DATA_READ_BIT 0x01
+
+/* Values read from the I2C status register after bus events. */
+#define GT64260_I2C_STATUS_SENT_START 0x08
+#define GT64260_I2C_STATUS_RESENT_START 0x10
+#define GT64260_I2C_STATUS_WRITE_ADDR_ACK 0x18
+#define GT64260_I2C_STATUS_WRITE_ACK 0x28
+#define GT64260_I2C_STATUS_READ_ADDR_ACK 0x40
+#define GT64260_I2C_STATUS_READ_ACK 0x50
+#define GT64260_I2C_STATUS_READ_NO_ACK 0x58
+#define GT64260_I2C_STATUS_IDLE 0xf8
+
+/*
+ *****************************************************************************
+ *
+ * Interrupt Controller Interface Registers
+ *
+ *****************************************************************************
+ */
+
+/* GT64260 interrupt controller: offsets relative to GT64260_IC_OFFSET. */
+#define GT64260_IC_OFFSET 0x0c18
+
+#define GT64260_IC_MAIN_CAUSE_LO (0x0c18 - GT64260_IC_OFFSET)
+#define GT64260_IC_MAIN_CAUSE_HI (0x0c68 - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INTR_MASK_LO (0x0c1c - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INTR_MASK_HI (0x0c6c - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_SELECT_CAUSE (0x0c70 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI0_INTR_MASK_LO (0x0c24 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI0_INTR_MASK_HI (0x0c64 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI0_SELECT_CAUSE (0x0c74 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI1_INTR_MASK_LO (0x0ca4 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI1_INTR_MASK_HI (0x0ce4 - GT64260_IC_OFFSET)
+#define GT64260_IC_PCI1_SELECT_CAUSE (0x0cf4 - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INT_0_MASK (0x0e60 - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INT_1_MASK (0x0e64 - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INT_2_MASK (0x0e68 - GT64260_IC_OFFSET)
+#define GT64260_IC_CPU_INT_3_MASK (0x0e6c - GT64260_IC_OFFSET)
+
+/* MV64360 interrupt controller: same relative-offset idiom (base is 0). */
+#define MV64360_IC_OFFSET 0x0000
+
+#define MV64360_IC_MAIN_CAUSE_LO (0x0004 - MV64360_IC_OFFSET)
+#define MV64360_IC_MAIN_CAUSE_HI (0x000c - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU0_INTR_MASK_LO (0x0014 - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU0_INTR_MASK_HI (0x001c - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU0_SELECT_CAUSE (0x0024 - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU1_INTR_MASK_LO (0x0034 - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU1_INTR_MASK_HI (0x003c - MV64360_IC_OFFSET)
+#define MV64360_IC_CPU1_SELECT_CAUSE (0x0044 - MV64360_IC_OFFSET)
+#define MV64360_IC_INT0_MASK_LO (0x0054 - MV64360_IC_OFFSET)
+#define MV64360_IC_INT0_MASK_HI (0x005c - MV64360_IC_OFFSET)
+#define MV64360_IC_INT0_SELECT_CAUSE (0x0064 - MV64360_IC_OFFSET)
+#define MV64360_IC_INT1_MASK_LO (0x0074 - MV64360_IC_OFFSET)
+#define MV64360_IC_INT1_MASK_HI (0x007c - MV64360_IC_OFFSET)
+#define MV64360_IC_INT1_SELECT_CAUSE (0x0084 - MV64360_IC_OFFSET)
+
+#endif /* __ASMPPC_MV64x60_DEFS_H */
#define OCP_VENDOR_ARM 0x0004
#define OCP_VENDOR_FREESCALE 0x1057
#define OCP_VENDOR_IBM 0x1014
+#define OCP_VENDOR_MARVELL 0x11ab
#define OCP_VENDOR_MOTOROLA OCP_VENDOR_FREESCALE
#define OCP_VENDOR_XILINX 0x10ee
#define OCP_VENDOR_UNKNOWN 0xFFFF
#define OCP_FUNC_16550 0x0031
#define OCP_FUNC_IIC 0x0032
#define OCP_FUNC_USB 0x0033
-#define OCP_FUNC_PSC_UART 0x0034
+#define OCP_FUNC_MPSC 0x0034
+#define OCP_FUNC_COMM_MPSC 0x0035
+#define OCP_FUNC_SDMA 0x0036
/* Memory devices 0x0090 - 0x009F */
#define OCP_FUNC_MAL 0x0090
#define OCP_FUNC_PERFMON 0x00D2 /* Performance Monitor */
#define OCP_FUNC_RGMII 0x00D3
#define OCP_FUNC_TAH 0x00D4
+#define OCP_FUNC_I2C 0x00D5 /* I2C Controller */
+#define OCP_FUNC_BRG 0x00D6 /* Baud Rate Generator */
+#define OCP_FUNC_PIC 0x00D7 /* Programmable Interrupt Controller */
/* Network 0x0200 - 0x02FF */
#define OCP_FUNC_EMAC 0x0200
-#define OCP_FUNC_GFAR 0x0201 /* TSEC & FEC */
+#define OCP_FUNC_ENET 0x0201 /* TSEC & FEC */
+#define OCP_FUNC_COMM_EMAC 0x0202
+#define OCP_FUNC_GFAR 0x0203 /* TSEC & FEC */
/* Bridge devices 0xE00 - 0xEFF */
#define OCP_FUNC_OPB 0x0E00
+#define OCP_FUNC_HB 0x0E01 /* Host bridge */
#define OCP_FUNC_UNKNOWN 0xFFFF
extern int openpic_get_irq(struct pt_regs *regs);
extern void openpic_reset_processor_phys(u_int cpumask);
extern void openpic_setup_ISU(int isu_num, unsigned long addr);
-extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
+extern void openpic_cause_IPI(u_int ipi, u_int cpumask);
extern void smp_openpic_message_pass(int target, int msg, unsigned long data,
int wait);
extern void openpic_set_k2_cascade(int irq);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */
--- /dev/null
+/*
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * 2000 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Data structures specific to the IBM PowerPC 405 on-chip DMA controller
+ * and API.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASMPPC_405_DMA_H
+#define __ASMPPC_405_DMA_H
+
+#include <linux/types.h>
+
+/* #define DEBUG_405DMA */
+
+#define TRUE 1
+#define FALSE 0
+
+/* Bytes reserved for one scatter/gather list: a sgl_list_info_t header
+ * followed by packed ppc_sgl_t descriptors (see add_405gp_dma_sgl()). */
+#define SGL_LIST_SIZE 4096
+/* #define PCI_ALLOC_IS_NONCONSISTENT */
+
+#define MAX_405GP_DMA_CHANNELS 4
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS 0xFFFFFFFF
+
+extern unsigned long ISA_DMA_THRESHOLD;
+
+#define dma_outb outb
+#define dma_inb inb
+
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+#define DMA_MODE_READ DMA_TD /* Peripheral to Memory */
+#define DMA_MODE_WRITE 0 /* Memory to Peripheral */
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+
+/*
+ * DMA Polarity Configuration Register
+ */
+/* NOTE(review): (1<<31) left-shifts into the sign bit of int, which is
+ * undefined behavior in ISO C; (1u<<31) would be strictly correct.  Left
+ * as-is here since these masks are used throughout -- confirm before a
+ * wholesale change. */
+#define DMAReq0_ActiveLow (1<<31)
+#define DMAAck0_ActiveLow (1<<30)
+#define EOT0_ActiveLow (1<<29) /* End of Transfer */
+
+#define DMAReq1_ActiveLow (1<<28)
+#define DMAAck1_ActiveLow (1<<27)
+#define EOT1_ActiveLow (1<<26)
+
+#define DMAReq2_ActiveLow (1<<25)
+#define DMAAck2_ActiveLow (1<<24)
+#define EOT2_ActiveLow (1<<23)
+
+#define DMAReq3_ActiveLow (1<<22)
+#define DMAAck3_ActiveLow (1<<21)
+#define EOT3_ActiveLow (1<<20)
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+
+/*
+ * DMA Status Register
+ */
+/* Per-channel flag bits as read back via get_405gp_dma_status(). */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+
+
+/*
+ * DMA Channel Control Registers
+ */
+/* Each field comes as a bit mask plus SET_/GET_ accessor macros that
+ * shift the raw value into/out of its position in the control word. */
+#define DMA_CH_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CH_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CH_ENABLE(x) (((x)&DMA_CH_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+
+#define SET_DMA_PW(x) (((x)&0x3)<<26) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(3)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>26)
+
+#define DMA_DAI (1<<25) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<25)
+
+#define DMA_SAI (1<<24) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<24)
+
+#define DMA_BEN (1<<23) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<23)
+
+#define SET_DMA_TM(x) (((x)&0x3)<<21) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>21)
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<19) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>19)
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<13) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>13)
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<10) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>10)
+
+#define DMA_ETD_OUTPUT (1<<9) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<9)
+
+#define DMA_TCE_ENABLE (1<<8)
+#define SET_DMA_TCE(x) (((x)&0x1)<<8)
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<6) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>6)
+
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<4) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>4)
+
+#define DMA_PCE (1<<3) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<3)
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>3)
+
+#define DMA_DEC (1<<2) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG0_ENABLE (1<<31) /* Start Scatter Gather */
+#define SSG1_ENABLE (1<<30)
+#define SSG2_ENABLE (1<<29)
+#define SSG3_ENABLE (1<<28)
+#define SSG0_MASK_ENABLE (1<<15) /* Enable writing to SSG0 bit */
+#define SSG1_MASK_ENABLE (1<<14)
+#define SSG2_MASK_ENABLE (1<<13)
+#define SSG3_MASK_ENABLE (1<<12)
+
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+
+
+
+/*
+ * Opaque handle for a scatter/gather list.  In practice it carries the
+ * address of a sgl_list_info_t (the sgl functions below cast it back).
+ */
+typedef uint32_t sgl_handle_t;
+
+/*
+ * Per-channel software state.  One of these exists for each of the
+ * MAX_405GP_DMA_CHANNELS channels (see the dma_channels[] extern below).
+ */
+typedef struct {
+
+	/*
+	 * Valid polarity settings:
+	 * DMAReq0_ActiveLow
+	 * DMAAck0_ActiveLow
+	 * EOT0_ActiveLow
+	 *
+	 * DMAReq1_ActiveLow
+	 * DMAAck1_ActiveLow
+	 * EOT1_ActiveLow
+	 *
+	 * DMAReq2_ActiveLow
+	 * DMAAck2_ActiveLow
+	 * EOT2_ActiveLow
+	 *
+	 * DMAReq3_ActiveLow
+	 * DMAAck3_ActiveLow
+	 * EOT3_ActiveLow
+	 */
+	unsigned int polarity;
+
+	char buffer_enable; /* Boolean: buffer enable */
+	char tce_enable; /* Boolean: terminal count enable */
+	char etd_output; /* Boolean: eot pin is a tc output */
+	char pce; /* Boolean: parity check enable */
+
+	/*
+	 * Peripheral location:
+	 * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+	 * EXTERNAL_PERIPHERAL
+	 */
+	char pl; /* internal/external peripheral */
+
+	/*
+	 * Valid pwidth settings:
+	 * PW_8
+	 * PW_16
+	 * PW_32
+	 * PW_64
+	 */
+	unsigned int pwidth;
+
+	char dai; /* Boolean: dst address increment */
+	char sai; /* Boolean: src address increment */
+
+	/*
+	 * Valid psc settings: 0-3
+	 */
+	unsigned int psc; /* Peripheral Setup Cycles */
+
+	/*
+	 * Valid pwc settings:
+	 * 0-63
+	 */
+	unsigned int pwc; /* Peripheral Wait Cycles */
+
+	/*
+	 * Valid phc settings:
+	 * 0-7
+	 */
+	unsigned int phc; /* Peripheral Hold Cycles */
+
+	/*
+	 * Valid cp (channel priority) settings:
+	 * PRIORITY_LOW
+	 * PRIORITY_MID_LOW
+	 * PRIORITY_MID_HIGH
+	 * PRIORITY_HIGH
+	 */
+	unsigned int cp; /* channel priority */
+
+	/*
+	 * Valid pf (memory read prefetch) settings:
+	 *
+	 * PREFETCH_1
+	 * PREFETCH_2
+	 * PREFETCH_4
+	 */
+	unsigned int pf; /* memory read prefetch */
+
+	/*
+	 * Boolean: channel interrupt enable
+	 * NOTE: for sgl transfers, only the last descriptor will be setup to
+	 * interrupt.
+	 */
+	char int_enable;
+
+	char shift; /* easy access to byte_count shift, based on */
+		    /* the width of the channel */
+
+	uint32_t control; /* channel control word */
+
+
+	/* These variabled are used ONLY in single dma transfers */
+	unsigned int mode; /* transfer mode */
+	dma_addr_t addr;
+
+} ppc_dma_ch_t;
+
+
+/*
+ * One scatter/gather descriptor.  NOTE(review): presumably this must match
+ * the 405GP hardware descriptor layout exactly (it is handed to the
+ * controller via DCRN_ASGn) -- do not reorder fields.
+ */
+typedef struct {
+	uint32_t control;
+	uint32_t src_addr;
+	uint32_t dst_addr;
+	uint32_t control_count;
+	uint32_t next;
+} ppc_sgl_t;
+
+
+
+/*
+ * Header placed at the start of each SGL_LIST_SIZE region; the ppc_sgl_t
+ * descriptors are packed immediately after it (see add_405gp_dma_sgl()).
+ */
+typedef struct {
+	unsigned int dmanr;
+	uint32_t control; /* channel ctrl word; loaded from each descrptr */
+	uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+	dma_addr_t dma_addr; /* dma (physical) address of this list */
+	ppc_sgl_t *phead;
+	ppc_sgl_t *ptail;
+
+} sgl_list_info_t;
+
+
+/* Paired virtual/bus addresses for a PCI-consistent allocation. */
+typedef struct {
+	unsigned int *src_addr;
+	unsigned int *dst_addr;
+	dma_addr_t dma_src_addr;
+	dma_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+
+extern ppc_dma_ch_t dma_channels[];
+
+/*
+ *
+ * DMA API inline functions
+ * These functions are implemented here as inline functions for
+ * performance reasons.
+ *
+ */
+
+/* Read back the raw contents of the DMA Status Register (DCRN_DMASR). */
+static __inline__ int get_405gp_dma_status(void)
+{
+	int status = mfdcr(DCRN_DMASR);
+
+	return status;
+}
+
+
+/*
+ * Enable (start) DMA channel 'dmanr'.
+ *
+ * For single peripheral<->memory transfers, the address saved earlier by
+ * set_405gp_dma_addr() is programmed here once the direction is known:
+ * DMA_MODE_READ loads it as the destination, DMA_MODE_WRITE as the source,
+ * and the unused address register is cleared to 0.  (It was previously
+ * written with NULL, a pointer constant, where an integer register value
+ * is required.)  For all other transfer modes both addresses are assumed
+ * to have been programmed already via set_405gp_dma_addr2().
+ *
+ * Returns DMA_STATUS_GOOD, or DMA_STATUS_BAD_CHANNEL for a bad 'dmanr'.
+ */
+static __inline__ int enable_405gp_dma(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+	if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+		printk("enable_dma: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+#endif
+
+
+	switch (dmanr) {
+	case 0:
+		if (p_dma_ch->mode == DMA_MODE_READ) {
+			/* peripheral to memory */
+			mtdcr(DCRN_DMASA0, 0);
+			mtdcr(DCRN_DMADA0, p_dma_ch->addr);
+		}
+		else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+			/* memory to peripheral */
+			mtdcr(DCRN_DMASA0, p_dma_ch->addr);
+			mtdcr(DCRN_DMADA0, 0);
+		}
+		/* for other xfer modes, the addresses are already set */
+		control = mfdcr(DCRN_DMACR0);
+		control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
+		control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+		mtdcr(DCRN_DMACR0, control);
+		break;
+	case 1:
+		if (p_dma_ch->mode == DMA_MODE_READ) {
+			mtdcr(DCRN_DMASA1, 0);
+			mtdcr(DCRN_DMADA1, p_dma_ch->addr);
+		} else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+			mtdcr(DCRN_DMASA1, p_dma_ch->addr);
+			mtdcr(DCRN_DMADA1, 0);
+		}
+		control = mfdcr(DCRN_DMACR1);
+		control &= ~(DMA_TM_MASK | DMA_TD);
+		control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+		mtdcr(DCRN_DMACR1, control);
+		break;
+	case 2:
+		if (p_dma_ch->mode == DMA_MODE_READ) {
+			mtdcr(DCRN_DMASA2, 0);
+			mtdcr(DCRN_DMADA2, p_dma_ch->addr);
+		} else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+			mtdcr(DCRN_DMASA2, p_dma_ch->addr);
+			mtdcr(DCRN_DMADA2, 0);
+		}
+		control = mfdcr(DCRN_DMACR2);
+		control &= ~(DMA_TM_MASK | DMA_TD);
+		control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+		mtdcr(DCRN_DMACR2, control);
+		break;
+	case 3:
+		if (p_dma_ch->mode == DMA_MODE_READ) {
+			mtdcr(DCRN_DMASA3, 0);
+			mtdcr(DCRN_DMADA3, p_dma_ch->addr);
+		} else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+			mtdcr(DCRN_DMASA3, p_dma_ch->addr);
+			mtdcr(DCRN_DMADA3, 0);
+		}
+		control = mfdcr(DCRN_DMACR3);
+		control &= ~(DMA_TM_MASK | DMA_TD);
+		control |= (p_dma_ch->mode | DMA_CH_ENABLE);
+		mtdcr(DCRN_DMACR3, control);
+		break;
+	default:
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Disable (stop) DMA channel 'dmanr' by clearing the channel-enable bit
+ * in its control register.  A bad channel number is silently ignored
+ * (reported via printk only when DEBUG_405DMA is defined).
+ *
+ * Fix: the original 'default:' label was the last thing in the switch
+ * when DEBUG_405DMA is undefined -- a label must precede a statement in
+ * ISO C, so a 'break;' is required after the #endif.
+ */
+static __inline__ void disable_405gp_dma(unsigned int dmanr)
+{
+	unsigned int control;
+
+	switch (dmanr) {
+	case 0:
+		control = mfdcr(DCRN_DMACR0);
+		control &= ~DMA_CH_ENABLE;
+		mtdcr(DCRN_DMACR0, control);
+		break;
+	case 1:
+		control = mfdcr(DCRN_DMACR1);
+		control &= ~DMA_CH_ENABLE;
+		mtdcr(DCRN_DMACR1, control);
+		break;
+	case 2:
+		control = mfdcr(DCRN_DMACR2);
+		control &= ~DMA_CH_ENABLE;
+		mtdcr(DCRN_DMACR2, control);
+		break;
+	case 3:
+		control = mfdcr(DCRN_DMACR3);
+		control &= ~DMA_CH_ENABLE;
+		mtdcr(DCRN_DMACR3, control);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("disable_dma: bad channel: %d\n", dmanr);
+#endif
+		break;
+	}
+}
+
+
+
+/*
+ * Record the transfer mode for subsequent single (non-sgl) DMA operations
+ * on channel 'dmanr'.  Nothing is written to the hardware here; the saved
+ * mode is picked up later by enable_405gp_dma(), so this function and
+ * set_405gp_dma_addr() may be called in either order.
+ *
+ * Accepted modes:
+ *	DMA_MODE_READ		peripheral to memory
+ *	DMA_MODE_WRITE		memory to peripheral
+ *	DMA_MODE_MM		memory to memory
+ *	DMA_MODE_MM_DEVATSRC	device-paced mem to mem, device at src
+ *	DMA_MODE_MM_DEVATDST	device-paced mem to mem, device at dst
+ *
+ * Returns DMA_STATUS_GOOD; with DEBUG_405DMA defined, bad arguments yield
+ * DMA_STATUS_BAD_MODE or DMA_STATUS_BAD_CHANNEL instead.
+ */
+static __inline__ int set_405gp_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+	ppc_dma_ch_t *chan = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+	if ((mode != DMA_MODE_READ) && (mode != DMA_MODE_WRITE) &&
+	    (mode != DMA_MODE_MM) && (mode != DMA_MODE_MM_DEVATSRC) &&
+	    (mode != DMA_MODE_MM_DEVATDST)) {
+		printk("set_dma_mode: bad mode 0x%x\n", mode);
+		return DMA_STATUS_BAD_MODE;
+	}
+	if (dmanr >= MAX_405GP_DMA_CHANNELS) {
+		printk("set_dma_mode: bad channel 0x%x\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+#endif
+
+	chan->mode = mode;
+	return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Sets the DMA Count register.  Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width.  Thus, count
+ * MUST be a multiple of the bus width; it is converted here using the
+ * per-channel 'shift' cached in the channel structure.
+ *
+ * Fix: with DEBUG_405DMA undefined the 'default:' label was the last
+ * thing in the switch, which is invalid ISO C (a label must precede a
+ * statement); a 'break;' is added after the #endif.
+ */
+static __inline__ void
+set_405gp_dma_count(unsigned int dmanr, unsigned int count)
+{
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_405DMA
+	{
+		int error = 0;
+		switch(p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (count & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if (count & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if (count & 0x7)
+				error = 1;
+			break;
+		default:
+			printk("set_dma_count: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk("Warning: set_dma_count count 0x%x bus width %d\n",
+			       count, p_dma_ch->pwidth);
+	}
+#endif
+
+	count = count >> p_dma_ch->shift;	/* bytes -> bus-width transfers */
+	switch (dmanr) {
+	case 0:
+		mtdcr(DCRN_DMACT0, count);
+		break;
+	case 1:
+		mtdcr(DCRN_DMACT1, count);
+		break;
+	case 2:
+		mtdcr(DCRN_DMACT2, count);
+		break;
+	case 3:
+		mtdcr(DCRN_DMACT3, count);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("set_dma_count: bad channel: %d\n", dmanr);
+#endif
+		break;
+	}
+}
+
+
+
+/*
+ * Return the number of bytes still to be transferred on channel 'dmanr'.
+ * After a completed transfer this reads back as zero; sampling it while a
+ * transfer is in flight gives unpredictable results.  An invalid channel
+ * returns 0 (and prints a message when DEBUG_405DMA is defined).
+ */
+static __inline__ int get_405gp_dma_residue(unsigned int dmanr)
+{
+	ppc_dma_ch_t *chan = &dma_channels[dmanr];
+	unsigned int xfers;
+
+	switch (dmanr) {
+	case 0:
+		xfers = mfdcr(DCRN_DMACT0);
+		break;
+	case 1:
+		xfers = mfdcr(DCRN_DMACT1);
+		break;
+	case 2:
+		xfers = mfdcr(DCRN_DMACT2);
+		break;
+	case 3:
+		xfers = mfdcr(DCRN_DMACT3);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("get_dma_residue: bad channel: %d\n", dmanr);
+#endif
+		return 0;
+	}
+
+	/* the count register holds bus-width transfers, not bytes */
+	return (xfers << chan->shift);
+}
+
+
+
+/*
+ * Record the single DMA address for a memory<->peripheral transfer on
+ * channel 'dmanr'.  The address is only cached in the channel structure;
+ * enable_405gp_dma() programs it into the hardware once the transfer
+ * direction (and therefore which address register to use) is known.
+ */
+static __inline__ void set_405gp_dma_addr(unsigned int dmanr, dma_addr_t addr)
+{
+	ppc_dma_ch_t *chan = &dma_channels[dmanr];
+#ifdef DEBUG_405DMA
+	{
+		unsigned int align_mask;
+
+		/* the address must be aligned to the channel's bus width */
+		switch (chan->pwidth) {
+		case PW_8:
+			align_mask = 0x0;
+			break;
+		case PW_16:
+			align_mask = 0x1;
+			break;
+		case PW_32:
+			align_mask = 0x3;
+			break;
+		case PW_64:
+			align_mask = 0x7;
+			break;
+		default:
+			printk("set_dma_addr: invalid bus width: 0x%x\n",
+			       chan->pwidth);
+			return;
+		}
+		if ((unsigned)addr & align_mask)
+			printk("Warning: set_dma_addr addr 0x%x bus width %d\n",
+			       addr, chan->pwidth);
+	}
+#endif
+
+	/* save dma address and program it later after we know the xfer mode */
+	chan->addr = addr;
+}
+
+
+
+
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.  Unlike
+ * set_405gp_dma_addr(), the addresses are written to the hardware
+ * immediately.
+ *
+ * Fix: with DEBUG_405DMA undefined the 'default:' label was the last
+ * thing in the switch, which is invalid ISO C (a label must precede a
+ * statement); a 'break;' is added after the #endif.
+ */
+static __inline__ void
+set_405gp_dma_addr2(unsigned int dmanr, dma_addr_t src_dma_addr,
+		    dma_addr_t dst_dma_addr)
+{
+#ifdef DEBUG_405DMA
+	{
+		ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+		int error = 0;
+		switch(p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (((unsigned)src_dma_addr & 0x1) ||
+			    ((unsigned)dst_dma_addr & 0x1)
+			   )
+				error = 1;
+			break;
+		case PW_32:
+			if (((unsigned)src_dma_addr & 0x3) ||
+			    ((unsigned)dst_dma_addr & 0x3)
+			   )
+				error = 1;
+			break;
+		case PW_64:
+			if (((unsigned)src_dma_addr & 0x7) ||
+			    ((unsigned)dst_dma_addr & 0x7)
+			   )
+				error = 1;
+			break;
+		default:
+			printk("set_dma_addr2: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk("Warning: set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
+			       src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
+	}
+#endif
+
+	switch (dmanr) {
+	case 0:
+		mtdcr(DCRN_DMASA0, src_dma_addr);
+		mtdcr(DCRN_DMADA0, dst_dma_addr);
+		break;
+	case 1:
+		mtdcr(DCRN_DMASA1, src_dma_addr);
+		mtdcr(DCRN_DMADA1, dst_dma_addr);
+		break;
+	case 2:
+		mtdcr(DCRN_DMASA2, src_dma_addr);
+		mtdcr(DCRN_DMADA2, dst_dma_addr);
+		break;
+	case 3:
+		mtdcr(DCRN_DMASA3, src_dma_addr);
+		mtdcr(DCRN_DMADA3, dst_dma_addr);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("set_dma_addr2: bad channel: %d\n", dmanr);
+#endif
+		break;
+	}
+}
+
+
+
+/*
+ * Turn on the channel interrupt for 'dmanr': records int_enable in the
+ * channel structure and sets DMA_CIE_ENABLE in the channel's control
+ * register.
+ *
+ * For scatter/gather transfers this MUST be called before
+ * alloc_dma_handle() builds the sgl list, otherwise previously-disabled
+ * interrupts stay disabled.
+ *
+ * Returns DMA_STATUS_GOOD or DMA_STATUS_BAD_CHANNEL.
+ */
+static __inline__ int
+enable_405gp_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int ctrl;
+	ppc_dma_ch_t *chan = &dma_channels[dmanr];
+
+	chan->int_enable = TRUE;
+
+	switch (dmanr) {
+	case 0:
+		ctrl = mfdcr(DCRN_DMACR0) | DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR0, ctrl);
+		break;
+	case 1:
+		ctrl = mfdcr(DCRN_DMACR1) | DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR1, ctrl);
+		break;
+	case 2:
+		ctrl = mfdcr(DCRN_DMACR2) | DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR2, ctrl);
+		break;
+	case 3:
+		ctrl = mfdcr(DCRN_DMACR3) | DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR3, ctrl);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("enable_dma_interrupt: bad channel: %d\n", dmanr);
+#endif
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ *
+ * Fixes: the original set int_enable = TRUE here (copy-paste from the
+ * enable function) -- it must be FALSE in the disable path; the debug
+ * message also named the wrong function.
+ */
+static __inline__ int
+disable_405gp_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	p_dma_ch->int_enable = FALSE;
+	switch (dmanr) {
+	case 0:
+		control = mfdcr(DCRN_DMACR0);
+		control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+		mtdcr(DCRN_DMACR0, control);
+		break;
+	case 1:
+		control = mfdcr(DCRN_DMACR1);
+		control &= ~DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR1, control);
+		break;
+	case 2:
+		control = mfdcr(DCRN_DMACR2);
+		control &= ~DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR2, control);
+		break;
+	case 3:
+		control = mfdcr(DCRN_DMACR3);
+		control &= ~DMA_CIE_ENABLE;
+		mtdcr(DCRN_DMACR3, control);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("disable_dma_interrupt: bad channel: %d\n", dmanr);
+#endif
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	return DMA_STATUS_GOOD;
+}
+
+
+#ifdef DCRNCAP_DMA_SG
+
+/*
+ * Add a new sgl descriptor to the end of a scatter/gather list
+ * which was created by alloc_dma_handle().
+ *
+ * For a memory to memory transfer, both dma addresses must be
+ * valid. For a peripheral to memory transfer, one of the addresses
+ * must be set to NULL, depending on the direction of the transfer:
+ * memory to peripheral: set dst_addr to NULL,
+ * peripheral to memory: set src_addr to NULL.
+ */
+static __inline__ int
+add_405gp_dma_sgl(sgl_handle_t handle, dma_addr_t src_addr, dma_addr_t dst_addr,
+		  unsigned int count)
+{
+	/* the handle is really the address of the list-info header */
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	ppc_dma_ch_t *p_dma_ch;
+
+	if (!handle) {
+#ifdef DEBUG_405DMA
+		printk("add_dma_sgl: null handle\n");
+#endif
+		return DMA_STATUS_BAD_HANDLE;
+	}
+
+#ifdef DEBUG_405DMA
+	if (psgl->dmanr >= MAX_405GP_DMA_CHANNELS) {
+		printk("add_dma_sgl error: psgl->dmanr == %d\n", psgl->dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+#endif
+
+	p_dma_ch = &dma_channels[psgl->dmanr];
+
+#ifdef DEBUG_405DMA
+	{
+		/* addresses and count must be aligned to the bus width */
+		int error = 0;
+		unsigned int aligned = (unsigned)src_addr | (unsigned)dst_addr | count;
+		switch(p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (aligned & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if (aligned & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if (aligned & 0x7)
+				error = 1;
+			break;
+		default:
+			printk("add_dma_sgl: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return DMA_STATUS_GENERAL_ERROR;
+		}
+		if (error)
+			printk("Alignment warning: add_dma_sgl src 0x%x dst 0x%x count 0x%x bus width var %d\n",
+			       src_addr, dst_addr, count, p_dma_ch->pwidth);
+
+	}
+#endif
+
+	/* Reject the add if the next descriptor would fall outside the
+	 * SGL_LIST_SIZE region that starts at the header.
+	 * NOTE(review): when the list is still empty, ptail is NULL, so
+	 * 'psgl->ptail + 1' is pointer arithmetic on a null pointer
+	 * (technically undefined, though the small result passes the
+	 * comparison in practice) -- worth confirming/cleaning up. */
+	if ((unsigned)(psgl->ptail + 1) >= ((unsigned)psgl + SGL_LIST_SIZE)) {
+#ifdef DEBUG_405DMA
+		printk("sgl handle out of memory \n");
+#endif
+		return DMA_STATUS_OUT_OF_MEMORY;
+	}
+
+
+	if (!psgl->ptail) {
+		/* first descriptor: lives immediately after the header */
+		psgl->phead = (ppc_sgl_t *)
+			      ((unsigned)psgl + sizeof(sgl_list_info_t));
+		psgl->ptail = psgl->phead;
+	} else {
+		/* link the previous tail to the new descriptor (bus addr) */
+		psgl->ptail->next = virt_to_bus(psgl->ptail + 1);
+		psgl->ptail++;
+	}
+
+	/* fill in the new tail; 'next' stays 0 until another add links it */
+	psgl->ptail->control = psgl->control;
+	psgl->ptail->src_addr = src_addr;
+	psgl->ptail->dst_addr = dst_addr;
+	psgl->ptail->control_count = (count >> p_dma_ch->shift) |
+				     psgl->sgl_control;
+	psgl->ptail->next = (uint32_t)NULL;
+
+	return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Enable (start) the DMA described by the sgl handle: clears the SG_LINK
+ * bit on the final descriptor so the controller stops there, points the
+ * channel's address-set-generation register at the list head, and sets
+ * the channel's start bit in the SG command register.
+ *
+ * Fix: with DEBUG_405DMA undefined the 'default:' label was the last
+ * thing in the switch, which is invalid ISO C (a label must precede a
+ * statement); a 'break;' is added after the #endif.
+ */
+static __inline__ void enable_405gp_dma_sgl(sgl_handle_t handle)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	ppc_dma_ch_t *p_dma_ch;
+	uint32_t sg_command;
+
+#ifdef DEBUG_405DMA
+	if (!handle) {
+		printk("enable_dma_sgl: null handle\n");
+		return;
+	} else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+		printk("enable_dma_sgl: bad channel in handle %d\n",
+		       psgl->dmanr);
+		return;
+	} else if (!psgl->phead) {
+		printk("enable_dma_sgl: sg list empty\n");
+		return;
+	}
+#endif
+
+	p_dma_ch = &dma_channels[psgl->dmanr];
+	psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */
+	sg_command = mfdcr(DCRN_ASGC);
+
+	switch(psgl->dmanr) {
+	case 0:
+		mtdcr(DCRN_ASG0, virt_to_bus(psgl->phead));
+		sg_command |= SSG0_ENABLE;
+		break;
+	case 1:
+		mtdcr(DCRN_ASG1, virt_to_bus(psgl->phead));
+		sg_command |= SSG1_ENABLE;
+		break;
+	case 2:
+		mtdcr(DCRN_ASG2, virt_to_bus(psgl->phead));
+		sg_command |= SSG2_ENABLE;
+		break;
+	case 3:
+		mtdcr(DCRN_ASG3, virt_to_bus(psgl->phead));
+		sg_command |= SSG3_ENABLE;
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("enable_dma_sgl: bad channel: %d\n", psgl->dmanr);
+#endif
+		break;
+	}
+
+#if 0 /* debug */
+	printk("\n\nenable_dma_sgl at dma_addr 0x%x\n",
+	       virt_to_bus(psgl->phead));
+	{
+		ppc_sgl_t *pnext, *sgl_addr;
+
+		pnext = psgl->phead;
+		while (pnext) {
+			printk("dma descriptor at 0x%x, dma addr 0x%x\n",
+			       (unsigned)pnext, (unsigned)virt_to_bus(pnext));
+			printk("control 0x%x src 0x%x dst 0x%x c_count 0x%x, next 0x%x\n",
+			       (unsigned)pnext->control, (unsigned)pnext->src_addr,
+			       (unsigned)pnext->dst_addr,
+			       (unsigned)pnext->control_count, (unsigned)pnext->next);
+
+			(unsigned)pnext = bus_to_virt(pnext->next);
+		}
+		printk("sg_command 0x%x\n", sg_command);
+	}
+#endif
+
+#ifdef PCI_ALLOC_IS_NONCONSISTENT
+	/*
+	 * This is temporary only, until pci_alloc_consistent() really does
+	 * return "consistent" memory.
+	 */
+	flush_dcache_range((unsigned)handle, (unsigned)handle + SGL_LIST_SIZE);
+#endif
+
+	mtdcr(DCRN_ASGC, sg_command); /* start transfer */
+}
+
+
+
+/*
+ * Halt an active scatter/gather DMA operation.
+ *
+ * Clears the channel's start bit in the SG command register; descriptor
+ * state in the handle is left untouched so the residue can be queried.
+ */
+static __inline__ void disable_405gp_dma_sgl(sgl_handle_t handle)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	uint32_t sg_command;
+
+#ifdef DEBUG_405DMA
+	if (!handle) {
+		printk("disable_dma_sgl: null handle\n");
+		return;
+	} else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+		printk("disable_dma_sgl: bad channel in handle %d\n",
+			psgl->dmanr);
+		return;
+	}
+#endif
+	sg_command = mfdcr(DCRN_ASGC);
+	switch(psgl->dmanr) {
+	case 0:
+		sg_command &= ~SSG0_ENABLE;
+		break;
+	case 1:
+		sg_command &= ~SSG1_ENABLE;
+		break;
+	case 2:
+		sg_command &= ~SSG2_ENABLE;
+		break;
+	case 3:
+		sg_command &= ~SSG3_ENABLE;
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("disable_dma_sgl: bad channel: %d\n", psgl->dmanr);
+#endif
+		break;	/* a label must precede a statement */
+	}
+
+	mtdcr(DCRN_ASGC, sg_command);	/* stop transfer */
+}
+
+
+
+/*
+ * Returns number of bytes left to be transferred from the entire sgl list.
+ * *src_addr and *dst_addr get set to the source/destination address of
+ * the sgl descriptor where the DMA stopped.
+ *
+ * An sgl transfer must NOT be active when this function is called.
+ * Returns 0 (and NULLs the out-parameters) on any error.
+ */
+static __inline__ int
+get_405gp_dma_sgl_residue(sgl_handle_t handle, dma_addr_t *src_addr,
+	dma_addr_t *dst_addr)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	ppc_dma_ch_t *p_dma_ch;
+	ppc_sgl_t *pnext, *sgl_addr;
+	uint32_t count_left;
+
+#ifdef DEBUG_405DMA
+	if (!handle) {
+		printk("get_dma_sgl_residue: null handle\n");
+		return DMA_STATUS_BAD_HANDLE;
+	} else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+		printk("get_dma_sgl_residue: bad channel in handle %d\n",
+			psgl->dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+#endif
+
+	/*
+	 * Read back the descriptor address the channel stopped in, and the
+	 * remaining transfer count for that descriptor.
+	 */
+	switch(psgl->dmanr) {
+	case 0:
+		sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG0));
+		count_left = mfdcr(DCRN_DMACT0);
+		break;
+	case 1:
+		sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG1));
+		count_left = mfdcr(DCRN_DMACT1);
+		break;
+	case 2:
+		sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG2));
+		count_left = mfdcr(DCRN_DMACT2);
+		break;
+	case 3:
+		sgl_addr = (ppc_sgl_t *)bus_to_virt(mfdcr(DCRN_ASG3));
+		count_left = mfdcr(DCRN_DMACT3);
+		break;
+	default:
+#ifdef DEBUG_405DMA
+		printk("get_dma_sgl_residue: bad channel: %d\n", psgl->dmanr);
+#endif
+		goto error;
+	}
+
+	if (!sgl_addr) {
+#ifdef DEBUG_405DMA
+		printk("get_dma_sgl_residue: sgl addr register is null\n");
+#endif
+		goto error;
+	}
+
+	/*
+	 * Descriptors are allocated contiguously from the handle's pool, so
+	 * walk forward one slot at a time until we find the descriptor the
+	 * channel stopped in (bounded by the pool size).
+	 */
+	pnext = psgl->phead;
+	while (pnext &&
+	       ((unsigned)pnext < ((unsigned)psgl + SGL_LIST_SIZE) &&
+		(pnext != sgl_addr))
+	      ) {
+		pnext++;	/* NOT "pnext = pnext++": that is undefined behavior */
+	}
+
+	if (pnext == sgl_addr) {	/* found the sgl descriptor */
+
+		*src_addr = pnext->src_addr;
+		*dst_addr = pnext->dst_addr;
+
+		/*
+		 * Now search the remaining descriptors and add their count.
+		 * We already have the remaining count from this descriptor in
+		 * count_left.
+		 * NOTE(review): the tail descriptor's own count is not added
+		 * here -- confirm that is the intended semantics.
+		 */
+		pnext++;
+
+		while ((pnext != psgl->ptail) &&
+		       ((unsigned)pnext < ((unsigned)psgl + SGL_LIST_SIZE))
+		      ) {
+			count_left += pnext->control_count & SG_COUNT_MASK;
+			pnext++;	/* must advance, or this loops forever */
+		}
+
+		if (pnext != psgl->ptail) {	/* should never happen */
+#ifdef DEBUG_405DMA
+			printk("get_dma_sgl_residue error (1) psgl->ptail 0x%x handle 0x%x\n",
+				(unsigned int)psgl->ptail,
+				(unsigned int)handle);
+#endif
+			goto error;
+		}
+
+		/* success: convert the unit count to bytes */
+		p_dma_ch = &dma_channels[psgl->dmanr];
+		return (count_left << p_dma_ch->shift);	/* count in bytes */
+
+	} else {
+		/* this shouldn't happen */
+#ifdef DEBUG_405DMA
+		printk("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n",
+			(unsigned int)sgl_addr, (unsigned int)handle);
+
+#endif
+	}
+
+
+error:
+	*src_addr = (dma_addr_t)NULL;
+	*dst_addr = (dma_addr_t)NULL;
+	return 0;
+}
+
+
+
+
+/*
+ * Returns the address(es) of the buffer(s) contained in the head element of
+ * the scatter/gather list. The element is removed from the scatter/gather
+ * list and the next element becomes the head.
+ *
+ * This function should only be called when the DMA is not active.
+ */
+static __inline__ int
+delete_405gp_dma_sgl_element(sgl_handle_t handle, dma_addr_t *src_dma_addr,
+	dma_addr_t *dst_dma_addr)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+
+#ifdef DEBUG_405DMA
+	if (!handle) {
+		printk("delete_sgl_element: null handle\n");
+		return DMA_STATUS_BAD_HANDLE;
+	} else if (psgl->dmanr > (MAX_405GP_DMA_CHANNELS - 1)) {
+		printk("delete_sgl_element: bad channel in handle %d\n",
+			psgl->dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+#endif
+
+	if (!psgl->phead) {
+#ifdef DEBUG_405DMA
+		printk("delete_sgl_element: sgl list empty\n");
+#endif
+		/* Nothing to hand back; clear the out-parameters. */
+		*src_dma_addr = (dma_addr_t)NULL;
+		*dst_dma_addr = (dma_addr_t)NULL;
+		return DMA_STATUS_SGL_LIST_EMPTY;
+	}
+
+	/* Return the head descriptor's buffer addresses to the caller. */
+	*src_dma_addr = (dma_addr_t)psgl->phead->src_addr;
+	*dst_dma_addr = (dma_addr_t)psgl->phead->dst_addr;
+
+	if (psgl->phead == psgl->ptail) {
+		/* last descriptor on the list */
+		psgl->phead = NULL;
+		psgl->ptail = NULL;
+	} else {
+		/* Descriptors are contiguous in the handle's pool, so the
+		 * next list element is simply the next array slot. */
+		psgl->phead++;
+	}
+
+	return DMA_STATUS_GOOD;
+}
+
+#endif /* DCRNCAP_DMA_SG */
+
+/*
+ * The rest of the DMA API, in ppc405_dma.c
+ */
+extern int hw_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+extern int get_channel_config(unsigned int, ppc_dma_ch_t *);
+extern int set_channel_priority(unsigned int, unsigned int);
+extern unsigned int get_peripheral_width(unsigned int);
+extern int alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+extern void free_dma_handle(sgl_handle_t);
+
+#endif
+#endif /* __KERNEL__ */
+++ /dev/null
-/*
- * include/asm-ppc/ppc4xx_dma.h
- *
- * IBM PPC4xx DMA engine library
- *
- * Copyright 2000-2004 MontaVista Software Inc.
- *
- * Cleaned up a bit more, Matt Porter <mporter@kernel.crashing.org>
- *
- * Original code by Armin Kuster <akuster@mvista.com>
- * and Pete Popov <ppopov@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifdef __KERNEL__
-#ifndef __ASMPPC_PPC4xx_DMA_H
-#define __ASMPPC_PPC4xx_DMA_H
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <asm/mmu.h>
-#include <asm/ibm4xx.h>
-
-#undef DEBUG_4xxDMA
-
-#define MAX_PPC4xx_DMA_CHANNELS 4
-
-/* in arch/ppc/kernel/setup.c -- Cort */
-extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
-
-/*
- * Function return status codes
- * These values are used to indicate whether or not the function
- * call was successful, or a bad/invalid parameter was passed.
- */
-#define DMA_STATUS_GOOD 0
-#define DMA_STATUS_BAD_CHANNEL 1
-#define DMA_STATUS_BAD_HANDLE 2
-#define DMA_STATUS_BAD_MODE 3
-#define DMA_STATUS_NULL_POINTER 4
-#define DMA_STATUS_OUT_OF_MEMORY 5
-#define DMA_STATUS_SGL_LIST_EMPTY 6
-#define DMA_STATUS_GENERAL_ERROR 7
-#define DMA_STATUS_CHANNEL_NOTFREE 8
-
-#define DMA_CHANNEL_BUSY 0x80000000
-
-/*
- * These indicate status as returned from the DMA Status Register.
- */
-#define DMA_STATUS_NO_ERROR 0
-#define DMA_STATUS_CS 1 /* Count Status */
-#define DMA_STATUS_TS 2 /* Transfer Status */
-#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
-#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
-
-
-/*
- * DMA Channel Control Registers
- */
-
-#ifdef CONFIG_44x
-#define PPC4xx_DMA_64BIT
-#define DMA_CR_OFFSET 1
-#else
-#define DMA_CR_OFFSET 0
-#endif
-
-#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
-#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
-#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
-
-#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
-#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
-#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
-
-#define DMA_TD (1<<29)
-#define SET_DMA_TD(x) (((x)&0x1)<<29)
-#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
-
-#define DMA_PL (1<<28) /* Peripheral Location */
-#define SET_DMA_PL(x) (((x)&0x1)<<28)
-#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
-
-#define EXTERNAL_PERIPHERAL 0
-#define INTERNAL_PERIPHERAL 1
-
-#define SET_DMA_PW(x) (((x)&0x3)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
-#define DMA_PW_MASK SET_DMA_PW(3)
-#define PW_8 0
-#define PW_16 1
-#define PW_32 2
-#define PW_64 3
-/* FIXME: Add PW_128 support for 440GP DMA block */
-#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
-
-#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
-#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
-
-#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
-#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
-
-#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
-#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
-
-#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
-#define DMA_TM_MASK SET_DMA_TM(3)
-#define TM_PERIPHERAL 0 /* Peripheral */
-#define TM_RESERVED 1 /* Reserved */
-#define TM_S_MM 2 /* Memory to Memory */
-#define TM_D_MM 3 /* Device Paced Memory to Memory */
-#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
-
-#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
-#define DMA_PSC_MASK SET_DMA_PSC(3)
-#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
-
-#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
-#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
-#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
-
-#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
-#define DMA_PHC_MASK SET_DMA_PHC(0x7)
-#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
-
-#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
-#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
-
-#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
-#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
-
-#define DMA_DEC (1<<(2) /* Address Decrement */
-#define SET_DMA_DEC(x) (((x)&0x1)<<2)
-#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
-
-/*
- * Transfer Modes
- * These modes are defined in a way that makes it possible to
- * simply "or" in the value in the control register.
- */
-
-#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
-
- /* Device-paced memory to memory, */
- /* device is at source address */
-#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
-
- /* Device-paced memory to memory, */
- /* device is at destination address */
-#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
-
-/* 405gp/440gp */
-#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
-#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
-#define PREFETCH_1 0 /* Prefetch 1 Double Word */
-#define PREFETCH_2 1
-#define PREFETCH_4 2
-#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
-
-#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
-#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
-#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
-
-/* stb3x */
-
-#define DMA_ECE_ENABLE (1<<5)
-#define SET_DMA_ECE(x) (((x)&0x1)<<5)
-#define GET_DMA_ECE(x) (((x)&DMA_ECE_ENABLE)>>5)
-
-#define DMA_TCD_DISABLE (1<<4)
-#define SET_DMA_TCD(x) (((x)&0x1)<<4)
-#define GET_DMA_TCD(x) (((x)&DMA_TCD_DISABLE)>>4)
-
-typedef uint32_t sgl_handle_t;
-
-#ifdef CONFIG_PPC4xx_EDMA
-
-#define SGL_LIST_SIZE 4096
-#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
-
-#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
-#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
-#define PRIORITY_LOW 0
-#define PRIORITY_MID_LOW 1
-#define PRIORITY_MID_HIGH 2
-#define PRIORITY_HIGH 3
-#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
-
-/*
- * DMA Polarity Configuration Register
- */
-#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
-#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
-#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
-
-/*
- * DMA Sleep Mode Register
- */
-#define SLEEP_MODE_ENABLE (1<<21)
-
-/*
- * DMA Status Register
- */
-#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
-#define DMA_CS1 (1<<30)
-#define DMA_CS2 (1<<29)
-#define DMA_CS3 (1<<28)
-
-#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
-#define DMA_TS1 (1<<26)
-#define DMA_TS2 (1<<25)
-#define DMA_TS3 (1<<24)
-
-#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
-#define DMA_CH1_ERR (1<<22)
-#define DMA_CH2_ERR (1<<21)
-#define DMA_CH3_ERR (1<<20)
-
-#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
-#define DMA_IN_DMA_REQ1 (1<<18)
-#define DMA_IN_DMA_REQ2 (1<<17)
-#define DMA_IN_DMA_REQ3 (1<<16)
-
-#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
-#define DMA_EXT_DMA_REQ1 (1<<14)
-#define DMA_EXT_DMA_REQ2 (1<<13)
-#define DMA_EXT_DMA_REQ3 (1<<12)
-
-#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
-#define DMA_CH1_BUSY (1<<10)
-#define DMA_CH2_BUSY (1<<9)
-#define DMA_CH3_BUSY (1<<8)
-
-#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
-#define DMA_SG1 (1<<6)
-#define DMA_SG2 (1<<5)
-#define DMA_SG3 (1<<4)
-
-/*
- * DMA SG Command Register
- */
-#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
-#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
-
-/*
- * DMA Scatter/Gather Descriptor Bit fields
- */
-#define SG_LINK (1<<31) /* Link */
-#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
-#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
-#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
-#define SG_COUNT_MASK 0xFFFF /* Count Field */
-
-#define SET_DMA_CONTROL \
- (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
- SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
- SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
- SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
- SET_DMA_PL(p_init->pl) | /* peripheral location */ \
- SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
- SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
- SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
- SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
- SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
- SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
- SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
- SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
-
-#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
-
-#elif defined(CONFIG_STBXXX_DMA) /* stb03xxx */
-
-#define DMA_PPC4xx_SIZE 4096
-
-/*
- * DMA Status Register
- */
-
-#define SET_DMA_PRIORITY(x) (((x)&0x00800001)) /* DMA Channel Priority */
-#define DMA_PRIORITY_MASK 0x00800001
-#define PRIORITY_LOW 0x00000000
-#define PRIORITY_MID_LOW 0x00000001
-#define PRIORITY_MID_HIGH 0x00800000
-#define PRIORITY_HIGH 0x00800001
-#define GET_DMA_PRIORITY(x) (((((x)&DMA_PRIORITY_MASK) &0x00800000) >> 22 ) | (((x)&DMA_PRIORITY_MASK) &0x00000001))
-
-#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
-#define DMA_CS1 (1<<30)
-#define DMA_CS2 (1<<29)
-#define DMA_CS3 (1<<28)
-
-#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
-#define DMA_TS1 (1<<26)
-#define DMA_TS2 (1<<25)
-#define DMA_TS3 (1<<24)
-
-#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
-#define DMA_CH1_ERR (1<<22)
-#define DMA_CH2_ERR (1<<21)
-#define DMA_CH3_ERR (1<<20)
-
-#define DMA_CT0 (1<<19) /* Chained transfere */
-
-#define DMA_IN_DMA_REQ0 (1<<18) /* Internal DMA Request is pending */
-#define DMA_IN_DMA_REQ1 (1<<17)
-#define DMA_IN_DMA_REQ2 (1<<16)
-#define DMA_IN_DMA_REQ3 (1<<15)
-
-#define DMA_EXT_DMA_REQ0 (1<<14) /* External DMA Request is pending */
-#define DMA_EXT_DMA_REQ1 (1<<13)
-#define DMA_EXT_DMA_REQ2 (1<<12)
-#define DMA_EXT_DMA_REQ3 (1<<11)
-
-#define DMA_CH0_BUSY (1<<10) /* DMA Channel 0 Busy */
-#define DMA_CH1_BUSY (1<<9)
-#define DMA_CH2_BUSY (1<<8)
-#define DMA_CH3_BUSY (1<<7)
-
-#define DMA_CT1 (1<<6) /* Chained transfere */
-#define DMA_CT2 (1<<5)
-#define DMA_CT3 (1<<4)
-
-#define DMA_CH_ENABLE (1<<7)
-#define SET_DMA_CH(x) (((x)&0x1)<<7)
-#define GET_DMA_CH(x) (((x)&DMA_CH_ENABLE)>>7)
-
-/* STBx25xxx dma unique */
-/* enable device port on a dma channel
- * example ext 0 on dma 1
- */
-
-#define SSP0_RECV 15
-#define SSP0_XMIT 14
-#define EXT_DMA_0 12
-#define SC1_XMIT 11
-#define SC1_RECV 10
-#define EXT_DMA_2 9
-#define EXT_DMA_3 8
-#define SERIAL2_XMIT 7
-#define SERIAL2_RECV 6
-#define SC0_XMIT 5
-#define SC0_RECV 4
-#define SERIAL1_XMIT 3
-#define SERIAL1_RECV 2
-#define SERIAL0_XMIT 1
-#define SERIAL0_RECV 0
-
-#define DMA_CHAN_0 1
-#define DMA_CHAN_1 2
-#define DMA_CHAN_2 3
-#define DMA_CHAN_3 4
-
-/* end STBx25xx */
-
-/*
- * Bit 30 must be one for Redwoods, otherwise transfers may receive errors.
- */
-#define DMA_CR_MB0 0x2
-
-#define SET_DMA_CONTROL \
- (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
- SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
- SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
- SET_DMA_PL(p_init->pl) | /* peripheral location */ \
- SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
- SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
- SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
- SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
- SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
- SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
- SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
- SET_DMA_TCD(p_init->tcd_disable) | /* TC chain mode disable */ \
- SET_DMA_ECE(p_init->ece_enable) | /* ECE chanin mode enable */ \
- SET_DMA_CH(p_init->ch_enable) | /* Chain enable */ \
- DMA_CR_MB0 /* must be one */)
-
-#define GET_DMA_POLARITY(chan) chan
-
-#endif
-
-typedef struct {
- unsigned short in_use; /* set when channel is being used, clr when
- * available.
- */
- /*
- * Valid polarity settings:
- * DMAReq_ActiveLow(n)
- * DMAAck_ActiveLow(n)
- * EOT_ActiveLow(n)
- *
- * n is 0 to max dma chans
- */
- unsigned int polarity;
-
- char buffer_enable; /* Boolean: buffer enable */
- char tce_enable; /* Boolean: terminal count enable */
- char etd_output; /* Boolean: eot pin is a tc output */
- char pce; /* Boolean: parity check enable */
-
- /*
- * Peripheral location:
- * INTERNAL_PERIPHERAL (UART0 on the 405GP)
- * EXTERNAL_PERIPHERAL
- */
- char pl; /* internal/external peripheral */
-
- /*
- * Valid pwidth settings:
- * PW_8
- * PW_16
- * PW_32
- * PW_64
- */
- unsigned int pwidth;
-
- char dai; /* Boolean: dst address increment */
- char sai; /* Boolean: src address increment */
-
- /*
- * Valid psc settings: 0-3
- */
- unsigned int psc; /* Peripheral Setup Cycles */
-
- /*
- * Valid pwc settings:
- * 0-63
- */
- unsigned int pwc; /* Peripheral Wait Cycles */
-
- /*
- * Valid phc settings:
- * 0-7
- */
- unsigned int phc; /* Peripheral Hold Cycles */
-
- /*
- * Valid cp (channel priority) settings:
- * PRIORITY_LOW
- * PRIORITY_MID_LOW
- * PRIORITY_MID_HIGH
- * PRIORITY_HIGH
- */
- unsigned int cp; /* channel priority */
-
- /*
- * Valid pf (memory read prefetch) settings:
- *
- * PREFETCH_1
- * PREFETCH_2
- * PREFETCH_4
- */
- unsigned int pf; /* memory read prefetch */
-
- /*
- * Boolean: channel interrupt enable
- * NOTE: for sgl transfers, only the last descriptor will be setup to
- * interrupt.
- */
- char int_enable;
-
- char shift; /* easy access to byte_count shift, based on */
- /* the width of the channel */
-
- uint32_t control; /* channel control word */
-
- /* These variabled are used ONLY in single dma transfers */
- unsigned int mode; /* transfer mode */
- phys_addr_t addr;
- char ce; /* channel enable */
-#ifdef CONFIG_STB03xxx
- char ch_enable;
- char tcd_disable;
- char ece_enable;
- char td; /* transfer direction */
-#endif
-
-} ppc_dma_ch_t;
-
-/*
- * PPC44x DMA implementations have a slightly different
- * descriptor layout. Probably moved about due to the
- * change to 64-bit addresses and link pointer. I don't
- * know why they didn't just leave control_count after
- * the dst_addr.
- */
-#ifdef PPC4xx_DMA_64BIT
-typedef struct {
- uint32_t control;
- uint32_t control_count;
- phys_addr_t src_addr;
- phys_addr_t dst_addr;
- phys_addr_t next;
-} ppc_sgl_t;
-#else
-typedef struct {
- uint32_t control;
- phys_addr_t src_addr;
- phys_addr_t dst_addr;
- uint32_t control_count;
- uint32_t next;
-} ppc_sgl_t;
-#endif
-
-typedef struct {
- unsigned int dmanr;
- uint32_t control; /* channel ctrl word; loaded from each descrptr */
- uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
- dma_addr_t dma_addr; /* dma (physical) address of this list */
- ppc_sgl_t *phead;
- dma_addr_t phead_dma;
- ppc_sgl_t *ptail;
- dma_addr_t ptail_dma;
-} sgl_list_info_t;
-
-typedef struct {
- phys_addr_t *src_addr;
- phys_addr_t *dst_addr;
- phys_addr_t dma_src_addr;
- phys_addr_t dma_dst_addr;
-} pci_alloc_desc_t;
-
-extern ppc_dma_ch_t dma_channels[];
-
-/*
- * The DMA API are in ppc4xx_dma.c and ppc4xx_sgdma.c
- */
-extern int ppc4xx_init_dma_channel(unsigned int, ppc_dma_ch_t *);
-extern int ppc4xx_get_channel_config(unsigned int, ppc_dma_ch_t *);
-extern int ppc4xx_set_channel_priority(unsigned int, unsigned int);
-extern unsigned int ppc4xx_get_peripheral_width(unsigned int);
-extern void ppc4xx_set_sg_addr(int, phys_addr_t);
-extern int ppc4xx_add_dma_sgl(sgl_handle_t, phys_addr_t, phys_addr_t, unsigned int);
-extern void ppc4xx_enable_dma_sgl(sgl_handle_t);
-extern void ppc4xx_disable_dma_sgl(sgl_handle_t);
-extern int ppc4xx_get_dma_sgl_residue(sgl_handle_t, phys_addr_t *, phys_addr_t *);
-extern int ppc4xx_delete_dma_sgl_element(sgl_handle_t, phys_addr_t *, phys_addr_t *);
-extern int ppc4xx_alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
-extern void ppc4xx_free_dma_handle(sgl_handle_t);
-extern int ppc4xx_get_dma_status(void);
-extern void ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr);
-extern void ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr);
-extern void ppc4xx_enable_dma(unsigned int dmanr);
-extern void ppc4xx_disable_dma(unsigned int dmanr);
-extern void ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count);
-extern int ppc4xx_get_dma_residue(unsigned int dmanr);
-extern void ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
- phys_addr_t dst_dma_addr);
-extern int ppc4xx_enable_dma_interrupt(unsigned int dmanr);
-extern int ppc4xx_disable_dma_interrupt(unsigned int dmanr);
-extern int ppc4xx_clr_dma_status(unsigned int dmanr);
-extern int ppc4xx_map_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
-extern int ppc4xx_disable_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
-extern int ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode);
-
-/* These are in kernel/dma.c: */
-
-/* reserve a DMA channel */
-extern int request_dma(unsigned int dmanr, const char *device_id);
-/* release it again */
-extern void free_dma(unsigned int dmanr);
-#endif
-#endif /* __KERNEL__ */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(CONFIG_8xx) || defined(CONFIG_CPM2) || defined(CONFIG_85xx)
unsigned long bi_immr_base; /* base of IMMR register */
-#endif
-#if defined(CONFIG_PPC_MPC52xx)
- unsigned long bi_mbar_base; /* base of internal registers */
#endif
unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
unsigned long bi_ip_addr; /* IP Address */
unsigned long bi_brgfreq; /* BRG_CLK Freq, in MHz */
unsigned long bi_sccfreq; /* SCC_CLK Freq, in MHz */
unsigned long bi_vco; /* VCO Out from PLL, in MHz */
-#endif
-#if defined(CONFIG_PPC_MPC52xx)
- unsigned long bi_ipbfreq; /* IPB Bus Freq, in MHz */
- unsigned long bi_pcifreq; /* PCI Bus Freq, in MHz */
#endif
unsigned long bi_baudrate; /* Console Baudrate */
#if defined(CONFIG_405GP)
#define DBAT6U SPRN_DBAT6U /* Data BAT 6 Upper Register */
#define DBAT7L SPRN_DBAT7L /* Data BAT 7 Lower Register */
#define DBAT7U SPRN_DBAT7U /* Data BAT 7 Upper Register */
-//#define DEC SPRN_DEC /* Decrement Register */
+#define DEC SPRN_DEC /* Decrement Register */
#define DMISS SPRN_DMISS /* Data TLB Miss Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define EAR SPRN_EAR /* External Address Register */
#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
#define L2CR SPRN_L2CR /* Classic PPC L2 cache control register */
#define L3CR SPRN_L3CR /* PPC 745x L3 cache control register */
-//#define LR SPRN_LR
+#define LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
-//#define RPA SPRN_RPA /* Required Physical Address Register */
+#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SVR_8555E 0x80790000
#define SVR_8560 0x80700000
-#if 0
/* Segment Registers */
#define SR0 0
#define SR1 1
#define SR13 13
#define SR14 14
#define SR15 15
-#endif
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#if defined(CONFIG_EV64260)
#include <platforms/ev64260.h>
+#elif defined(CONFIG_DMV182)
+#include <platforms/dmv182_serial.h>
#elif defined(CONFIG_GEMINI)
#include <platforms/gemini_serial.h>
#elif defined(CONFIG_POWERPMC250)
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
+typedef void (*__sighandler_t)(int);
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- __sigrestore_t sa_restorer;
+ void (*sa_restorer)(void);
sigset_t sa_mask; /* mask last for extensibility */
};
};
typedef struct sigaltstack {
- void __user *ss_sp;
+ void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
((addr) <= current->thread.fs.seg \
&& ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
-#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
- __chk_user_ptr(ptr); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
- __chk_user_ptr(ptr); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
({ \
long __gu_err; \
long long __gu_val; \
- __chk_user_ptr(ptr); \
__get_user_size64(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
({ \
long __gu_err = -EFAULT; \
long long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
struct ucontext {
unsigned long uc_flags;
- struct ucontext __user *uc_link;
+ struct ucontext *uc_link;
stack_t uc_stack;
int uc_pad[7];
- struct mcontext __user *uc_regs;/* points to uc_mcontext field */
+ struct mcontext *uc_regs; /* points to uc_mcontext field */
sigset_t uc_sigmask;
/* glibc has 1024-bit signal masks, ours are 64-bit */
int uc_maskext[30];
#define __NR_mq_notify 266
#define __NR_mq_getsetattr 267
#define __NR_kexec_load 268
-#define __NR_ioprio_set 269
-#define __NR_ioprio_get 270
-#define __NR_syscalls 271
+#define __NR_syscalls 269
#define __NR(n) #n
out_be64(vaddr, val);
}
-#define EEH_CHECK_ALIGN(v,a) \
- ((((unsigned long)(v)) & ((a) - 1)) == 0)
-
static inline void eeh_memset_io(void *addr, int c, unsigned long n) {
void *vaddr = (void *)IO_TOKEN_TO_ADDR(addr);
- u32 lc = c;
- lc |= lc << 8;
- lc |= lc << 16;
-
- while(n && !EEH_CHECK_ALIGN(vaddr, 4)) {
- *((volatile u8 *)vaddr) = c;
- vaddr = (void *)((unsigned long)vaddr + 1);
- n--;
- }
- while(n >= 4) {
- *((volatile u32 *)vaddr) = lc;
- vaddr = (void *)((unsigned long)vaddr + 4);
- n -= 4;
- }
- while(n) {
- *((volatile u8 *)vaddr) = c;
- vaddr = (void *)((unsigned long)vaddr + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
+ memset(vaddr, c, n);
}
static inline void eeh_memcpy_fromio(void *dest, void *src, unsigned long n) {
void *vsrc = (void *)IO_TOKEN_TO_ADDR(src);
- void *vsrcsave = vsrc, *destsave = dest, *srcsave = src;
- unsigned long nsave = n;
-
- while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 1);
- dest = (void *)((unsigned long)dest + 1);
- n--;
- }
- while(n > 4) {
- *((u32 *)dest) = *((volatile u32 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 4);
- dest = (void *)((unsigned long)dest + 4);
- n -= 4;
- }
- while(n) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc = (void *)((unsigned long)vsrc + 1);
- dest = (void *)((unsigned long)dest + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
-
+ memcpy(dest, vsrc, n);
/* Look for ffff's here at dest[n]. Assume that at least 4 bytes
* were copied. Check all four bytes.
*/
- if ((nsave >= 4) &&
- (EEH_POSSIBLE_ERROR(srcsave, vsrcsave, (*((u32 *) destsave+nsave-4)),
- u32))) {
- eeh_check_failure(srcsave, (*((u32 *) destsave+nsave-4)));
+ if ((n >= 4) &&
+ (EEH_POSSIBLE_ERROR(src, vsrc, (*((u32 *) dest+n-4)), u32))) {
+ eeh_check_failure(src, (*((u32 *) dest+n-4)));
}
}
static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
void *vdest = (void *)IO_TOKEN_TO_ADDR(dest);
-
- while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src = (void *)((unsigned long)src + 1);
- vdest = (void *)((unsigned long)vdest + 1);
- n--;
- }
- while(n > 4) {
- *((volatile u32 *)vdest) = *((volatile u32 *)src);
- src = (void *)((unsigned long)src + 4);
- vdest = (void *)((unsigned long)vdest + 4);
- n-=4;
- }
- while(n) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src = (void *)((unsigned long)src + 1);
- vdest = (void *)((unsigned long)vdest + 1);
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
+ memcpy(vdest, src, n);
}
-#undef EEH_CHECK_ALIGN
-
#define MAX_ISA_PORT 0x10000
extern unsigned long io_page_mask;
#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) & io_page_mask)
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
+++ /dev/null
-/*
- * hvcserver.h
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PPC64_HVCSERVER_H
-#define _PPC64_HVCSERVER_H
-
-#include <linux/list.h>
-
-/* Converged Location Code length */
-#define HVCS_CLC_LENGTH 79
-
-struct hvcs_partner_info {
- struct list_head node;
- unsigned int unit_address;
- unsigned int partition_ID;
- char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
-};
-
-extern int hvcs_free_partner_info(struct list_head *head);
-extern int hvcs_get_partner_info(unsigned int unit_address,
- struct list_head *head, unsigned long *pi_buff);
-extern int hvcs_register_connection(unsigned int unit_address,
- unsigned int p_partition_ID, unsigned int p_unit_address);
-extern int hvcs_free_connection(unsigned int unit_address);
-
-#endif /* _PPC64_HVCSERVER_H */
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/threads.h>
+#include <asm/atomic.h>
/*
* Maximum number of interrupt sources that we can handle.
return virt_irq_to_real_map[virt_irq];
}
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
-
/*
* Because many systems have two overlapping names spaces for
* interrupts (ISA and XICS for example), and the ISA interrupts
mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
ctx; })
+/*
+ * Hardware Segment Lookaside Buffer Entry
+ * This structure has been padded out to two 64b doublewords (actual SLBE's are
+ * 94 bits). This padding facilitates use by the segment management
+ * instructions.
+ */
typedef struct {
unsigned long esid: 36; /* Effective segment ID */
unsigned long resv0:20; /* Reserved */
} dw1;
} STE;
+typedef struct {
+ unsigned long esid: 36; /* Effective segment ID */
+ unsigned long v: 1; /* Entry valid (v=1) or invalid */
+ unsigned long null1:15; /* padding to a 64b boundary */
+ unsigned long index:12; /* Index to select SLB entry. Used by slbmte */
+} slb_dword0;
+
+typedef struct {
+ unsigned long vsid: 52; /* Virtual segment ID */
+ unsigned long ks: 1; /* Supervisor (privileged) state storage key */
+ unsigned long kp: 1; /* Problem state storage key */
+ unsigned long n: 1; /* No-execute if n=1 */
+ unsigned long l: 1; /* Virt pages are large (l=1) or 4KB (l=0) */
+ unsigned long c: 1; /* Class */
+ unsigned long resv0: 7; /* Padding to a 64b boundary */
+} slb_dword1;
+
+typedef struct {
+ union {
+ unsigned long dword0;
+ slb_dword0 dw0;
+ } dw0;
+
+ union {
+ unsigned long dword1;
+ slb_dword1 dw1;
+ } dw1;
+} SLBE;
+
/* Hardware Page Table Entry */
#define HPTES_PER_GROUP 8
#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
-#define SLB_NUM_BOLTED 2
-#define SLB_CACHE_ENTRIES 8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V 0x0000000008000000 /* entry is valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT 12
-#define SLB_VSID_KS 0x0000000000000800
-#define SLB_VSID_KP 0x0000000000000400
-#define SLB_VSID_N 0x0000000000000200 /* no-execute */
-#define SLB_VSID_L 0x0000000000000100 /* largepage (4M) */
-#define SLB_VSID_C 0x0000000000000080 /* class */
-
-#define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
-
-#define VSID_RANDOMIZER ASM_CONST(42470972311)
-#define VSID_MASK 0xfffffffffUL
-/* Because we never access addresses below KERNELBASE as kernel
- * addresses, this VSID is never used for anything real, and will
- * never have pages hashed into it */
-#define BAD_VSID ASM_CONST(0)
-
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
}
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
-extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void flush_slb(struct task_struct *tsk, struct mm_struct *mm);
/*
* switch_mm is the entry point called from the architecture independent
return;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
- switch_slb(tsk, next);
+ flush_slb(tsk, next);
else
flush_stab(tsk, next);
}
local_irq_restore(flags);
}
+#define VSID_RANDOMIZER 42470972311UL
+#define VSID_MASK 0xfffffffffUL
+
+
/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
*/
static inline unsigned long
u64 exmc[8]; /* used for machine checks */
u64 exslb[8]; /* used for SLB/segment table misses
* on the linear mapping */
- u64 slb_r3; /* spot to save R3 on SLB miss */
- mm_context_t context;
- u16 slb_cache[SLB_CACHE_ENTRIES];
- u16 slb_cache_ptr;
+ u64 exdsi[8]; /* used for linear mapping hash table misses */
/*
* then miscellaneous read-write fields
*/
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
- u64 stab_rr; /* stab/slb round-robin counter */
+ u64 stab_next_rr; /* stab/slb round-robin counter */
u64 next_jiffy_update_tb; /* TB value for next jiffy update */
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u32 lpevent_count; /* lpevents processed */
u8 proc_enabled; /* irq soft-enable flag */
- /* not yet used */
- u64 exdsi[8]; /* used for linear mapping hash table misses */
-
/*
* iSeries structues which the hypervisor knows about - Not
* sure if these particularly need to be cacheline aligned.
#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
-#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
-#define TASK_HPAGE_END ASM_CONST(0x0000018000000000)
+#define TASK_HPAGE_BASE (0x0000010000000000UL)
+#define TASK_HPAGE_END (0x0000018000000000UL)
#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- (1U << GET_ESID(addr))) & 0xffff)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */
* the PCI memory space in the CPU bus space
*/
unsigned long pci_mem_offset;
+ unsigned long pci_io_offset;
struct pci_ops *ops;
volatile unsigned int *cfg_addr;
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
-//#define RPA SPRN_RPA /* Required Physical Address Register */
+#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#endif /* ASSEMBLY */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
/*
* Number of entries in the SLB. If this ever changes we should handle
* it with a use a cpu feature fixup.
extern void print_properties(struct device_node *node);
extern int prom_n_addr_cells(struct device_node* np);
extern int prom_n_size_cells(struct device_node* np);
-extern int prom_n_intr_cells(struct device_node* np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern void prom_add_property(struct device_node* np, struct property* prop);
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
void xics_setup_cpu(void);
void xics_cause_IPI(int cpu);
-/* first argument is ignored for now*/
-void pSeriesLP_cppr_info(int n_cpu, u8 value);
-
struct xics_ipi_struct {
volatile unsigned long value;
} ____cacheline_aligned;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup (tux) */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _S390_PAGE_H */
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
typedef struct {
__u32 ar4;
} mm_segment_t;
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_PAGE_H */
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
-#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
#ifndef _SPARC_OPENPROMIO_H
#define _SPARC_OPENPROMIO_H
-#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char __user *op_name; /* Pointer to the property name. */
+ char *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char __user *op_buf; /* Pointer to buffer. */
+ char *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* __KERNEL__ */
#endif /* _SPARC_PAGE_H */
extern void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction);
+/* map_page and map_single cannot fail */
+static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scather-gather version of the
* above pci_map_single interface. Here the scatter gather list
{
}
-#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
#endif /* __KERNEL__ */
/* generic pci stuff */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {32768, 32768}, \
+ {PAGE_SIZE, PAGE_SIZE}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
-#define flush_cache_range(vma, start, end) \
- flush_cache_mm((vma)->vm_mm)
+extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
#ifndef __LINUX_FBIO_H
#define __LINUX_FBIO_H
-#include <linux/compiler.h>
-
/* Constants used for fbio SunOS compatibility */
/* (C) 1996 Miguel de Icaza */
struct fbcmap {
int index; /* first element (0 origin) */
int count;
- unsigned char __user *red;
- unsigned char __user *green;
- unsigned char __user *blue;
+ unsigned char *red;
+ unsigned char *green;
+ unsigned char *blue;
};
#ifdef __KERNEL__
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
-#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
volatile int doing_pdma = 0;
/* This is software state */
-char *pdma_base = NULL;
+char *pdma_base = 0;
unsigned long pdma_areasize;
/* Common routines to all controller types on the Sparc. */
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
- pdma_base = NULL;
+ pdma_base = 0;
}
}
} else {
#ifdef CONFIG_PCI
struct linux_ebus *ebus;
- struct linux_ebus_device *edev = NULL;
+ struct linux_ebus_device *edev = 0;
unsigned long config = 0;
unsigned long auxio_reg;
spin_unlock(&mm->page_table_lock);
}
+extern void __flush_tlb_mm(unsigned long, unsigned long);
+
#define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new MM instance for the current task. */
#ifndef _SPARC64_OPENPROMIO_H
#define _SPARC64_OPENPROMIO_H
-#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char __user *op_name; /* Pointer to the property name. */
+ char *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char __user *op_buf; /* Pointer to buffer. */
+ char *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define clear_page(X) _clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
-#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
+#define copy_page(X,Y) __memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
/* GROSS, defining this makes gcc pass these types as aggregates,
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define devmem_is_allowed(x) 1
-
#endif /* !(__KERNEL__) */
#endif /* !(_SPARC64_PAGE_H) */
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte = __pte_alloc_one_kernel(mm, address);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = address & PMD_MASK;
- }
- return pte;
-}
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = __pte_alloc_one_kernel(mm, addr);
- if (pte) {
- struct page *page = virt_to_page(pte);
- page->mapping = (void *) mm;
- page->index = addr & PMD_MASK;
- return page;
- }
+ pte_t *pte = pte_alloc_one_kernel(mm, addr);
+ if (pte)
+ return virt_to_page(pte);
return NULL;
}
free_page((unsigned long)pte);
}
-static inline void pte_free_kernel(pte_t *pte)
-{
- virt_to_page(pte)->mapping = NULL;
- free_pte_fast(pte);
-}
-
-static inline void pte_free(struct page *ptepage)
-{
- ptepage->mapping = NULL;
- free_pte_fast(page_address(ptepage));
-}
-
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(page_address(pte))
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
#include <linux/sched.h>
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
* is different so we can optimize correctly for 32-bit tasks.
*/
#define REAL_PTRS_PER_PMD (1UL << PMD_BITS)
-
-/* This is gross, but unless we do this gcc retests the
- * thread flag every interation in pmd traversal loops.
- */
-extern unsigned long __ptrs_per_pmd(void) __attribute_const__;
-#define PTRS_PER_PMD __ptrs_per_pmd()
+#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \
+ (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : \
+ (REAL_PTRS_PER_PMD)))
/*
* We cannot use the top address range because VPTE table lives there. This
((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (0)
#define pmd_present(pmd) (pmd_val(pmd) != 0U)
* Undefined behaviour if not..
*/
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
+#define pte_exec(pte) pte_read(pte)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
-/* Actual page table PTE updates. */
-extern void tlb_batch_add(pte_t *ptep, pte_t orig);
-
-static inline void set_pte(pte_t *ptep, pte_t pte)
-{
- pte_t orig = *ptep;
-
- *ptep = pte;
- if (pte_present(orig))
- tlb_batch_add(ptep, orig);
-}
-
-#define pte_clear(ptep) set_pte((ptep), __pte(0UL))
-
extern pgd_t swapper_pg_dir[1];
/* These do nothing with the way I have things setup. */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {32768, 32768 }, \
+ {PAGE_SIZE, PAGE_SIZE }, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
#ifdef __KERNEL__
-#include <linux/config.h>
#include <linux/compat.h>
-#ifdef CONFIG_COMPAT
-
typedef union sigval32 {
int sival_int;
u32 sival_ptr;
} _sigpoll;
} _sifields;
} siginfo_t32;
-#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-#ifdef CONFIG_COMPAT
-
typedef struct sigevent32 {
sigval_t32 sigev_value;
int sigev_signo;
extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
-#endif /* CONFIG_COMPAT */
-
#endif /* __KERNEL__ */
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/config.h>
#include <linux/personality.h>
#include <linux/types.h>
#include <linux/compat.h>
};
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
struct __new_sigaction32 {
unsigned sa_handler;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
compat_sigset_t sa_mask;
};
-#endif
struct k_sigaction {
struct __new_sigaction sa;
};
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
struct __old_sigaction32 {
unsigned sa_handler;
compat_old_sigset_t sa_mask;
};
#endif
-#endif
-
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
} stack_t;
#ifdef __KERNEL__
-
-#ifdef CONFIG_COMPAT
typedef struct sigaltstack32 {
u32 ss_sp;
int ss_flags;
compat_size_t ss_size;
} stack_t32;
-#endif
struct signal_deliver_cookie {
int restart_syscall;
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
-/* arch/sparc64/lib/spinlock.S */
-extern void _raw_spin_lock(spinlock_t *lock);
+static __inline__ void _raw_spin_lock(spinlock_t *lock)
+{
+ __asm__ __volatile__(
+"1: ldstub [%0], %%g7\n"
+" brnz,pn %%g7, 2f\n"
+" membar #StoreLoad | #StoreStore\n"
+" .subsection 2\n"
+"2: ldub [%0], %%g7\n"
+" brnz,pt %%g7, 2b\n"
+" membar #LoadLoad\n"
+" b,a,pt %%xcc, 1b\n"
+" .previous\n"
+ : /* no outputs */
+ : "r" (lock)
+ : "g7", "memory");
+}
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
#include <asm/asi.h>
extern void __memmove(void *,const void *,__kernel_size_t);
+extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern void *__memset(void *,int,__kernel_size_t);
+extern void *__builtin_memcpy(void *,const void *,__kernel_size_t);
extern void *__builtin_memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
#define __HAVE_ARCH_MEMCPY
-extern void * memcpy(void *,const void *,__kernel_size_t);
+static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
+{
+ if(n) {
+ if(n <= 32) {
+ __builtin_memcpy(to, from, n);
+ } else {
+ __memcpy(to, from, n);
+ }
+ }
+ return to;
+}
+
+static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
+{
+ __memcpy(to, from, n);
+ return to;
+}
+
+#undef memcpy
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __nonconstant_memcpy((t),(f),(n)))
#define __HAVE_ARCH_MEMSET
current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
} \
- flush_tlb_pending(); \
save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
struct pt_regs *kregs;
struct exec_domain *exec_domain;
int preempt_count;
- int __pad;
unsigned long *utraps;
#define TI_FPREGS 0x00000500
/* We embed this in the uppermost byte of thread_info->flags */
-#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
-#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
-#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
-#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
-#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
+#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
+#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
+#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
+#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
#if PAGE_SHIFT == 13
#define THREAD_SIZE (2*PAGE_SIZE)
#define TIF_NEWSIGNALS 6 /* wants new-style signals */
#define TIF_32BIT 7 /* 32-bit binary */
#define TIF_NEWCHILD 8 /* just-spawned child process */
-/* TIF_* value 9 is available */
+
+/* XXX Make this one FAULT_CODE_BLKCOMMIT XXX */
+#define TIF_BLKCOMMIT 9 /* use ASI_BLK_COMMIT_* in copy_user_page */
+
#define TIF_POLLING_NRFLAG 10
#define TIF_SYSCALL_SUCCESS 11
/* NOTE: Thread flags >= 12 should be ones we have no interest
#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_NEWCHILD (1<<TIF_NEWCHILD)
+#define _TIF_BLKCOMMIT (1<<TIF_BLKCOMMIT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H
-#include <linux/config.h>
-#include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
+#define tlb_flush(tlb) \
+do { if ((tlb)->fullmm) \
+ flush_tlb_mm((tlb)->mm);\
+} while (0)
-#define TLB_BATCH_NR 192
+#define tlb_start_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
- #define FREE_PTE_NR 506
- #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
-#else
- #define FREE_PTE_NR 1
- #define tlb_fast_mode(bp) 1
-#endif
+#define tlb_end_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int pages_nr;
- unsigned int need_flush;
- unsigned int tlb_frozen;
- unsigned int tlb_nr;
- unsigned long freed;
- unsigned long vaddrs[TLB_BATCH_NR];
- struct page *pages[FREE_PTE_NR];
-};
+#define __tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { } while (0)
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm-generic/tlb.h>
-#ifdef CONFIG_SMP
-extern void smp_flush_tlb_pending(struct mm_struct *,
- unsigned long, unsigned long *);
-#endif
-
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
-
-static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
- struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());
-
- BUG_ON(mp->tlb_nr);
-
- mp->mm = mm;
- mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
- mp->tlb_frozen = full_mm_flush;
- mp->freed = 0;
-
- return mp;
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *mp)
-{
- if (mp->need_flush) {
- mp->need_flush = 0;
- if (!tlb_fast_mode(mp)) {
- free_pages_and_swap_cache(mp->pages, mp->pages_nr);
- mp->pages_nr = 0;
- }
- }
-
-}
-
-#ifdef CONFIG_SMP
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
-#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
-#else
-#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
-#endif
-
-static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
-{
- unsigned long freed = mp->freed;
- struct mm_struct *mm = mp->mm;
- unsigned long rss = mm->rss;
-
- if (rss < freed)
- freed = rss;
- mm->rss = rss - freed;
-
- tlb_flush_mmu(mp);
-
- if (mp->tlb_frozen) {
- unsigned long context = mm->context;
-
- if (CTX_VALID(context))
- do_flush_tlb_mm(mm);
- mp->tlb_frozen = 0;
- } else
- flush_tlb_pending();
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
-{
- return mp->tlb_frozen;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
-{
- mp->need_flush = 1;
- if (tlb_fast_mode(mp)) {
- free_page_and_swap_cache(page);
- return;
- }
- mp->pages[mp->pages_nr++] = page;
- if (mp->pages_nr >= FREE_PTE_NR)
- tlb_flush_mmu(mp);
-}
-
-#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
-#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define __pte_free_tlb(tlb, pte) pte_free(pte)
#endif /* _SPARC64_TLB_H */
/* TLB flush operations. */
-extern void flush_tlb_pending(void);
-
-#define flush_tlb_range(vma,start,end) \
- do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr) flush_tlb_pending()
-#define flush_tlb_mm(mm) flush_tlb_pending()
-
extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(unsigned long context, unsigned long r);
+extern void __flush_tlb_range(unsigned long context, unsigned long start,
+ unsigned long r, unsigned long end,
+ unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb_kernel_range(start,end) \
__flush_tlb_kernel_range(start,end)
+#define flush_tlb_mm(__mm) \
+do { if (CTX_VALID((__mm)->context)) \
+ __flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
+} while (0)
+
+#define flush_tlb_range(__vma, start, end) \
+do { if (CTX_VALID((__vma)->vm_mm->context)) { \
+ unsigned long __start = (start)&PAGE_MASK; \
+ unsigned long __end = PAGE_ALIGN(end); \
+ __flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
+ SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+ (__end - __start)); \
+ } \
+} while (0)
+
+#define flush_tlb_vpte_range(__mm, start, end) \
+do { if (CTX_VALID((__mm)->context)) { \
+ unsigned long __start = (start)&PAGE_MASK; \
+ unsigned long __end = PAGE_ALIGN(end); \
+ __flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
+ SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+ (__end - __start)); \
+ } \
+} while (0)
+
+#define flush_tlb_page(vma, page) \
+do { struct mm_struct *__mm = (vma)->vm_mm; \
+ if (CTX_VALID(__mm->context)) \
+ __flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
+ SECONDARY_CONTEXT); \
+} while (0)
+
+#define flush_tlb_vpte_page(mm, addr) \
+do { struct mm_struct *__mm = (mm); \
+ if (CTX_VALID(__mm->context)) \
+ __flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
+ SECONDARY_CONTEXT); \
+} while (0)
+
#else /* CONFIG_SMP */
extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_tlb_all() smp_flush_tlb_all()
+#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, start, end) \
+ smp_flush_tlb_range((vma)->vm_mm, start, end)
+#define flush_tlb_vpte_range(mm, start, end) \
+ smp_flush_tlb_range(mm, start, end)
#define flush_tlb_kernel_range(start, end) \
smp_flush_tlb_kernel_range(start, end)
+#define flush_tlb_page(vma, page) \
+ smp_flush_tlb_page((vma)->vm_mm, page)
+#define flush_tlb_vpte_page(mm, page) \
+ smp_flush_tlb_page((mm), page)
#endif /* ! CONFIG_SMP */
-extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long);
+static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ /* Note the signed type. */
+ long s = start, e = end, vpte_base;
+ /* Nobody should call us with start below VM hole and end above.
+ See if it is really true. */
+ BUG_ON(s > e);
+#if 0
+ /* Currently free_pgtables guarantees this. */
+ s &= PMD_MASK;
+ e = (e + PMD_SIZE - 1) & PMD_MASK;
+#endif
+ vpte_base = (tlb_type == spitfire ?
+ VPTE_BASE_SPITFIRE :
+ VPTE_BASE_CHEETAH);
+
+ flush_tlb_vpte_range(mm,
+ vpte_base + (s >> (PAGE_SHIFT - 3)),
+ vpte_base + (e >> (PAGE_SHIFT - 3)));
+}
#endif /* _SPARC64_TLBFLUSH_H */
#else
#define SUNOS_SYSCALL_TRAP TRAP(sunos_syscall)
#endif
-#ifdef CONFIG_COMPAT
#define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sys_call_table32)
-#else
-#define LINUX_32BIT_SYSCALL_TRAP BTRAP(0x110)
-#endif
#define LINUX_64BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table64)
#define GETCC_TRAP TRAP(getcc)
#define SETCC_TRAP TRAP(setcc)
+++ /dev/null
-#ifndef __UM_MODULE_H
-#define __UM_MODULE_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#endif
extern struct page *arch_validate(struct page *page, int mask, int order);
#define HAVE_ARCH_VALIDATE
-#define devmem_is_allowed(x) 1
extern void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
return result + generic_ffs_for_find_next_bit(tmp);
}
-/*
- * find_first_bit - find the first set bit in a memory region
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
#define __va(x) ((void *)__phys_to_virt ((unsigned long)(x)))
-#define devmem_is_allowed(x) 1
-
#endif /* KERNEL */
#endif /* __V850_PAGE_H__ */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE, PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffff
struct compat_ipc64_perm {
compat_key_t key;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
-#define O_ATOMICLOOKUP 02000000 /* TUX */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_PCI_USE_VECTOR
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
};
extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern cpumask_t pci_bus_to_cpumask [256];
+extern cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
struct task_struct;
struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
int in_gate_area(struct task_struct *task, unsigned long addr);
-extern int devmem_is_allowed(unsigned long pagenr);
#endif
#endif /* __KERNEL__ */
#define MCA_bus__is_a_macro
+/*
+ * User space process size: 512GB - 1GB (default).
+ */
+#define TASK_SIZE (0x0000007fc0000000UL)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
#define TASK_UNMAPPED_BASE \
(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
-
-/*
- * User space process size: 512GB - 1GB (default).
- */
-#define TASK_SIZE_64 (0x0000007fc0000000UL)
-
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE_64)
-
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
*/
#define ARCH_HAS_SCHED_WAKE_IDLE
#endif
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
#endif /* __ASM_X86_64_PROCESSOR_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { 32768, 32768 }, \
+ { PAGE_SIZE , PAGE_SIZE }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
static inline cpumask_t pcibus_to_cpumask(int bus)
{
- cpumask_t res;
- cpus_and(res, pci_bus_to_cpumask[bus], cpu_online_map);
- return res;
+ cpumask_t tmp;
+ cpus_and(tmp, mp_bus_to_cpumask[bus], cpu_online_map);
+ return tmp;
}
#define NODE_BALANCE_RATE 30 /* CHECKME */
#define __put_user_check(x,ptr,size) \
({ \
int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
#define __NR_kexec_load 246
__SYSCALL(__NR_kexec_load, sys_ni_syscall)
-#define __NR_ioprio_set 247
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set);
-#define __NR_ioprio_get 248
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get);
-#define __NR_syscall_max __NR_ioprio_get
+#define __NR_syscall_max __NR_kexec_load
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
#include <linux/types.h>
#include <asm/param.h>
-#include <asm/byteorder.h>
/*
* comp_t is a 16-bit "floating" point number with a 3-bit base 8
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
-
-#ifdef __BIG_ENDIAN
-#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
-#else
-#define ACCT_BYTEORDER 0x00 /* accounting file is little endian */
-#endif
+#define ABYTESEX 0x80 /* always set, allows to detect byteorder */
#ifdef __KERNEL__
#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10))
#define ata_id_has_wcache(dev) ((dev)->id[82] & (1 << 5))
#define ata_id_has_pm(dev) ((dev)->id[82] & (1 << 3))
-#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 9))
-#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 8))
+#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8))
+#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9))
#define ata_id_removeable(dev) ((dev)->id[0] & (1 << 7))
#define ata_id_u32(dev,n) \
(((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)]))
struct atm_iobuf {
int length;
- void __user *buffer;
+ void *buffer;
};
/* for ATM_GETCIRANGE / ATM_SETCIRANGE */
#define ATM_CI_MAX -1 /* use maximum range of VPI/VCI */
struct atm_cirange {
- signed char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
- signed char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
+ char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
+ char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
};
/* for ATM_SETSC; actually taken from the ATM_VF number space */
*
* ATM Lan Emulation Daemon vs. driver interface
*
- * mkiiskila@yahoo.com
+ * carnil@cs.tut.fi
*
*/
#define _LINUX_BINFMTS_H
#include <linux/capability.h>
+#include <linux/vs_memory.h>
struct pt_regs;
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6 /* contains user pages */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
-extern void bio_unmap_user(struct bio *);
+extern void bio_unmap_user(struct bio *, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
-extern int bio_uncopy_user(struct bio *);
#ifdef CONFIG_HIGHMEM
/*
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
+extern int scsi_cmd_ioctl(struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
+extern int blk_rq_unmap_user(struct request *, void __user *, struct bio *, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
int generic_cont_expand(struct inode *inode, loff_t size) ;
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
-void flush_inode_pages (struct inode * inode);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
extern int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip,
struct file *fp);
extern int cdrom_release(struct cdrom_device_info *cdi, struct file *fp);
-extern int cdrom_ioctl(struct file *file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg);
+extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct inode *ip,
+ unsigned int cmd, unsigned long arg);
extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct cdrom_device_info *cdi);
typedef void *(*icls_tsk_t) (struct task_struct *tsk);
typedef int (*icls_ioprio_t) (struct task_struct *tsk);
+
#ifdef CONFIG_CKRM_RES_BLKIO
-extern void *cki_tsk_icls (struct task_struct *tsk);
-extern int cki_tsk_ioprio (struct task_struct *tsk);
+#ifdef DOES_NOT_WORK_AND_NOT_NEEDED
+extern inline icls_tsk_t cki_tsk_icls;
+extern inline icls_ioprio_t cki_tsk_ioprio;
+#endif
#endif /* CONFIG_CKRM_RES_BLKIO */
*
* Latest version, more details at http://ckrm.sf.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
*/
*
* Latest version, more details at http://ckrm.sf.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
*/
#ifdef CONFIG_CKRM
-#include <linux/ckrm.h> // getting the event names
+#include "ckrm.h" // getting the event names
/* Action parameters identifying the cause of a task<->class notify callback
 * these can percolate up to user daemon consuming records sent by the
#include <linux/list.h>
-#define CLASSQUEUE_SIZE 1024 // acb: changed from 128
-//#define CLASSQUEUE_SIZE 128
+#define CLASSQUEUE_SIZE 128
#define CQ_BITMAP_SIZE ((((CLASSQUEUE_SIZE+1+7)/8)+sizeof(long)-1)/sizeof(long))
/**
cq_node_t *classqueue_get_head(struct classqueue_struct *cq);
/*update the base priority of the classqueue*/
-void classqueue_update_base(struct classqueue_struct *cq);
+void classqueue_update_base(struct classqueue_struct *cq, int new_base);
/**
* class_compare_prio: compare the priority of this two nodes
// more than this is needed.
int nr_active[MAX_NR_ZONES];
int nr_inactive[MAX_NR_ZONES];
- int tmp_cnt;
int shrink_count;
unsigned long last_shrink;
int over_limit_failures;
// used to fill reclaim_flags, used only when memory is low in the system
#define CLS_CLEAR (0) // class under its guarantee
#define CLS_OVER_GUAR (1 << 0) // class is over its guarantee
-#define CLS_PARENT_OVER (1 << 1) // parent is over 110% mark over limit
-#define CLS_OVER_25 (1 << 2) // class over 25% mark bet guar(0) & limit(100)
-#define CLS_OVER_50 (1 << 3) // class over 50% mark bet guar(0) & limit(100)
-#define CLS_OVER_75 (1 << 4) // class over 75% mark bet guar(0) & limit(100)
-#define CLS_OVER_100 (1 << 5) // class over its limit
-#define CLS_OVER_110 (1 << 6) // class over 110% mark over limit
-#define CLS_FLAGS_ALL ( CLS_OVER_GUAR | CLS_PARENT_OVER | CLS_OVER_25 | \
- CLS_OVER_50 | CLS_OVER_75 | CLS_OVER_100 | CLS_OVER_110 )
+#define CLS_PARENT_OVER (1 << 1) // parent is over 120% mark over limit
+#define CLS_OVER_75 (1 << 2) // class over 75% mark bet guar(0) & limit(100)
+#define CLS_OVER_100 (1 << 3) // class over its limit
+#define CLS_OVER_110 (1 << 4) // class over 110% mark over limit
+#define CLS_FLAGS_ALL ( CLS_OVER_GUAR | CLS_PARENT_OVER | CLS_OVER_75 | \
+ CLS_OVER_100 | CLS_OVER_110 )
#define CLS_SHRINK_BIT (31) // used to both lock and set the bit
#define CLS_SHRINK (1 << CLS_SHRINK_BIT) // shrink the given class
// used in flags. set when a class is more than 90% of its maxlimit
-#define MEM_AT_LIMIT 1
+#define MEM_NEAR_LIMIT 1
extern void ckrm_set_aggressive(ckrm_mem_res_t *);
extern unsigned int ckrm_setup_reclamation(void);
extern void ckrm_get_reclaim_bits(unsigned int *, unsigned int *);
extern void ckrm_init_mm_to_task(struct mm_struct *, struct task_struct *);
extern void ckrm_mem_evaluate_mm(struct mm_struct *);
-extern void ckrm_at_limit(ckrm_mem_res_t *);
-extern int ckrm_memclass_valid(ckrm_mem_res_t *);
+extern void ckrm_mem_evaluate_page_byadd(struct page *, struct mm_struct *);
+extern void ckrm_near_limit(ckrm_mem_res_t *);
#define ckrm_get_reclaim_flags(cls) ((cls)->reclaim_flags)
#else
#define ckrm_init_mm_to_current(a) do {} while (0)
#define ckrm_mem_evaluate_mm(a) do {} while (0)
+#define ckrm_mem_evaluate_page_byadd(a,b) do {} while (0)
+#define page_class(page) (NULL)
#define ckrm_get_reclaim_flags(a) (0)
#define ckrm_setup_reclamation() (0)
#define ckrm_teardown_reclamation() do {} while (0)
return -(b != NULL) ;
if (b == NULL)
return 0;
- if (a->pg_guar == CKRM_SHARE_DONTCARE)
- return 1;
- if (b->pg_guar == CKRM_SHARE_DONTCARE)
- return -1;
return (a->pg_unused - b->pg_unused);
}
static inline void
mem_class_put(ckrm_mem_res_t *cls)
{
- const char *name;
-
if (cls && atomic_dec_and_test(&(cls->nr_users)) ) {
- if (cls->core == NULL) {
- name = "unknown";
- } else {
- name = cls->core->name;
- }
- printk(KERN_DEBUG "freeing memclass %p of <core:%s>\n", cls, name);
-
- // BUG_ON(ckrm_memclass_valid(cls));
- // kfree(cls);
+ printk("freeing memclass %p of <core:%s>\n", cls, cls->core->name);
+ //kfree(cls);
}
}
-static inline void
+static inline int
incr_use_count(ckrm_mem_res_t *cls, int borrow)
{
+ int over_limit;
+
atomic_inc(&cls->pg_total);
+ over_limit = (atomic_read(&cls->pg_total) > ((9 * cls->pg_limit) / 10));
if (borrow)
cls->pg_lent++;
- if ((cls->pg_guar == CKRM_SHARE_DONTCARE) ||
+ if ((cls->pg_guar != CKRM_SHARE_DONTCARE) &&
(atomic_read(&cls->pg_total) > cls->pg_unused)) {
ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
mem_rcbs.resid, ckrm_mem_res_t);
if (parcls) {
- incr_use_count(parcls, 1);
+ over_limit |= incr_use_count(parcls, 1);
cls->pg_borrowed++;
+ return over_limit;
}
- } else {
- atomic_inc(&ckrm_mem_real_count);
}
- if ((cls->pg_limit != CKRM_SHARE_DONTCARE) &&
- (atomic_read(&cls->pg_total) >= cls->pg_limit) &&
- ((cls->flags & MEM_AT_LIMIT) != MEM_AT_LIMIT)) {
- ckrm_at_limit(cls);
- }
- return;
+ atomic_inc(&ckrm_mem_real_count);
+ return over_limit;
}
static inline void
}
static inline void
-ckrm_change_page_class(struct page *page, ckrm_mem_res_t *newcls)
+ckrm_change_page_class(struct page *page, ckrm_mem_res_t *cls)
{
- ckrm_mem_res_t *oldcls = page_class(page);
-
- if (!newcls || oldcls == newcls)
- return;
-
ckrm_clear_page_class(page);
- ckrm_set_page_class(page, newcls);
- if (test_bit(PG_ckrm_account, &page->flags)) {
- decr_use_count(oldcls, 0);
- incr_use_count(newcls, 0);
- if (PageActive(page)) {
- oldcls->nr_active[page_zonenum(page)]--;
- newcls->nr_active[page_zonenum(page)]++;
- } else {
- oldcls->nr_inactive[page_zonenum(page)]--;
- newcls->nr_inactive[page_zonenum(page)]++;
- }
- }
+ ckrm_set_page_class(page, cls);
}
static inline void
static inline void
ckrm_mem_inc_active(struct page *page)
{
- ckrm_mem_res_t *cls = page_class(page), *curcls;
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(test_bit(PG_ckrm_account, &page->flags));
- if (unlikely(cls != (curcls = GET_MEM_CLASS(current)))) {
- cls = curcls;
- ckrm_change_page_class(page, cls);
- }
+ ckrm_mem_res_t *cls = page_class(page);
+ BUG_ON(cls == NULL);
cls->nr_active[page_zonenum(page)]++;
- incr_use_count(cls, 0);
- set_bit(PG_ckrm_account, &page->flags);
+ if (incr_use_count(cls, 0)) {
+ ckrm_near_limit(cls);
+ }
}
static inline void
ckrm_mem_dec_active(struct page *page)
{
ckrm_mem_res_t *cls = page_class(page);
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(!test_bit(PG_ckrm_account, &page->flags));
+ BUG_ON(cls == NULL);
cls->nr_active[page_zonenum(page)]--;
decr_use_count(cls, 0);
- clear_bit(PG_ckrm_account, &page->flags);
}
static inline void
ckrm_mem_inc_inactive(struct page *page)
{
- ckrm_mem_res_t *cls = page_class(page), *curcls;
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(test_bit(PG_ckrm_account, &page->flags));
- if (unlikely(cls != (curcls = GET_MEM_CLASS(current)))) {
- cls = curcls;
- ckrm_change_page_class(page, cls);
- }
+ ckrm_mem_res_t *cls = page_class(page);
+ BUG_ON(cls == NULL);
cls->nr_inactive[page_zonenum(page)]++;
- incr_use_count(cls, 0);
- set_bit(PG_ckrm_account, &page->flags);
+ if (incr_use_count(cls, 0) &&
+ ((cls->flags & MEM_NEAR_LIMIT) != MEM_NEAR_LIMIT)) {
+ ckrm_near_limit(cls);
+ }
}
static inline void
ckrm_mem_dec_inactive(struct page *page)
{
ckrm_mem_res_t *cls = page_class(page);
- if (unlikely(!cls)) {
- return;
- }
- BUG_ON(!test_bit(PG_ckrm_account, &page->flags));
+ BUG_ON(cls == NULL);
cls->nr_inactive[page_zonenum(page)]--;
decr_use_count(cls, 0);
- clear_bit(PG_ckrm_account, &page->flags);
}
static inline int
if ((mem_rcbs.resid == -1) || !cls) {
return 1;
}
- if (cls->pg_limit == CKRM_SHARE_DONTCARE) {
- ckrm_mem_res_t *parcls = ckrm_get_res_class(cls->parent,
- mem_rcbs.resid, ckrm_mem_res_t);
- return (!parcls ?: ckrm_class_limit_ok(parcls));
- } else {
- return (atomic_read(&cls->pg_total) <= (11 * cls->pg_limit) / 10);
- }
+ return (atomic_read(&cls->pg_total) <= (11 * cls->pg_limit) / 10);
}
#else // !CONFIG_CKRM_RES_MEM
int num_classes;
/* state about my ce interaction */
- atomic_t ce_regd; // if CE registered
+ int ce_regd; // if CE registered
int ce_cb_active; // if Callbacks active
atomic_t ce_nr_users; // number of active transient calls
struct ckrm_eng_callback ce_callbacks; // callback engine
* OTHER
******************************************************************************/
-#define ckrm_get_res_class(rescls, resid, type) \
- ((type*) (((resid != -1) && ((rescls) != NULL) \
- && ((rescls) != (void *)-1)) ? \
- ((struct ckrm_core_class *)(rescls))->res_class[resid] : NULL))
-
+#define ckrm_get_res_class(rescls,resid,type) ((type*)((rescls)->res_class[resid]))
extern int ckrm_register_res_ctlr(struct ckrm_classtype *, ckrm_res_ctlr_t *);
extern int ckrm_unregister_res_ctlr(ckrm_res_ctlr_t *);
#ifndef _CKRM_SCHED_H
#define _CKRM_SCHED_H
+#define CC_BUG_ON_DO(cond,action) do { if (cond) action; BUG_ON(cond); } while(0)
+#define CC_BUG_ON(cond) BUG_ON(cond)
+
#include <linux/sched.h>
#include <linux/ckrm_rc.h>
#include <linux/ckrm_classqueue.h>
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+//update every second
+#define CVT_UPDATE_TICK (1*HZ/1 ?: 1)
+#define CLASS_BONUS_RATE 22 // shift from ns to increase class bonus
+#define PRIORITY_BONUS_RATE 0 // ?? Hubertus
+#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
struct prio_array {
- unsigned int nr_active;
+ int nr_active;
unsigned long bitmap[BITMAP_SIZE];
struct list_head queue[MAX_PRIO];
};
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-#define rq_active(p,rq) (get_task_lrq(p)->active)
-#define rq_expired(p,rq) (get_task_lrq(p)->expired)
-int __init init_ckrm_sched_res(void);
-#else
-#define rq_active(p,rq) (rq->active)
-#define rq_expired(p,rq) (rq->expired)
-static inline void init_ckrm_sched_res(void) {}
-static inline int ckrm_cpu_monitor_init(void) {return 0;}
-#endif //CONFIG_CKRM_CPU_SCHEDULE
-
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-struct ckrm_runqueue {
+struct ckrm_local_runqueue {
cq_node_t classqueue_linkobj; /*links in classqueue */
struct ckrm_cpu_class *cpu_class; // class it belongs to
+	struct classqueue_struct *classqueue; // classqueue it belongs to
+ CVT_t uncounted_cvt;
unsigned long long uncounted_ns;
prio_array_t *active, *expired, arrays[2];
* updated on enqueue, dequeue
*/
int top_priority;
- CVT_t local_cvt;
-
- unsigned long lrq_load;
- int local_weight;
-
-
- /*
-	 * unused CPU time accumulated while the class
- * is inactive goes to savings
- *
- * initialized to be 0
- * a class can't accumulate more than SAVING_THRESHOLD of savings
- */
- unsigned long long savings;
-
+ CVT_t local_cvt; // snapshot of local_cvt, update on every loadbalance
unsigned long magic; //for debugging
};
-typedef struct ckrm_runqueue ckrm_lrq_t;
+/**
+ * @last_sleep: the last time it sleeps, last_sleep = 0 when not sleeping
+ */
+struct ckrm_cpu_class_local_stat {
+ unsigned long long run;
+ unsigned long long total;
+ unsigned long long last_sleep;
+ unsigned long cpu_demand; /*estimated cpu demand */
+};
/**
* ckrm_cpu_class_stat - cpu usage statistics maintained for each class
unsigned long long total_ns; /*how much nano-secs it has consumed */
- struct ckrm_cpu_demand_stat local_stats[NR_CPUS];
-
- /*
- *
- */
- unsigned long max_demand; /* the maximun a class can consume */
- int egrt,megrt; /*effective guarantee*/
- int ehl,mehl; /*effective hard limit, my effective hard limit*/
+ struct ckrm_cpu_class_local_stat local_stats[NR_CPUS];
+ unsigned long cpu_demand;
+ /*temp stat used by cpu monitor */
+ int effective_guarantee;
+ int effective_limit;
+ int glut; //true or false
/*
- * eshare: for both default class and its children
- * meshare: just for the default class
+ * effective_share: for both default class and its children
+ * self_effective_share: just for the default class
*/
- int eshare;
- int meshare;
+ int effective_share;
+ int self_effective_share;
};
-#define CKRM_CPU_CLASS_MAGIC 0x7af2abe3
-
-#define USAGE_SAMPLE_FREQ HZ //sample every 1 seconds
-#define NS_PER_SAMPLE (USAGE_SAMPLE_FREQ*(NSEC_PER_SEC/HZ))
-#define USAGE_WINDOW_SIZE 60 //keep the last 60 sample
-
-struct ckrm_usage {
- unsigned long samples[USAGE_WINDOW_SIZE]; //record usages
- unsigned long sample_pointer; //pointer for the sliding window
- unsigned long long last_ns; //ns for last sample
- long long last_sample_jiffies; //in number of jiffies
-};
+typedef struct ckrm_cpu_class_stat ckrm_stat_t;
/*
* manages the class status
struct ckrm_core_class *parent;
struct ckrm_shares shares;
spinlock_t cnt_lock; // always grab parent's lock first and then child's
+	CVT_t global_cvt;	// total cumulative virtual time
struct ckrm_cpu_class_stat stat;
struct list_head links; // for linking up in cpu classes
- ckrm_lrq_t local_queues[NR_CPUS]; // runqueues
- struct ckrm_usage usage;
- unsigned long magic; //for debugging
+ struct ckrm_local_runqueue local_queues[NR_CPUS]; // runqueues
};
-#define cpu_class_weight(cls) (cls->stat.meshare)
-#define local_class_weight(lrq) (lrq->local_weight)
-
-static inline int valid_cpu_class(struct ckrm_cpu_class * cls)
-{
- return (cls && cls->magic == CKRM_CPU_CLASS_MAGIC);
-}
-
-struct classqueue_struct *get_cpu_classqueue(int cpu);
-struct ckrm_cpu_class * get_default_cpu_class(void);
-
-
-static inline void ckrm_usage_init(struct ckrm_usage* usage)
-{
- int i;
-
- for (i=0; i < USAGE_WINDOW_SIZE; i++)
- usage->samples[i] = 0;
- usage->sample_pointer = 0;
- usage->last_ns = 0;
- usage->last_sample_jiffies = 0;
-}
-
-/*
- * this function can be called at any frequency
- * it's self-contained
- */
-static inline void ckrm_sample_usage(struct ckrm_cpu_class* clsptr)
-{
- struct ckrm_usage* usage = &clsptr->usage;
- unsigned long long cur_sample;
- int duration = jiffies - usage->last_sample_jiffies;
-
- //jiffies wasn't start from 0
- //so it need to be properly handled
- if (unlikely(!usage->last_sample_jiffies))
- usage->last_sample_jiffies = jiffies;
-
-	//called too frequently
- if (duration < USAGE_SAMPLE_FREQ)
- return;
-
- usage->last_sample_jiffies = jiffies;
-
- cur_sample = clsptr->stat.total_ns - usage->last_ns;
- usage->last_ns = clsptr->stat.total_ns;
+#if CONFIG_CKRM_CPU_SCHEDULE
+#define rq_active(p,rq) (get_task_class_queue(p)->active)
+#define rq_expired(p,rq) (get_task_class_queue(p)->expired)
+#else
+#define rq_active(p,rq) (rq->active)
+#define rq_expired(p,rq) (rq->expired)
+#endif
- //scale it based on the sample duration
- cur_sample *= ((USAGE_SAMPLE_FREQ<< 15)/duration);
- cur_sample >>= 15;
- usage->samples[usage->sample_pointer] = cur_sample;
- // printk("sample = %llu jiffies=%lu \n",cur_sample, jiffies);
+//#define cpu_class_weight(cls) (cls->shares.my_guarantee)
+#define cpu_class_weight(cls) (cls->stat.self_effective_share)
- usage->sample_pointer ++;
- if (usage->sample_pointer >= USAGE_WINDOW_SIZE)
- usage->sample_pointer = 0;
-}
+#define bpt_queue(cpu) (& (cpu_rq(cpu)->classqueue) )
+CVT_t get_min_cvt(int cpu);
-//duration is specified in number of jiffies
-//return the usage in percentage
-static inline int get_ckrm_usage(struct ckrm_cpu_class* clsptr, int duration)
-{
- int nr_samples = duration/USAGE_SAMPLE_FREQ?:1;
- struct ckrm_usage* usage = &clsptr->usage;
- unsigned long long total = 0;
- int i, idx;
-
- if (nr_samples > USAGE_WINDOW_SIZE)
- nr_samples = USAGE_WINDOW_SIZE;
-
- idx = usage->sample_pointer;
- for (i = 0; i< nr_samples; i++) {
- if (! idx)
- idx = USAGE_WINDOW_SIZE;
- idx --;
- total += usage->samples[idx];
- }
- total *= 100;
- do_div(total,nr_samples);
- do_div(total,NS_PER_SAMPLE);
- do_div(total,cpus_weight(cpu_online_map));
- return total;
-}
+struct classqueue_struct *get_cpu_classqueue(int cpu);
+extern struct ckrm_cpu_class default_cpu_class_obj;
+#define default_cpu_class (&default_cpu_class_obj)
-#define lrq_nr_running(lrq) \
- (lrq->active->nr_active + lrq->expired->nr_active)
+#define local_queue_nr_running(local_queue) \
+ (local_queue->active->nr_active + local_queue->expired->nr_active)
-static inline ckrm_lrq_t *
-get_ckrm_lrq(struct ckrm_cpu_class*cls, int cpu)
+static inline struct ckrm_local_runqueue *
+get_ckrm_local_runqueue(struct ckrm_cpu_class*cls, int cpu)
{
return &(cls->local_queues[cpu]);
}
-static inline ckrm_lrq_t *get_task_lrq(struct task_struct *p)
+static inline struct ckrm_local_runqueue *get_task_class_queue(struct task_struct *p)
{
return &(p->cpu_class->local_queues[task_cpu(p)]);
}
#define task_list_entry(list) list_entry(list,struct task_struct,run_list)
-#define class_list_entry(list) list_entry(list,struct ckrm_runqueue,classqueue_linkobj)
+#define class_list_entry(list) list_entry(list,struct ckrm_local_runqueue,classqueue_linkobj)
/* some additional interfaces exported from sched.c */
struct runqueue;
+void dequeue_task(struct task_struct *p, prio_array_t * array);
+void enqueue_task(struct task_struct *p, prio_array_t * array);
+struct runqueue *task_rq_lock(task_t * p, unsigned long *flags);
+void task_rq_unlock(struct runqueue *rq, unsigned long *flags);
+extern spinlock_t cvt_lock;
extern rwlock_t class_list_lock;
extern struct list_head active_cpu_classes;
-unsigned int task_timeslice(task_t *p);
-void _ckrm_cpu_change_class(task_t *task, struct ckrm_cpu_class *newcls);
+/*functions exported by ckrm_cpu_class.c*/
+int __init init_ckrm_sched_res(void);
void init_cpu_classes(void);
-void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares);
-void ckrm_cpu_change_class(void *task, void *old, void *new);
-
+/*functions exported by ckrm_cpu_monitor.c*/
+void ckrm_cpu_monitor(void);
+void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat);
#define CPU_DEMAND_ENQUEUE 0
#define CPU_DEMAND_DEQUEUE 1
#define CPU_DEMAND_DESCHEDULE 2
-#define CPU_DEMAND_INIT 3
-
-/*functions exported by ckrm_cpu_monitor.c*/
-void ckrm_cpu_monitor(int check_min);
-int ckrm_cpu_monitor_init(void);
-void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat);
-void cpu_demand_event(struct ckrm_cpu_demand_stat* local_stat, int event, unsigned long long len);
-void adjust_local_weight(void);
-
-#define get_task_lrq_stat(p) (&(p)->cpu_class->stat.local_stats[task_cpu(p)])
-#define get_cls_local_stat(cls,cpu) (&(cls)->stat.local_stats[cpu])
-#define get_rq_local_stat(lrq,cpu) (get_cls_local_stat((lrq)->cpu_class,cpu))
-
-/********************************************************************
- * Parameters that determine how quickly CVT's progress and how
- * priority can impact a LRQ's runqueue position. See also
- * get_effective_prio(). These parameters need to adjusted
- * in accordance to the following example and understanding.
- *
- * CLASS_QUANTIZER:
- *
- * A class with 50% share, can execute 500 ms / per sec ~ 2^29 ns.
- * It's share will be set to 512 = 2^9. The globl CLASSQUEUE_SIZE is set to 2^7.
- * With CLASS_QUANTIZER=16, the local_cvt of this class will increase
- * by 2^29/2^9 = 2^20 = 1024K.
- * Setting CLASS_QUANTIZER to 16, 2^(20-16) = 16 slots / per second.
- * Do the same math, a class with any share value, will cover 16 slots / per second.
- * So 2^8 total slots is good track for 8 seconds of system execution
- *
- * PRIORITY_QUANTIZER:
- *
- * How much can top priorities of class impact slot bonus.
- * There are 40 nice priorities, range from -20 to 19, with default nice = 0
- * "2" will allow upto 5 slots improvement
- * when certain task within the class has a nice value of -20
- * in the RQ thus for 50% class it can perform ~300 msec starvation.
- *
- *******************************************************************/
-
-#define CLASS_QUANTIZER 16 //shift from ns to increase class bonus
-#define PRIORITY_QUANTIZER 2 //controls how much a high prio task can borrow
-
-#define CKRM_SHARE_ACCURACY 13
-#define NSEC_PER_MS 1000000
-#define NSEC_PER_JIFFIES (NSEC_PER_SEC/HZ)
-
-
-#define MAX_SAVINGS_ABSOLUTE (10LLU*NSEC_PER_SEC) // 10 seconds
-
-#define CVT_UPDATE_TICK ((HZ/2)?:1)
-
-// ABSOLUTE_CKRM_TUNING determines whether classes can make up
-// lost time in absolute time or in relative values
-
-#define ABSOLUTE_CKRM_TUNING // preferred due to more predictable behavior
-
-#ifdef ABSOLUTE_CKRM_TUNING
-
-#define MAX_SAVINGS MAX_SAVINGS_ABSOLUTE
-//an absolute bonus of 200ms for classes when reactivated
-#define INTERACTIVE_BONUS(lrq) ((200*NSEC_PER_MS)/local_class_weight(lrq))
-#define SAVINGS_LEAK_SPEED (CVT_UPDATE_TICK/10*NSEC_PER_JIFFIES)
-
-#define scale_cvt(val,lrq) ((val)*local_class_weight(lrq))
-#define unscale_cvt(val,lrq) (do_div(val,local_class_weight(lrq)))
-
-#else
-
-#define MAX_SAVINGS (MAX_SAVINGS_ABSOLUTE >> CKRM_SHARE_ACCURACY)
-/*
- * to improve system responsiveness
- * an inactive class is put a little bit ahead of the current class when it wakes up
- * the amount is set in normalized term to simplify the calculation
- * for class with 100% share, it can be 2s ahead
- * while for class with 10% share, it can be 200ms ahead
- */
-#define INTERACTIVE_BONUS(lrq) (2*NSEC_PER_MS)
-
-/*
- * normalized savings can't be more than MAX_NORMALIZED_SAVINGS
- * based on the current configuration
- * this means that a class with share 100% will accumulate 10s at most
- * while a class with 1% of the share can only accumulate 100ms
- */
-
-//a class with share 100% can get 100ms every 500ms
-//while a class with share 10% can only get 10ms every 500ms
-#define SAVINGS_LEAK_SPEED ((CVT_UPDATE_TICK/5*NSEC_PER_JIFFIES) >> CKRM_SHARE_ACCURACY)
-
-#define scale_cvt(val,lrq) (val)
-#define unscale_cvt(val,lrq) (val)
-
-#endif
+void cpu_demand_event(struct ckrm_cpu_class_local_stat* local_stat, int event, unsigned long long len);
+#define get_task_local_stat(p) (&(p)->cpu_class->stat.local_stats[task_cpu(p)])
+#define get_rq_local_stat(lrq,cpu) (&(lrq)->cpu_class->stat.local_stats[cpu])
/**
* get_effective_prio: return the effective priority of a class local queue
* currently, prio increases by 1 if either: top_priority increase by one
* or, local_cvt increases by 4ms
*/
-static inline int get_effective_prio(ckrm_lrq_t * lrq)
+static inline int get_effective_prio(struct ckrm_local_runqueue * lcq)
{
int prio;
- prio = lrq->local_cvt >> CLASS_QUANTIZER; // cumulative usage
-#ifndef URGENCY_SUPPORT
-#warning "ACB removing urgency calculation from get_effective_prio"
-#else
- prio += lrq->top_priority >> PRIORITY_QUANTIZER; // queue urgency
-#endif
+ // cumulative usage
+ prio = lcq->local_cvt >> CLASS_BONUS_RATE;
+ // queue urgency
+ prio += lcq->top_priority >> PRIORITY_BONUS_RATE;
return prio;
}
-CVT_t get_local_cur_cvt(int cpu);
-
/**
* update_class_priority:
*
* -- rq_get_next_task (queue switch)
* -- update_local_cvt
* -- schedule
+ * -- update_global_cvt
*/
-static inline void update_class_priority(ckrm_lrq_t *local_rq)
+static inline void update_class_priority(struct ckrm_local_runqueue *local_rq)
{
int effective_prio = get_effective_prio(local_rq);
classqueue_update_prio(local_rq->classqueue,
* set the new top priority and reposition the queue
* called when: task enqueue/dequeue and queue switch
*/
-static inline void set_top_priority(ckrm_lrq_t *lrq,
+static inline void set_top_priority(struct ckrm_local_runqueue *class_queue,
int new_priority)
{
- lrq->top_priority = new_priority;
- update_class_priority(lrq);
-}
-
-/*
- * task_load: how much load this task counts
- */
-static inline unsigned long task_load(struct task_struct* p)
-{
- return (task_timeslice(p) * p->demand_stat.cpu_demand);
-}
-
-/*
- * runqueue load is the local_weight of all the classes on this cpu
- * must be called with class_list_lock held
- */
-static inline unsigned long ckrm_cpu_load(int cpu)
-{
- struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t* lrq;
- struct ckrm_cpu_demand_stat* l_stat;
- int total_load = 0;
- int load;
-
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- lrq = get_ckrm_lrq(clsptr,cpu);
- l_stat = get_cls_local_stat(clsptr,cpu);
- load = lrq->local_weight;
- if (l_stat->cpu_demand < load)
- load = l_stat->cpu_demand;
- total_load += load;
- }
- return total_load;
+ class_queue->top_priority = new_priority;
+ update_class_priority(class_queue);
}
static inline void class_enqueue_task(struct task_struct *p,
prio_array_t * array)
{
- ckrm_lrq_t *lrq;
+ struct ckrm_local_runqueue *queue;
int effective_prio;
- lrq = get_task_lrq(p);
-
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_ENQUEUE,0);
- lrq->lrq_load += task_load(p);
+ queue = get_task_class_queue(p);
- if ((p->prio < lrq->top_priority) && (array == lrq->active))
- set_top_priority(lrq, p->prio);
-
- if (! cls_in_classqueue(&lrq->classqueue_linkobj)) {
- cpu_demand_event(get_task_lrq_stat(p),CPU_DEMAND_ENQUEUE,0);
- effective_prio = get_effective_prio(lrq);
- classqueue_enqueue(lrq->classqueue, &lrq->classqueue_linkobj, effective_prio);
+ if (! cls_in_classqueue(&queue->classqueue_linkobj)) {
+ cpu_demand_event(get_task_local_stat(p),CPU_DEMAND_ENQUEUE,0);
+ /*make sure the cvt of this class is up to date*/
+ queue->local_cvt = get_min_cvt(task_cpu(p));
+ effective_prio = get_effective_prio(queue);
+ classqueue_enqueue(queue->classqueue, &queue->classqueue_linkobj, effective_prio);
}
+
+ if ((p->prio < queue->top_priority) && (array == queue->active))
+ set_top_priority(queue, p->prio);
}
static inline void class_dequeue_task(struct task_struct *p,
prio_array_t * array)
{
- ckrm_lrq_t *lrq = get_task_lrq(p);
- unsigned long load = task_load(p);
+ struct ckrm_local_runqueue *queue = get_task_class_queue(p);
- BUG_ON(lrq->lrq_load < load);
- lrq->lrq_load -= load;
-
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_DEQUEUE,0);
-
- if ((array == lrq->active) && (p->prio == lrq->top_priority)
+ if ((array == queue->active) && (p->prio == queue->top_priority)
&& list_empty(&(array->queue[p->prio])))
- set_top_priority(lrq,
+ set_top_priority(queue,
find_next_bit(array->bitmap, MAX_PRIO,
p->prio));
}
*/
static inline void update_local_cvt(struct task_struct *p, unsigned long nsec)
{
- ckrm_lrq_t * lrq = get_task_lrq(p);
-
- unsigned long cvt_inc = nsec / local_class_weight(lrq);
-
- lrq->local_cvt += cvt_inc;
- lrq->uncounted_ns += nsec;
+ struct ckrm_local_runqueue *class_queue = get_task_class_queue(p);
+ struct ckrm_cpu_class *cls = class_queue->cpu_class;
- update_class_priority(lrq);
-}
+ unsigned long cvt_inc = nsec / cpu_class_weight(cls);
-static inline int class_preempts_curr(struct task_struct * p, struct task_struct* curr)
-{
- struct cq_node_struct* node1 = &(get_task_lrq(p)->classqueue_linkobj);
- struct cq_node_struct* node2 = &(get_task_lrq(curr)->classqueue_linkobj);
+ class_queue->local_cvt += cvt_inc;
+ class_queue->uncounted_cvt += cvt_inc;
- return (class_compare_prio(node1,node2) < 0);
+ class_queue->uncounted_ns += nsec;
+ update_class_priority(class_queue);
}
/*
- * return a random value with range [0, (val-1)]
+ * called during loadbalancing
+ * to charge the class with locally accumulated cvt
*/
-static inline int get_ckrm_rand(unsigned long val)
-{
- int rand;
- static int last_rand[NR_CPUS];
- int cpu = smp_processor_id();
-
- rand = last_rand[cpu];
- rand ++;
- if (rand >= val)
- rand = 0;
-
- last_rand[cpu] = rand;
- return rand;
-}
-
-void update_class_cputime(int this_cpu);
+void update_global_cvts(int this_cpu);
-/**********************************************/
-/* PID_LOAD_BALANCING */
-/**********************************************/
-struct ckrm_load_struct {
- unsigned long load_p; /*propotional*/
- unsigned long load_i; /*integral */
- long load_d; /*derivative */
-};
-
-typedef struct ckrm_load_struct ckrm_load_t;
-
-static inline void ckrm_load_init(ckrm_load_t* ckrm_load) {
- ckrm_load->load_p = 0;
- ckrm_load->load_i = 0;
- ckrm_load->load_d = 0;
-}
-
-void ckrm_load_sample(ckrm_load_t* ckrm_load,int cpu);
-long pid_get_pressure(ckrm_load_t* ckrm_load, int local_group);
-#define rq_ckrm_load(rq) (&((rq)->ckrm_load))
-
-static inline void ckrm_sched_tick(unsigned long j,int this_cpu,struct ckrm_load_struct* ckrm_load)
+/**
+ *
+ */
+static inline int class_preempts_curr(struct task_struct * p, struct task_struct* curr)
{
- read_lock(&class_list_lock);
-
-#ifdef CONFIG_SMP
- ckrm_load_sample(ckrm_load,this_cpu);
-#endif
-
- if (! (j % CVT_UPDATE_TICK)) {
- // printk("ckrm_sched j=%lu\n",j);
- classqueue_update_base(get_cpu_classqueue(this_cpu));
- update_class_cputime(this_cpu);
- }
+ struct cq_node_struct* node1 = &(get_task_class_queue(p)->classqueue_linkobj);
+ struct cq_node_struct* node2 = &(get_task_class_queue(curr)->classqueue_linkobj);
- read_unlock(&class_list_lock);
+ return (class_compare_prio(node1,node2) < 0);
}
-
-#endif //CONFIG_CKRM_CPU_SCHEDULE
-
#endif
* data structure for /proc/sys/... files
*/
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp, loff_t * ppos );
+ void __user * buffer, size_t * lenp );
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp, loff_t * ppos );
+ size_t * lenp );
/* these functions are called to form the content of /proc/fs/coda/... files */
int coda_vfs_stats_get_info( char * buffer, char ** start, off_t offset,
COMPATIBLE_IOCTL(DVD_AUTH)
/* Big L */
ULONG_IOCTL(LOOP_SET_FD)
-ULONG_IOCTL(LOOP_CHANGE_FD)
COMPATIBLE_IOCTL(LOOP_CLR_FD)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
struct vc_data;
struct console_font_op;
-struct console_font;
struct module;
/*
void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
- int (*con_font_get)(struct vc_data *, struct console_font *);
- int (*con_font_default)(struct vc_data *, struct console_font *, char *);
- int (*con_font_copy)(struct vc_data *, int);
+ int (*con_font_op)(struct vc_data *, struct console_font_op *);
int (*con_resize)(struct vc_data *, unsigned int, unsigned int);
int (*con_set_palette)(struct vc_data *, unsigned char *);
int (*con_scrolldelta)(struct vc_data *, int);
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
- struct console_font vc_font; /* Current VC font set */
+ struct console_font_op vc_font; /* Current VC font set */
unsigned short vc_video_erase_char; /* Background erase character */
/* VT terminal data */
unsigned int vc_state; /* Escape sequence parser state */
+++ /dev/null
-/*
- * crbce.h
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- *
- * This files contains the type definition of the record
- * created by the CRBCE CKRM classification engine
- *
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
-
-/*
- * Changes
- *
- * 2003-11-11 Created by H.Franke
- * 2003-12-01 Sanitized for Delivery by H.Franke
- *
- */
-
-#ifndef CRBCE_RECORDS_H
-#define CRBCE_RECORDS_H
-
-#ifdef __KERNEL__
-#include <linux/autoconf.h>
-#else
-#define CONFIG_CKRM
-#define CONFIG_CRBCE
-#define CONFIG_DELAY_ACCT
-#endif
-
-#include <linux/types.h>
-#include <linux/ckrm.h>
-#include <linux/ckrm_ce.h>
-
-#define CRBCE_UKCC_NAME "crbce_ukcc"
-#define CRBCE_UKCC_PATH "/mnt/relayfs"
-
-#define CRBCE_UKCC_PATH_NAME CRBCE_UKCC_PATH"/"CRBCE_UKCC_NAME
-
-#define CRBCE_MAX_CLASS_NAME_LEN 256
-
-/****************************************************************
- *
- * CRBCE EVENT SET is and extension to the standard CKRM_EVENTS
- *
- ****************************************************************/
-enum {
-
- /* we use the standard CKRM_EVENT_<..>
- * to identify reclassification cause actions
- * and extend by additional ones we need
- */
-
- /* up event flow */
-
- CRBCE_REC_EXIT = CKRM_NUM_EVENTS,
- CRBCE_REC_DATA_DELIMITER,
- CRBCE_REC_SAMPLE,
- CRBCE_REC_TASKINFO,
- CRBCE_REC_SYS_INFO,
- CRBCE_REC_CLASS_INFO,
- CRBCE_REC_KERNEL_CMD_DONE,
- CRBCE_REC_UKCC_FULL,
-
- /* down command issueance */
- CRBCE_REC_KERNEL_CMD,
-
- CRBCE_NUM_EVENTS
-};
-
-struct task_sample_info {
- uint32_t cpu_running;
- uint32_t cpu_waiting;
- uint32_t io_delayed;
- uint32_t memio_delayed;
-};
-
-/*********************************************
- * KERNEL -> USER records *
- *********************************************/
-
-/* we have records with either a time stamp or not */
-struct crbce_hdr {
- int type;
- pid_t pid;
-};
-
-struct crbce_hdr_ts {
- int type;
- pid_t pid;
- uint32_t jiffies;
- uint64_t cls;
-};
-
-/* individual records */
-
-struct crbce_rec_fork {
- struct crbce_hdr_ts hdr;
- pid_t ppid;
-};
-
-struct crbce_rec_data_delim {
- struct crbce_hdr_ts hdr;
- int is_stop; /* 0 start, 1 stop */
-};
-
-struct crbce_rec_task_data {
- struct crbce_hdr_ts hdr;
- struct task_sample_info sample;
- struct task_delay_info delay;
-};
-
-struct crbce_ukcc_full {
- struct crbce_hdr_ts hdr;
-};
-
-struct crbce_class_info {
- struct crbce_hdr_ts hdr;
- int action;
- int namelen;
- char name[CRBCE_MAX_CLASS_NAME_LEN];
-};
-
-/*********************************************
- * USER -> KERNEL records *
- *********************************************/
-
-enum crbce_kernel_cmd {
- CRBCE_CMD_START,
- CRBCE_CMD_STOP,
- CRBCE_CMD_SET_TIMER,
- CRBCE_CMD_SEND_DATA,
-};
-
-struct crbce_command {
- int type; /* we need this for the K->U reflection */
- int cmd;
- uint32_t len; /* added in the kernel for reflection */
-};
-
-#define set_cmd_hdr(rec,tok) \
-((rec).hdr.type=CRBCE_REC_KERNEL_CMD,(rec).hdr.cmd=(tok))
-
-struct crbce_cmd_done {
- struct crbce_command hdr;
- int rc;
-};
-
-struct crbce_cmd {
- struct crbce_command hdr;
-};
-
-struct crbce_cmd_send_data {
- struct crbce_command hdr;
- int delta_mode;
-};
-
-struct crbce_cmd_settimer {
- struct crbce_command hdr;
- uint32_t interval; /* in msec .. 0 means stop */
-};
-
-#endif
struct super_block *d_sb; /* The root of the dentry tree */
int d_mounted;
void *d_fsdata; /* fs-specific data */
- void * d_extra_attributes; /* TUX-specific data */
struct rcu_head d_rcu;
struct dcookie_struct *d_cookie; /* cookie, if any */
struct hlist_node d_hash; /* lookup hash list */
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_anon(struct hlist_head *);
extern int d_invalidate(struct dentry *);
-extern void flush_dentry_attributes(void);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
-char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
- struct dentry *root, struct vfsmount *rootmnt,
- char *buffer, int buflen);
-
extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
-
+
/* Allocation counts.. */
/**
#endif
-#define DEVPTS_SUPER_MAGIC 0x1cd1
-
#endif /* _LINUX_DEVPTS_FS_H */
#ifndef _DVBOSD_H_
#define _DVBOSD_H_
-#include <linux/compiler.h>
-
typedef enum {
// All functions return -2 on "not open"
OSD_Close=1, // ()
#ifndef _DVBVIDEO_H_
#define _DVBVIDEO_H_
-#include <linux/compiler.h>
-
#ifdef __KERNEL__
#include <linux/types.h>
#else
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int);
-typedef void (elevator_set_congested_fn) (request_queue_t *);
typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
elevator_put_req_fn *elevator_put_req_fn;
elevator_may_queue_fn *elevator_may_queue_fn;
- elevator_set_congested_fn *elevator_set_congested_fn;
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
extern int elv_register_queue(request_queue_t *q);
extern void elv_unregister_queue(request_queue_t *q);
extern int elv_may_queue(request_queue_t *, int);
-extern void elv_set_congested(request_queue_t *);
extern void elv_completed_request(request_queue_t *, struct request *);
extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
-#define RQ_ELV_DATA(rq) (rq)->elevator_private
-
#endif
#include <linux/types.h>
#include <asm/elf.h>
-#ifndef elf_read_implies_exec
- /* Executables for which elf_read_implies_exec() returns TRUE will
- have the READ_IMPLIES_EXEC personality flag set automatically.
- Override in asm/elf.h as needed. */
-# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
-#endif
-
/* 32-bit ELF base types. */
typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
-/* Defined for TUX async IO */
-#define EWOULDBLOCKIO 530 /* Would block due to block-IO */
-
#endif
#endif
#define EXT2_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT2_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
-#ifdef CONFIG_VSERVER_LEGACY
-#define EXT2_FL_USER_VISIBLE 0x0C03DFFF /* User visible flags */
-#define EXT2_FL_USER_MODIFIABLE 0x0C0380FF /* User modifiable flags */
-#else
#define EXT2_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT2_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-#endif
/*
* ioctl commands
#define EXT3_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
-#ifdef CONFIG_VSERVER_LEGACY
-#define EXT3_FL_USER_VISIBLE 0x0C03DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE 0x0C0380FF /* User modifiable flags */
-#else
#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-#endif
/*
* Inode dynamic state flags
#define FBIOGETCMAP 0x4604
#define FBIOPUTCMAP 0x4605
#define FBIOPAN_DISPLAY 0x4606
-#ifdef __KERNEL__
-#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
-#else
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor)
-#endif
/* 0x4607-0x460B are defined below */
/* #define FBIOGET_MONITORSPEC 0x460C */
/* #define FBIOPUT_MONITORSPEC 0x460D */
struct device;
struct file;
-struct fb_cmap_user {
- __u32 start; /* First entry */
- __u32 len; /* Number of entries */
- __u16 __user *red; /* Red values */
- __u16 __user *green;
- __u16 __user *blue;
- __u16 __user *transp; /* transparency, can be NULL */
-};
-
-struct fb_image_user {
- __u32 dx; /* Where to place image */
- __u32 dy;
- __u32 width; /* Size of image */
- __u32 height;
- __u32 fg_color; /* Only used when a mono bitmap */
- __u32 bg_color;
- __u8 depth; /* Depth of the image */
- const char __user *data; /* Pointer to image data */
- struct fb_cmap_user cmap; /* color map info */
-};
-
-struct fb_cursor_user {
- __u16 set; /* what to set */
- __u16 enable; /* cursor on/off */
- __u16 rop; /* bitop operation */
- const char __user *mask; /* cursor mask bits */
- struct fbcurpos hot; /* cursor hot spot */
- struct fb_image_user image; /* Cursor image */
-};
-
/*
* Register/unregister for framebuffer events
*/
/* drivers/video/fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
-extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to);
-extern int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to);
-extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info);
-extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info);
+extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto);
+extern int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *fb_info);
extern struct fb_cmap *fb_default_cmap(int len);
extern void fb_invert_cmaps(void);
struct files_struct *get_files_struct(struct task_struct *);
void FASTCALL(put_files_struct(struct files_struct *fs));
-extern int dupfd(struct file *file, unsigned int start);
-
#endif /* __LINUX_FILE_H */
#include <linux/cache.h>
#include <linux/prio_tree.h>
#include <linux/kobject.h>
-#include <linux/mount.h>
#include <asm/atomic.h>
struct iovec;
/* Fixed constants first: */
#undef NR_OPEN
#define NR_OPEN (1024*1024) /* Absolute upper limit on fd num */
-#define INR_OPEN 4096 /* Initial setting for nfile rlimits */
+#define INR_OPEN 1024 /* Initial setting for nfile rlimits */
#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
#define FMODE_READ 1
#define FMODE_WRITE 2
-/* Internal kernel extensions */
-#define FMODE_LSEEK 4
-#define FMODE_PREAD 8
-#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
-
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
*/
#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) __IS_FLG(inode, MS_RDONLY)
+#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
#define ATTR_ATTR_FLAG 1024
#define ATTR_KILL_SUID 2048
#define ATTR_KILL_SGID 4096
-#define ATTR_XID 8192
+#define ATTR_XID 8192
/*
* This is the Inode Attributes structure, used for notify_change(). It
struct block_device * bd_contains;
unsigned bd_block_size;
struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
unsigned bd_part_count;
int bd_invalidated;
struct gendisk * bd_disk;
#include <linux/fcntl.h>
+extern long generic_file_fcntl(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
+
extern int fcntl_getlk(struct file *, struct flock __user *);
extern int fcntl_setlk(struct file *, unsigned int, struct flock __user *);
ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- int (*check_flags)(int);
- int (*dir_notify)(struct file *filp, unsigned long arg);
+ long (*fcntl)(int fd, unsigned int cmd,
+ unsigned long arg, struct file *filp);
};
struct inode_operations {
static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
-
- if (MNT_IS_NOATIME(mnt))
- return;
- if (S_ISDIR(inode->i_mode) && MNT_IS_NODIRATIME(mnt))
- return;
- if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
- return;
-
- update_atime(inode);
+ /* per-mountpoint checks will go here */
+ update_atime(dentry->d_inode);
}
static inline void file_accessed(struct file *file)
extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
- loff_t *, read_descriptor_t *, read_actor_t, int);
+ loff_t *, read_descriptor_t *, read_actor_t);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
-extern int nonseekable_open(struct inode * inode, struct file * filp);
static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor, int nonblock)
+ read_actor_t actor)
{
do_generic_mapping_read(filp->f_mapping,
&filp->f_ra,
filp,
ppos,
desc,
- actor,
- nonblock);
+ actor);
}
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
-extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
-
extern int inode_change_ok(struct inode *, struct iattr *);
extern int __must_check inode_setattr(struct inode *, struct iattr *);
-extern void inode_update_time(struct inode *inode, struct vfsmount *mnt, int ctime_too);
+extern void inode_update_time(struct inode *inode, int ctime_too);
static inline ino_t parent_ino(struct dentry *dentry)
{
{ }
#endif /* CONFIG_SECURITY */
-/* io priorities */
-
-#define IOPRIO_NR 21
-
-#define IOPRIO_IDLE 0
-#define IOPRIO_NORM 10
-#define IOPRIO_RT 20
-
-asmlinkage int sys_ioprio_set(int ioprio);
-asmlinkage int sys_ioprio_get(void);
-
-
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
void gs_set_termios (struct tty_struct * tty,
struct termios * old_termios);
int gs_init_port(struct gs_port *port);
-int gs_setserial(struct gs_port *port, struct serial_struct __user *sp);
-int gs_getserial(struct gs_port *port, struct serial_struct __user *sp);
+int gs_setserial(struct gs_port *port, struct serial_struct *sp);
+int gs_getserial(struct gs_port *port, struct serial_struct *sp);
void gs_got_break(struct gs_port *port);
extern int gs_debug;
struct gendisk {
int major; /* major number of driver */
int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
+ int minors;
char disk_name[32]; /* name of major driver */
struct hd_struct **part; /* [indexed by minor] */
struct block_device_operations *fops;
return vma->vm_flags & VM_HUGETLB;
}
-int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
#define ICMPV6_MGM_REPORT 131
#define ICMPV6_MGM_REDUCTION 132
-#define ICMPV6_NI_QUERY 139
-#define ICMPV6_NI_REPLY 140
+/* definitions for MLDv2 */
-#define ICMPV6_MLD2_REPORT 143
+#define MLD2_MODE_IS_INCLUDE 1
+#define MLD2_MODE_IS_EXCLUDE 2
+#define MLD2_CHANGE_TO_INCLUDE 3
+#define MLD2_CHANGE_TO_EXCLUDE 4
+#define MLD2_ALLOW_NEW_SOURCES 5
+#define MLD2_BLOCK_OLD_SOURCES 6
-#define ICMPV6_DHAAD_REQUEST 144
-#define ICMPV6_DHAAD_REPLY 145
-#define ICMPV6_MOBILE_PREFIX_SOL 146
-#define ICMPV6_MOBILE_PREFIX_ADV 147
+#define ICMPV6_MLD2_REPORT 143
+#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
/*
* Codes for Destination Unreachable
__u32 data[8];
};
-/*
- * Definitions for MLDv2
- */
-#define MLD2_MODE_IS_INCLUDE 1
-#define MLD2_MODE_IS_EXCLUDE 2
-#define MLD2_CHANGE_TO_INCLUDE 3
-#define MLD2_CHANGE_TO_EXCLUDE 4
-#define MLD2_ALLOW_NEW_SOURCES 5
-#define MLD2_BLOCK_OLD_SOURCES 6
-
-#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
-
#ifdef __KERNEL__
#include <linux/netdevice.h>
#define DRIVER(drive) ((drive)->driver)
-extern int generic_ide_ioctl(struct file *, struct block_device *, unsigned, unsigned long);
+extern int generic_ide_ioctl(struct block_device *, unsigned, unsigned long);
/*
* ide_hwifs[] is the master data structure used to keep track
struct packet_type;
struct vlan_collection;
struct vlan_dev_info;
-struct hlist_node;
#include <linux/proc_fs.h> /* for proc_dir_entry */
#include <linux/netdevice.h>
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
- struct hlist_node hlist; /* linked list */
struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
- struct rcu_head rcu;
+
+ struct vlan_group *next; /* the next in the list */
};
struct vlan_priority_tci_mapping {
.switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
.xid = 0, \
- .vx_info = NULL, \
.nid = 0, \
+ .vx_info = NULL, \
.nx_info = NULL, \
- .ioprio = IOPRIO_NORM, \
}
* out).
*/
struct ipmi_msg
-{
- unsigned char netfn;
- unsigned char cmd;
- unsigned short data_len;
- unsigned char __user *data;
-};
-
-struct kernel_ipmi_msg
{
unsigned char netfn;
unsigned char cmd;
ipmi_user_t user;
struct ipmi_addr addr;
long msgid;
- struct kernel_ipmi_msg msg;
+ struct ipmi_msg msg;
/* The user_msg_data is the data supplied when a message was
sent, if this is a response to a sent message. If this is
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority);
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
int max_retries,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct kernel_ipmi_msg *msg,
+ struct ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
struct console_font_op {
unsigned int op; /* operation code KD_FONT_OP_* */
unsigned int flags; /* KD_FONT_FLAG_* */
- unsigned int width, height; /* font size */
- unsigned int charcount;
- unsigned char __user *data; /* font data with height fixed to 32 */
-};
-
-struct console_font {
unsigned int width, height; /* font size */
unsigned int charcount;
unsigned char *data; /* font data with height fixed to 32 */
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+/* Cannot easily do prefetch unfortunately */
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ for (pos = (head)->first; n = pos ? pos->next : 0, pos; \
pos = n)
/**
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
-
-/**
- * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @pos: the type * to use as a loop counter.
- * @pos: the &struct hlist_node to use as a loop counter.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as hlist_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
-
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;
-extern int sysctl_legacy_va_layout;
-
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CKRM_RES_MEM
- void *memclass;
-#endif // CONFIG_CKRM_RES_MEM
};
/*
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+int shmem_lock(struct file * file, int lock, struct user_struct *);
int shmem_zero_setup(struct vm_area_struct *);
static inline int can_do_mlock(void)
return 1;
return 0;
}
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
-extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
-
-
-static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
-}
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file *file,
unsigned long addr, unsigned long len,
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+extern unsigned int nr_used_zone_pages(void);
+
extern struct page * vmalloc_to_page(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
int write);
-#include <linux/ckrm_mem_inline.h>
static inline void
add_page_to_active_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->active_list);
zone->nr_active++;
- ckrm_mem_inc_active(page);
}
static inline void
{
list_add(&page->lru, &zone->inactive_list);
zone->nr_inactive++;
- ckrm_mem_inc_inactive(page);
}
static inline void
{
list_del(&page->lru);
zone->nr_active--;
- ckrm_mem_dec_active(page);
}
static inline void
{
list_del(&page->lru);
zone->nr_inactive--;
- ckrm_mem_dec_inactive(page);
}
static inline void
if (PageActive(page)) {
ClearPageActive(page);
zone->nr_active--;
- ckrm_mem_dec_active(page);
} else {
zone->nr_inactive--;
- ckrm_mem_dec_inactive(page);
}
}
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
-static inline int is_highmem_idx(int idx)
-{
- return (idx == ZONE_HIGHMEM);
-}
-
-static inline int is_normal_idx(int idx)
-{
- return (idx == ZONE_NORMAL);
-}
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
*/
static inline int is_highmem(struct zone *zone)
{
- return (is_highmem_idx(zone - zone->zone_pgdat->node_zones));
+ return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
}
static inline int is_normal(struct zone *zone)
{
- return (is_normal_idx(zone - zone->zone_pgdat->node_zones));
+ return (zone - zone->zone_pgdat->node_zones == ZONE_NORMAL);
}
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
int lower_zone_protection_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
#define MNT_NOSUID 1
#define MNT_NODEV 2
#define MNT_NOEXEC 4
-#define MNT_RDONLY 8
-#define MNT_NOATIME 16
-#define MNT_NODIRATIME 32
struct vfsmount
{
struct namespace *mnt_namespace; /* containing namespace */
};
-#define MNT_IS_RDONLY(m) ((m) && ((m)->mnt_flags & MNT_RDONLY))
-#define MNT_IS_NOATIME(m) ((m) && ((m)->mnt_flags & MNT_NOATIME))
-#define MNT_IS_NODIRATIME(m) ((m) && ((m)->mnt_flags & MNT_NODIRATIME))
-
static inline struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.45 2004/07/20 02:44:27 dwmw2 Exp $
+ * $Id: cfi.h,v 1.44 2004/07/13 22:32:52 dwmw2 Exp $
*/
#ifndef __MTD_CFI_H__
static inline void cfi_udelay(int us)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
unsigned long t = us * HZ / 1000000;
if (t) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(t);
return;
}
+#endif
udelay(us);
cond_resched();
}
/*
- * $Id: mtd.h,v 1.56 2004/08/09 18:46:04 dmarlin Exp $
+ * $Id: mtd.h,v 1.54 2004/07/15 01:13:12 dwmw2 Exp $
*
* Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al.
*
#include <linux/module.h>
#include <linux/uio.h>
-#include <linux/mtd/compatmac.h>
#include <mtd/mtd-abi.h>
#define MTD_CHAR_MAJOR 90
#define MTD_WRITEOOB(mtd, args...) (*(mtd->write_oob))(mtd, args)
#define MTD_SYNC(mtd) do { if (mtd->sync) (*(mtd->sync))(mtd); } while (0)
-
-#ifdef CONFIG_MTD_PARTITIONS
-void mtd_erase_callback(struct erase_info *instr);
-#else
-static inline void mtd_erase_callback(struct erase_info *instr)
-{
- if (instr->callback)
- instr->callback(instr);
-}
-#endif
-
/*
* Debugging macro and defines
*/
* For boards with physically mapped flash and using
* drivers/mtd/maps/physmap.c mapping driver.
*
- * $Id: physmap.h,v 1.3 2004/07/21 00:16:15 jwboyer Exp $
+ * $Id: physmap.h,v 1.2 2004/07/14 17:48:46 dwmw2 Exp $
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
/*
* Board needs to specify the exact mapping during their setup time.
*/
-static inline void physmap_configure(unsigned long addr, unsigned long size, int bankwidth, void (*set_vpp)(struct map_info *, int) )
+static inline void physmap_configure(unsigned long addr, unsigned long size, int buswidth, void (*set_vpp)(struct map_info *, int) )
{
physmap_map.phys = addr;
physmap_map.size = size;
- physmap_map.bankwidth = bankwidth;
+ physmap_map.buswidth = buswidth;
physmap_map.set_vpp = set_vpp;
}
unsigned mt_segno; /* the segment to read or write */
unsigned mt_mode; /* modes for read/write (sync/async etc.) */
int mt_result; /* result of r/w request, not of the ioctl */
- void __user *mt_data; /* User space buffer: must be 29kb */
+ void *mt_data; /* User space buffer: must be 29kb */
};
/* get tape capacity (ftape/zftape)
int create_mode;
};
-enum { MAX_NESTED_LINKS = 8 };
+enum { MAX_NESTED_LINKS = 5 };
struct nameidata {
struct dentry *dentry;
#define LOOKUP_CONTINUE 4
#define LOOKUP_PARENT 16
#define LOOKUP_NOALT 32
-#define LOOKUP_ATOMIC 64
-
/*
* Intent data
*/
};
struct iovec;
-struct kvec;
extern int sock_wake_async(struct socket *sk, int how, int band);
extern int sock_register(struct net_proto_family *fam);
extern unsigned long net_random(void);
extern void net_srandom(unsigned long);
-extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
-extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t len, int flags);
-
#ifndef CONFIG_SMP
#define SOCKOPS_WRAPPED(name) name
#define SOCKOPS_WRAP(name, fam)
struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;
+ struct Qdisc *qdisc_list;
struct Qdisc *qdisc_ingress;
- struct list_head qdisc_list;
unsigned long tx_queue_len; /* Max frames per queue allowed */
/* ingress path synchronizer */
/* bridge stuff */
struct net_bridge_port *br_port;
+#ifdef CONFIG_NET_FASTROUTE
+#define NETDEV_FASTROUTE_HMASK 0xF
+ /* Semi-private data. Keep it at the end of device struct. */
+ rwlock_t fastpath_lock;
+ struct dst_entry *fastpath[NETDEV_FASTROUTE_HMASK+1];
+#endif
#ifdef CONFIG_NET_DIVERT
/* this will get initialized at each interface type init routine */
struct divert_blk *divert;
extern atomic_t netdev_dropping;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff **pskb, int inward);
+#ifdef CONFIG_NET_FASTROUTE
+extern int netdev_fastroute;
+extern int netdev_fastroute_obstacles;
+extern void dev_clear_fastroute(struct net_device *dev);
+#endif
#ifdef CONFIG_SYSCTL
extern char *net_sysctl_strdup(const char *s);
+++ /dev/null
-/* PPTP constants and structs */
-#ifndef _CONNTRACK_PPTP_H
-#define _CONNTRACK_PPTP_H
-
-/* state of the control session */
-enum pptp_ctrlsess_state {
- PPTP_SESSION_NONE, /* no session present */
- PPTP_SESSION_ERROR, /* some session error */
- PPTP_SESSION_STOPREQ, /* stop_sess request seen */
- PPTP_SESSION_REQUESTED, /* start_sess request seen */
- PPTP_SESSION_CONFIRMED, /* session established */
-};
-
-/* state of the call inside the control session */
-enum pptp_ctrlcall_state {
- PPTP_CALL_NONE,
- PPTP_CALL_ERROR,
- PPTP_CALL_OUT_REQ,
- PPTP_CALL_OUT_CONF,
- PPTP_CALL_IN_REQ,
- PPTP_CALL_IN_REP,
- PPTP_CALL_IN_CONF,
- PPTP_CALL_CLEAR_REQ,
-};
-
-
-/* conntrack private data */
-struct ip_ct_pptp_master {
- enum pptp_ctrlsess_state sstate; /* session state */
-
- /* everything below is going to be per-expectation in newnat,
- * since there could be more than one call within one session */
- enum pptp_ctrlcall_state cstate; /* call state */
- u_int16_t pac_call_id; /* call id of PAC, host byte order */
- u_int16_t pns_call_id; /* call id of PNS, host byte order */
-};
-
-/* conntrack_expect private member */
-struct ip_ct_pptp_expect {
- enum pptp_ctrlcall_state cstate; /* call state */
- u_int16_t pac_call_id; /* call id of PAC */
- u_int16_t pns_call_id; /* call id of PNS */
-};
-
-
-#ifdef __KERNEL__
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-DECLARE_LOCK_EXTERN(ip_pptp_lock);
-
-#define IP_CONNTR_PPTP PPTP_CONTROL_PORT
-
-#define PPTP_CONTROL_PORT 1723
-
-#define PPTP_PACKET_CONTROL 1
-#define PPTP_PACKET_MGMT 2
-
-#define PPTP_MAGIC_COOKIE 0x1a2b3c4d
-
-struct pptp_pkt_hdr {
- __u16 packetLength;
- __u16 packetType;
- __u32 magicCookie;
-};
-
-/* PptpControlMessageType values */
-#define PPTP_START_SESSION_REQUEST 1
-#define PPTP_START_SESSION_REPLY 2
-#define PPTP_STOP_SESSION_REQUEST 3
-#define PPTP_STOP_SESSION_REPLY 4
-#define PPTP_ECHO_REQUEST 5
-#define PPTP_ECHO_REPLY 6
-#define PPTP_OUT_CALL_REQUEST 7
-#define PPTP_OUT_CALL_REPLY 8
-#define PPTP_IN_CALL_REQUEST 9
-#define PPTP_IN_CALL_REPLY 10
-#define PPTP_IN_CALL_CONNECT 11
-#define PPTP_CALL_CLEAR_REQUEST 12
-#define PPTP_CALL_DISCONNECT_NOTIFY 13
-#define PPTP_WAN_ERROR_NOTIFY 14
-#define PPTP_SET_LINK_INFO 15
-
-#define PPTP_MSG_MAX 15
-
-/* PptpGeneralError values */
-#define PPTP_ERROR_CODE_NONE 0
-#define PPTP_NOT_CONNECTED 1
-#define PPTP_BAD_FORMAT 2
-#define PPTP_BAD_VALUE 3
-#define PPTP_NO_RESOURCE 4
-#define PPTP_BAD_CALLID 5
-#define PPTP_REMOVE_DEVICE_ERROR 6
-
-struct PptpControlHeader {
- __u16 messageType;
- __u16 reserved;
-};
-
-/* FramingCapability Bitmap Values */
-#define PPTP_FRAME_CAP_ASYNC 0x1
-#define PPTP_FRAME_CAP_SYNC 0x2
-
-/* BearerCapability Bitmap Values */
-#define PPTP_BEARER_CAP_ANALOG 0x1
-#define PPTP_BEARER_CAP_DIGITAL 0x2
-
-struct PptpStartSessionRequest {
- __u16 protocolVersion;
- __u8 reserved1;
- __u8 reserved2;
- __u32 framingCapability;
- __u32 bearerCapability;
- __u16 maxChannels;
- __u16 firmwareRevision;
- __u8 hostName[64];
- __u8 vendorString[64];
-};
-
-/* PptpStartSessionResultCode Values */
-#define PPTP_START_OK 1
-#define PPTP_START_GENERAL_ERROR 2
-#define PPTP_START_ALREADY_CONNECTED 3
-#define PPTP_START_NOT_AUTHORIZED 4
-#define PPTP_START_UNKNOWN_PROTOCOL 5
-
-struct PptpStartSessionReply {
- __u16 protocolVersion;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u32 framingCapability;
- __u32 bearerCapability;
- __u16 maxChannels;
- __u16 firmwareRevision;
- __u8 hostName[64];
- __u8 vendorString[64];
-};
-
-/* PptpStopReasons */
-#define PPTP_STOP_NONE 1
-#define PPTP_STOP_PROTOCOL 2
-#define PPTP_STOP_LOCAL_SHUTDOWN 3
-
-struct PptpStopSessionRequest {
- __u8 reason;
-};
-
-/* PptpStopSessionResultCode */
-#define PPTP_STOP_OK 1
-#define PPTP_STOP_GENERAL_ERROR 2
-
-struct PptpStopSessionReply {
- __u8 resultCode;
- __u8 generalErrorCode;
-};
-
-struct PptpEchoRequest {
- __u32 identNumber;
-};
-
-/* PptpEchoReplyResultCode */
-#define PPTP_ECHO_OK 1
-#define PPTP_ECHO_GENERAL_ERROR 2
-
-struct PptpEchoReply {
- __u32 identNumber;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 reserved;
-};
-
-/* PptpFramingType */
-#define PPTP_ASYNC_FRAMING 1
-#define PPTP_SYNC_FRAMING 2
-#define PPTP_DONT_CARE_FRAMING 3
-
-/* PptpCallBearerType */
-#define PPTP_ANALOG_TYPE 1
-#define PPTP_DIGITAL_TYPE 2
-#define PPTP_DONT_CARE_BEARER_TYPE 3
-
-struct PptpOutCallRequest {
- __u16 callID;
- __u16 callSerialNumber;
- __u32 minBPS;
- __u32 maxBPS;
- __u32 bearerType;
- __u32 framingType;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u16 reserved1;
- __u16 phoneNumberLength;
- __u16 reserved2;
- __u8 phoneNumber[64];
- __u8 subAddress[64];
-};
-
-/* PptpCallResultCode */
-#define PPTP_OUTCALL_CONNECT 1
-#define PPTP_OUTCALL_GENERAL_ERROR 2
-#define PPTP_OUTCALL_NO_CARRIER 3
-#define PPTP_OUTCALL_BUSY 4
-#define PPTP_OUTCALL_NO_DIAL_TONE 5
-#define PPTP_OUTCALL_TIMEOUT 6
-#define PPTP_OUTCALL_DONT_ACCEPT 7
-
-struct PptpOutCallReply {
- __u16 callID;
- __u16 peersCallID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 causeCode;
- __u32 connectSpeed;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u32 physChannelID;
-};
-
-struct PptpInCallRequest {
- __u16 callID;
- __u16 callSerialNumber;
- __u32 callBearerType;
- __u32 physChannelID;
- __u16 dialedNumberLength;
- __u16 dialingNumberLength;
- __u8 dialedNumber[64];
- __u8 dialingNumber[64];
- __u8 subAddress[64];
-};
-
-/* PptpInCallResultCode */
-#define PPTP_INCALL_ACCEPT 1
-#define PPTP_INCALL_GENERAL_ERROR 2
-#define PPTP_INCALL_DONT_ACCEPT 3
-
-struct PptpInCallReply {
- __u16 callID;
- __u16 peersCallID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u16 reserved;
-};
-
-struct PptpInCallConnected {
- __u16 peersCallID;
- __u16 reserved;
- __u32 connectSpeed;
- __u16 packetWindow;
- __u16 packetProcDelay;
- __u32 callFramingType;
-};
-
-struct PptpClearCallRequest {
- __u16 callID;
- __u16 reserved;
-};
-
-struct PptpCallDisconnectNotify {
- __u16 callID;
- __u8 resultCode;
- __u8 generalErrorCode;
- __u16 causeCode;
- __u16 reserved;
- __u8 callStatistics[128];
-};
-
-struct PptpWanErrorNotify {
- __u16 peersCallID;
- __u16 reserved;
- __u32 crcErrors;
- __u32 framingErrors;
- __u32 hardwareOverRuns;
- __u32 bufferOverRuns;
- __u32 timeoutErrors;
- __u32 alignmentErrors;
-};
-
-struct PptpSetLinkInfo {
- __u16 peersCallID;
- __u16 reserved;
- __u32 sendAccm;
- __u32 recvAccm;
-};
-
-
-struct pptp_priv_data {
- __u16 call_id;
- __u16 mcall_id;
- __u16 pcall_id;
-};
-
-union pptp_ctrl_union {
- struct PptpStartSessionRequest sreq;
- struct PptpStartSessionReply srep;
- struct PptpStopSessionRequest streq;
- struct PptpStopSessionReply strep;
- struct PptpOutCallRequest ocreq;
- struct PptpOutCallReply ocack;
- struct PptpInCallRequest icreq;
- struct PptpInCallReply icack;
- struct PptpInCallConnected iccon;
- struct PptpClearCallRequest clrreq;
- struct PptpCallDisconnectNotify disc;
- struct PptpWanErrorNotify wanerr;
- struct PptpSetLinkInfo setlink;
-};
-
-#endif /* __KERNEL__ */
-#endif /* _CONNTRACK_PPTP_H */
+++ /dev/null
-#ifndef _CONNTRACK_PROTO_GRE_H
-#define _CONNTRACK_PROTO_GRE_H
-#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __u16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __u16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __u16 payload_len; /* size of ppp payload, not inc. gre header */
- __u16 call_id; /* peer's call_id for this session */
- __u32 seq; /* sequence number. Present if S==1 */
- __u32 ack; /* seq number of highest packet recieved by */
- /* sender in this session */
-};
-
-
-/* this is part of ip_conntrack */
-struct ip_ct_gre {
- unsigned int stream_timeout;
- unsigned int timeout;
-};
-
-/* this is part of ip_conntrack_expect */
-struct ip_ct_gre_expect {
- struct ip_ct_gre_keymap *keymap_orig, *keymap_reply;
-};
-
-#ifdef __KERNEL__
-struct ip_conntrack_expect;
-
-/* structure for original <-> reply keymap */
-struct ip_ct_gre_keymap {
- struct list_head list;
-
- struct ip_conntrack_tuple tuple;
-};
-
-
-/* add new tuple->key_reply pair to keymap */
-int ip_ct_gre_keymap_add(struct ip_conntrack_expect *exp,
- struct ip_conntrack_tuple *t,
- int reply);
-
-/* change an existing keymap entry */
-void ip_ct_gre_keymap_change(struct ip_ct_gre_keymap *km,
- struct ip_conntrack_tuple *t);
-
-/* delete keymap entries */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack_expect *exp);
-
-
-/* get pointer to gre key, if present */
-static inline u_int32_t *gre_key(struct gre_hdr *greh)
-{
- if (!greh->key)
- return NULL;
- if (greh->csum || greh->routing)
- return (u_int32_t *) (greh+sizeof(*greh)+4);
- return (u_int32_t *) (greh+sizeof(*greh));
-}
-
-/* get pointer ot gre csum, if present */
-static inline u_int16_t *gre_csum(struct gre_hdr *greh)
-{
- if (!greh->csum)
- return NULL;
- return (u_int16_t *) (greh+sizeof(*greh));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _CONNTRACK_PROTO_GRE_H */
+++ /dev/null
-/* PPTP constants and structs */
-#ifndef _NAT_PPTP_H
-#define _NAT_PPTP_H
-
-/* conntrack private data */
-struct ip_nat_pptp {
- u_int16_t pns_call_id; /* NAT'ed PNS call id */
- u_int16_t pac_call_id; /* NAT'ed PAC call id */
-};
-
-#endif /* _NAT_PPTP_H */
u32 c_vers;
unsigned long c_timestamp;
union {
- struct kvec u_vec;
+ struct iovec u_vec;
u32 u_status;
} c_u;
};
int, struct file *);
void nfsd_close(struct file *);
int nfsd_read(struct svc_rqst *, struct svc_fh *,
- loff_t, struct kvec *,int, unsigned long *);
+ loff_t, struct iovec *,int, unsigned long *);
int nfsd_write(struct svc_rqst *, struct svc_fh *,
- loff_t, struct kvec *,int, unsigned long, int *);
+ loff_t, struct iovec *,int, unsigned long, int *);
int nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
int nfsd_symlink(struct svc_rqst *, struct svc_fh *,
struct svc_fh fh;
__u32 offset;
__u32 count;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
svc_fh fh;
__u32 offset;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
struct svc_fh fh;
__u64 offset;
__u32 count;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
__u32 count;
int stable;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int vlen;
};
stateid_t rd_stateid; /* request */
u64 rd_offset; /* request */
u32 rd_length; /* request */
- struct kvec rd_iov[RPCSVC_MAXPAGES];
+ struct iovec rd_iov[RPCSVC_MAXPAGES];
int rd_vlen;
struct svc_rqst *rd_rqstp; /* response */
u64 wr_offset; /* request */
u32 wr_stable_how; /* request */
u32 wr_buflen; /* request */
- struct kvec wr_vec[RPCSVC_MAXPAGES]; /* request */
+ struct iovec wr_vec[RPCSVC_MAXPAGES]; /* request */
int wr_vlen;
u32 wr_bytes_written; /* response */
#define PG_compound 19 /* Part of a compound page */
#define PG_anon 20 /* Anonymous: anon_vma in mapping */
-#define PG_ckrm_account 21 /* This page is accounted by CKRM */
/*
extern struct pci_dev *isa_bridge;
#endif
-struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
-};
-
-#ifndef CONFIG_PCI_MSI
+#ifndef CONFIG_PCI_USE_VECTOR
static inline void pci_scan_msi_device(struct pci_dev *dev) {}
static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
-static inline void pci_disable_msi(struct pci_dev *dev) {}
-static inline int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec) {return -1;}
-static inline void pci_disable_msix(struct pci_dev *dev) {}
static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
#else
extern void pci_scan_msi_device(struct pci_dev *dev);
extern int pci_enable_msi(struct pci_dev *dev);
-extern void pci_disable_msi(struct pci_dev *dev);
-extern int pci_enable_msix(struct pci_dev* dev,
- struct msix_entry *entries, int nvec);
-extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+extern int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec);
+extern int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec);
#endif
#endif /* CONFIG_PCI */
#define PCI_DEVICE_ID_TTI_HPT302 0x0006
#define PCI_DEVICE_ID_TTI_HPT371 0x0007
#define PCI_DEVICE_ID_TTI_HPT374 0x0008
-#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 // apparently a 372N variant?
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_DEVICE_ID_VIA_8763_0 0x0198
#define PCI_DEVICE_ID_VIA_8380_0 0x0204
-#define PCI_DEVICE_ID_VIA_3238_0 0x0238
#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
-#define PCI_DEVICE_ID_VIA_3269_0 0x0269
#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_82C686_6 0x3068
#define PCI_DEVICE_ID_VIA_8233_0 0x3074
#define PCI_DEVICE_ID_VIA_8633_0 0x3091
-#define PCI_DEVICE_ID_VIA_8367_0 0x3099
+#define PCI_DEVICE_ID_VIA_8367_0 0x3099
#define PCI_DEVICE_ID_VIA_8653_0 0x3101
-#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8622 0x3102
#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
#define PCI_DEVICE_ID_VIA_8361 0x3112
#define PCI_DEVICE_ID_VIA_XM266 0x3116
#define PCI_DEVICE_ID_VIA_PT880 0x3258
#define PCI_DEVICE_ID_VIA_P4M400 0x3209
#define PCI_DEVICE_ID_VIA_8237 0x3227
-#define PCI_DEVICE_ID_VIA_3296_0 0x0296
#define PCI_DEVICE_ID_VIA_86C100A 0x6100
#define PCI_DEVICE_ID_VIA_8231 0x8231
#define PCI_DEVICE_ID_VIA_8231_4 0x8235
#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
#define PCI_DEVICE_ID_INTEL_82875_IG 0x257b
-#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
-#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
*/
enum {
MMAP_PAGE_ZERO = 0x0100000,
- ADDR_COMPAT_LAYOUT = 0x0200000,
- READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
ADDR_LIMIT_3GB = 0x8000000,
};
-/*
- * Security-relevant compatibility flags that must be
- * cleared upon setuid or setgid exec:
- */
-#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC)
-
/*
* Personality types.
*
struct tc_police
{
__u32 index;
+#ifdef CONFIG_NET_CLS_ACT
+ int refcnt;
+ int bindcnt;
+#endif
+/* Turned off because it requires new tc
+ * to work (for now maintain ABI)
+ *
+#ifdef CONFIG_NET_CLS_ACT
+ __u32 capab;
+#endif
+*/
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
- int refcnt;
- int bindcnt;
- __u32 capab;
};
struct tcf_t
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
+#ifdef CONFIG_NET_CLS_ACT
TCA_U32_ACT,
+#endif
+#ifdef CONFIG_NET_CLS_IND
TCA_U32_INDEV,
- TCA_U32_PCNT,
+#endif
__TCA_U32_MAX
};
__u32 val;
int off;
int offmask;
+#ifdef CONFIG_CLS_U32_PERF
+ unsigned long kcnt;
+#endif
};
struct tc_u32_sel
short hoff;
__u32 hmask;
+#ifdef CONFIG_CLS_U32_PERF
+ unsigned long rcnt;
+ unsigned long rhit;
+#endif
struct tc_u32_key keys[0];
};
-#ifdef CONFIG_CLS_U32_PERF
-struct tc_u32_pcnt
-{
- __u64 rcnt;
- __u64 rhit;
- __u64 kcnts[0];
-};
-#endif
/* Flags */
#define TC_U32_TERMINAL 1
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
- TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
- TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
+#ifdef CONFIG_NET_CLS_IND
+ TCA_FW_INDEV,
+#endif
+#ifdef CONFIG_NET_CLS_ACT
+ TCA_FW_ACT,
+#endif
__TCA_FW_MAX
};
}
static inline
-void set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
+int set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
+ int ret = 0;
if (ufdset)
- __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
+ ret = __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
+ if (ret)
+ return -EFAULT;
+ return 0;
}
static inline
extern struct file_operations random_fops, urandom_fops;
#endif
-unsigned int get_random_int(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
-
#endif /* __KERNEL___ */
#endif /* _LINUX_RANDOM_H */
+++ /dev/null
-/* Rule-based Classification Engine (RBCE) module
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- *
- * Module for loading of classification policies and providing
- * a user API for Class-based Kernel Resource Management (CKRM)
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
-/* Changes
- *
- * 25 Mar 2004
- * Integrate RBCE and CRBE into a single module
- *
- */
-
-#ifndef RBCE_H
-#define RBCE_H
-
-// data types defined in main rbcemod.c
-struct rbce_private_data;
-struct rbce_class;
-struct ckrm_core_class;
-
-#ifndef RBCE_EXTENSION
-
-/****************************************************************************
- *
- * RBCE STANDALONE VERSION, NO CHOICE FOR DATA COLLECTION
- *
- ****************************************************************************/
-
-#ifdef RBCE_SHOW_INCL
-#warning " ... RBCE .."
-#endif
-
-#define RBCE_MOD_DESCR "Rule Based Classification Engine Module for CKRM"
-#define RBCE_MOD_NAME "rbce"
-
-/* extension to private data: NONE */
-struct rbce_ext_private_data {
- /* empty data */
-};
-static inline void init_ext_private_data(struct rbce_private_data *dst)
-{
-}
-
-/* sending notification to user: NONE */
-
-static void notify_class_action(struct rbce_class *cls, int action)
-{
-}
-static inline void send_fork_notification(struct task_struct *tsk,
- struct ckrm_core_class *cls)
-{
-}
-static inline void send_exit_notification(struct task_struct *tsk)
-{
-}
-static inline void send_manual_notification(struct task_struct *tsk)
-{
-}
-
-/* extension initialization and destruction at module init and exit */
-static inline int init_rbce_ext_pre(void)
-{
- return 0;
-}
-static inline int init_rbce_ext_post(void)
-{
- return 0;
-}
-static inline void exit_rbce_ext(void)
-{
-}
-
-#else
-
-/***************************************************************************
- *
- * RBCE with User Level Notification
- *
- ***************************************************************************/
-
-#ifdef RBCE_SHOW_INCL
-#warning " ... CRBCE .."
-#ifdef RBCE_DO_SAMPLE
-#warning " ... CRBCE doing sampling ..."
-#endif
-#ifdef RBCE_DO_DELAY
-#warning " ... CRBCE doing delay ..."
-#endif
-#endif
-
-#define RBCE_MOD_DESCR "Rule Based Classification Engine Module" \
- "with Data Sampling/Delivery for CKRM"
-#define RBCE_MOD_NAME "crbce"
-
-#include <linux/crbce.h>
-
-struct rbce_ext_private_data {
- struct task_sample_info sample;
-};
-
-static void notify_class_action(struct rbce_class *cls, int action);
-#if 0
-static void send_fork_notification(struct task_struct *tsk,
- struct ckrm_core_class *cls);
-static void send_exit_notification(struct task_struct *tsk);
-static void send_manual_notification(struct task_struct *tsk);
-#endif
-
-#endif
-
-#endif // RBCE_H
extern struct file_operations stats_fileops;
extern struct file_operations config_fileops;
extern struct file_operations members_fileops;
-extern struct file_operations reclassify_fileops;
extern struct file_operations rcfs_file_operations;
// Callbacks into rcfs from ckrm
static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
{
(version == KEY_FORMAT_3_5) ?
- (void)(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
- (void)(set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
+ (key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
+ (set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
}
static inline void set_le_key_k_type (int version, struct key * key, int type)
{
(version == KEY_FORMAT_3_5) ?
- (void)(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
- (void)(set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
+ (key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
+ (set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
}
static inline void set_le_ih_k_type (struct item_head * ih, int type)
{
extern struct semaphore rtnl_sem;
+#define rtnl_exlock() do { } while(0)
+#define rtnl_exunlock() do { } while(0)
+#define rtnl_exlock_nowait() (0)
+
#define rtnl_shlock() down(&rtnl_sem)
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
-// DECLARE_PER_CPU(struct runqueue, runqueues); -- removed after ckrm cpu v7 merge
+DECLARE_PER_CPU(struct runqueue, runqueues);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
#include <linux/aio.h>
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
-extern unsigned long
-arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-extern void arch_unmap_area(struct vm_area_struct *area);
-extern void arch_unmap_area_topdown(struct vm_area_struct *area);
-
-
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
- unsigned long (*get_unmapped_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- unsigned long (*get_unmapped_exec_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- void (*unmap_area) (struct vm_area_struct *area);
- unsigned long mmap_base; /* base of mmap area */
unsigned long free_area_cache; /* first hole */
+ unsigned long non_executable_cache; /* last hole top */
+ unsigned long mmap_top; /* top of mmap area */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
mm_context_t context;
struct vx_info *mm_vx_info;
- /* Token based thrashing protection. */
- unsigned long swap_token_time;
- char recent_pagein;
-
/* coredumping support */
int core_waiters;
struct completion *core_startup_done, core_done;
struct kioctx *ioctx_list;
struct kioctx default_kioctx;
-#ifdef CONFIG_CKRM_RES_MEM
- struct ckrm_mem_res *memclass;
- struct list_head tasklist; /* list of all tasks sharing this address space */
- spinlock_t peertask_lock; /* protect above tasklist */
-#endif
};
extern int mmlist_nr;
struct audit_context; /* See audit.c */
struct mempolicy;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-/**
- * ckrm_cpu_demand_stat - used to track the cpu demand of a task/class
- * @run: how much time it has been running since the counter started
- * @total: total time since the counter started
- * @last_sleep: the last time it sleeps, last_sleep = 0 when not sleeping
- * @recalc_interval: how often do we recalculate the cpu_demand
- * @cpu_demand: moving average of run/total
- */
-struct ckrm_cpu_demand_stat {
- unsigned long long run;
- unsigned long long total;
- unsigned long long last_sleep;
- unsigned long long recalc_interval;
- unsigned long cpu_demand; /*estimated cpu demand */
-};
-#endif
-
-
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
unsigned int time_slice, first_time_slice;
struct list_head tasks;
- /*
- * ptrace_list/ptrace_children forms the list of my children
- * that were stolen by a ptracer.
- */
struct list_head ptrace_children;
struct list_head ptrace_list;
*/
struct task_struct *real_parent; /* real parent process (when being debugged) */
struct task_struct *parent; /* parent process */
- /*
- * children/sibling forms the list of my children plus the
- * tasks I'm ptracing.
- */
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
+
sigset_t blocked, real_blocked;
struct sigpending pending;
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
-
- /* TUX state */
- void *tux_info;
- void (*tux_exit)(void);
-
void *security;
struct audit_context *audit_context;
struct io_context *io_context;
- int ioprio;
-
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct list_head taskclass_link;
#ifdef CONFIG_CKRM_CPU_SCHEDULE
struct ckrm_cpu_class *cpu_class;
- //track cpu demand of this task
- struct ckrm_cpu_demand_stat demand_stat;
-#endif //CONFIG_CKRM_CPU_SCHEDULE
+#endif
#endif // CONFIG_CKRM_TYPE_TASKCLASS
-#ifdef CONFIG_CKRM_RES_MEM
- struct list_head mm_peers; // list of tasks using same mm_struct
-#endif // CONFIG_CKRM_RES_MEM
#endif // CONFIG_CKRM
+
struct task_delay_info delays;
};
void yield(void);
+/*
+ * These are the runqueue data structures:
+ */
+typedef struct runqueue runqueue_t;
+
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+#include <linux/ckrm_classqueue.h>
+#endif
+
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+
+/**
+ * If the tasks belong to different classes, compare class priority;
+ * otherwise compare task priority.
+ */
+#define TASK_PREEMPTS_CURR(p, rq) \
+ (((p)->cpu_class != (rq)->curr->cpu_class) && ((rq)->curr != (rq)->idle))? class_preempts_curr((p),(rq)->curr) : ((p)->prio < (rq)->curr->prio)
+#else
+#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+struct prio_array {
+ unsigned int nr_active;
+ unsigned long bitmap[BITMAP_SIZE];
+ struct list_head queue[MAX_PRIO];
+};
+#define rq_active(p,rq) (rq->active)
+#define rq_expired(p,rq) (rq->expired)
+#define ckrm_rebalance_tick(j,this_cpu) do {} while (0)
+#define TASK_PREEMPTS_CURR(p, rq) \
+ ((p)->prio < (rq)->curr->prio)
+#endif
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: code paths that need to lock multiple runqueues
+ * (such as the load balancing or the thread migration code) must
+ * acquire the locks in ascending &runqueue address order.
+ */
+struct runqueue {
+ spinlock_t lock;
+
+ /*
+ * nr_running and cpu_load should be in the same cacheline because
+ * remote CPUs use both these fields when doing load calculation.
+ */
+ unsigned long nr_running;
+#if defined(CONFIG_SMP)
+ unsigned long cpu_load;
+#endif
+ unsigned long long nr_switches, nr_preempt;
+ unsigned long expired_timestamp, nr_uninterruptible;
+ unsigned long long timestamp_last_tick;
+ task_t *curr, *idle;
+ struct mm_struct *prev_mm;
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ unsigned long ckrm_cpu_load;
+ struct classqueue_struct classqueue;
+#else
+ prio_array_t *active, *expired, arrays[2];
+#endif
+ int best_expired_prio;
+ atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+ struct sched_domain *sd;
+
+ /* For active balancing */
+ int active_balance;
+ int push_cpu;
+
+ task_t *migration_thread;
+ struct list_head migration_queue;
+#endif
+ struct list_head hold_queue;
+ int idle_tokens;
+};
+
/*
* The default (Linux) execution domain.
*/
atomic_inc(&u->__count);
return u;
}
-
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
}
#endif
-
/*
* Routines for handling mm_structs
*/
return mm;
}
-
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
#define def_delay_var(var) unsigned long long var
#define get_delay(tsk,field) ((tsk)->delays.field)
+#define delay_value(x) (((unsigned long)(x))/1000)
#define start_delay(var) ((var) = sched_clock())
#define start_delay_set(var,flg) (set_delay_flag(current,flg),(var) = sched_clock())
#define inc_delay(tsk,field) (((tsk)->delays.field)++)
+#define add_delay_ts(tsk,field,start_ts,end_ts) ((tsk)->delays.field += delay_value((end_ts)-(start_ts)))
+#define add_delay_clear(tsk,field,start_ts,flg) (add_delay_ts(tsk,field,start_ts,sched_clock()),clear_delay_flag(tsk,flg))
-/* because of hardware timer drifts in SMPs and task continue on different cpu
- * then where the start_ts was taken there is a possibility that
- * end_ts < start_ts by some usecs. In this case we ignore the diff
- * and add nothing to the total.
- */
-#ifdef CONFIG_SMP
-#define test_ts_integrity(start_ts,end_ts) (likely((end_ts) > (start_ts)))
-#else
-#define test_ts_integrity(start_ts,end_ts) (1)
-#endif
-
-#define add_delay_ts(tsk,field,start_ts,end_ts) \
- do { if (test_ts_integrity(start_ts,end_ts)) (tsk)->delays.field += ((end_ts)-(start_ts)); } while (0)
-
-#define add_delay_clear(tsk,field,start_ts,flg) \
- do { \
- unsigned long long now = sched_clock();\
- add_delay_ts(tsk,field,start_ts,now); \
- clear_delay_flag(tsk,flg); \
- } while (0)
-
-static inline void add_io_delay(unsigned long long dstart)
+static inline void add_io_delay(unsigned long dstart)
{
struct task_struct * tsk = current;
- unsigned long long now = sched_clock();
- unsigned long long val;
-
- if (test_ts_integrity(dstart,now))
- val = now - dstart;
- else
- val = 0;
+ unsigned long val = delay_value(sched_clock()-dstart);
if (test_delay_flag(tsk,PF_MEMIO)) {
tsk->delays.mem_iowait_total += val;
tsk->delays.num_memwaits++;
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
-}
-#endif
-
#endif /* __KERNEL__ */
#endif
/* PPC CPM type number */
#define PORT_CPM 58
-/* MPC52xx type numbers */
-#define PORT_MPC52xx 59
+/* Marvell MPSC for PPC & MIPS */
+#define PORT_MPSC 59
#ifdef __KERNEL__
time_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
- struct user_struct *mlock_user;
+ struct user_struct * mlock_user;
};
/* shm_mode upper byte flags */
#define SHMEM_NR_DIRECT 16
-#define TMPFS_SUPER_MAGIC 0x01021994
-
-
struct shmem_inode_info {
spinlock_t lock;
unsigned long next_index;
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
- char cb[40];
+ char cb[48];
unsigned int len,
data_len,
extern void skb_init(void);
extern void skb_add_mtu(int mtu);
-struct skb_iter {
- /* Iteration functions set these */
- unsigned char *data;
- unsigned int len;
-
- /* Private to iteration */
- unsigned int nextfrag;
- struct sk_buff *fraglist;
-};
-
-/* Keep iterating until skb_iter_next returns false. */
-extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
-extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
-/* Call this if aborting loop before !skb_iter_next */
-extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
-
-struct tux_req_struct;
-
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_ct_info *nfct)
{
+++ /dev/null
-/*
- * Definitions for MIBs
- *
- * Author: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
- */
-
-#ifndef _LINUX_SNMP_H
-#define _LINUX_SNMP_H
-
-/* ipstats mib definitions */
-/*
- * RFC 1213: MIB-II
- * RFC 2011 (updates 1213): SNMPv2-MIB-IP
- * RFC 2863: Interfaces Group MIB
- * RFC 2465: IPv6 MIB: General Group
- * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables
- */
-enum
-{
- IPSTATS_MIB_NUM = 0,
- IPSTATS_MIB_INRECEIVES, /* InReceives */
- IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
- IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */
- IPSTATS_MIB_INNOROUTES, /* InNoRoutes */
- IPSTATS_MIB_INADDRERRORS, /* InAddrErrors */
- IPSTATS_MIB_INUNKNOWNPROTOS, /* InUnknownProtos */
- IPSTATS_MIB_INTRUNCATEDPKTS, /* InTruncatedPkts */
- IPSTATS_MIB_INDISCARDS, /* InDiscards */
- IPSTATS_MIB_INDELIVERS, /* InDelivers */
- IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
- IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
- IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */
- IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */
- IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */
- IPSTATS_MIB_REASMREQDS, /* ReasmReqds */
- IPSTATS_MIB_REASMOKS, /* ReasmOKs */
- IPSTATS_MIB_REASMFAILS, /* ReasmFails */
- IPSTATS_MIB_FRAGOKS, /* FragOKs */
- IPSTATS_MIB_FRAGFAILS, /* FragFails */
- IPSTATS_MIB_FRAGCREATES, /* FragCreates */
- IPSTATS_MIB_INMCASTPKTS, /* InMcastPkts */
- IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */
- __IPSTATS_MIB_MAX
-};
-
-/* icmp mib definitions */
-/*
- * RFC 1213: MIB-II ICMP Group
- * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group
- */
-enum
-{
- ICMP_MIB_NUM = 0,
- ICMP_MIB_INMSGS, /* InMsgs */
- ICMP_MIB_INERRORS, /* InErrors */
- ICMP_MIB_INDESTUNREACHS, /* InDestUnreachs */
- ICMP_MIB_INTIMEEXCDS, /* InTimeExcds */
- ICMP_MIB_INPARMPROBS, /* InParmProbs */
- ICMP_MIB_INSRCQUENCHS, /* InSrcQuenchs */
- ICMP_MIB_INREDIRECTS, /* InRedirects */
- ICMP_MIB_INECHOS, /* InEchos */
- ICMP_MIB_INECHOREPS, /* InEchoReps */
- ICMP_MIB_INTIMESTAMPS, /* InTimestamps */
- ICMP_MIB_INTIMESTAMPREPS, /* InTimestampReps */
- ICMP_MIB_INADDRMASKS, /* InAddrMasks */
- ICMP_MIB_INADDRMASKREPS, /* InAddrMaskReps */
- ICMP_MIB_OUTMSGS, /* OutMsgs */
- ICMP_MIB_OUTERRORS, /* OutErrors */
- ICMP_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */
- ICMP_MIB_OUTTIMEEXCDS, /* OutTimeExcds */
- ICMP_MIB_OUTPARMPROBS, /* OutParmProbs */
- ICMP_MIB_OUTSRCQUENCHS, /* OutSrcQuenchs */
- ICMP_MIB_OUTREDIRECTS, /* OutRedirects */
- ICMP_MIB_OUTECHOS, /* OutEchos */
- ICMP_MIB_OUTECHOREPS, /* OutEchoReps */
- ICMP_MIB_OUTTIMESTAMPS, /* OutTimestamps */
- ICMP_MIB_OUTTIMESTAMPREPS, /* OutTimestampReps */
- ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */
- ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */
- __ICMP_MIB_MAX
-};
-
-/* icmp6 mib definitions */
-/*
- * RFC 2466: ICMPv6-MIB
- */
-enum
-{
- ICMP6_MIB_NUM = 0,
- ICMP6_MIB_INMSGS, /* InMsgs */
- ICMP6_MIB_INERRORS, /* InErrors */
- ICMP6_MIB_INDESTUNREACHS, /* InDestUnreachs */
- ICMP6_MIB_INPKTTOOBIGS, /* InPktTooBigs */
- ICMP6_MIB_INTIMEEXCDS, /* InTimeExcds */
- ICMP6_MIB_INPARMPROBLEMS, /* InParmProblems */
- ICMP6_MIB_INECHOS, /* InEchos */
- ICMP6_MIB_INECHOREPLIES, /* InEchoReplies */
- ICMP6_MIB_INGROUPMEMBQUERIES, /* InGroupMembQueries */
- ICMP6_MIB_INGROUPMEMBRESPONSES, /* InGroupMembResponses */
- ICMP6_MIB_INGROUPMEMBREDUCTIONS, /* InGroupMembReductions */
- ICMP6_MIB_INROUTERSOLICITS, /* InRouterSolicits */
- ICMP6_MIB_INROUTERADVERTISEMENTS, /* InRouterAdvertisements */
- ICMP6_MIB_INNEIGHBORSOLICITS, /* InNeighborSolicits */
- ICMP6_MIB_INNEIGHBORADVERTISEMENTS, /* InNeighborAdvertisements */
- ICMP6_MIB_INREDIRECTS, /* InRedirects */
- ICMP6_MIB_OUTMSGS, /* OutMsgs */
- ICMP6_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */
- ICMP6_MIB_OUTPKTTOOBIGS, /* OutPktTooBigs */
- ICMP6_MIB_OUTTIMEEXCDS, /* OutTimeExcds */
- ICMP6_MIB_OUTPARMPROBLEMS, /* OutParmProblems */
- ICMP6_MIB_OUTECHOREPLIES, /* OutEchoReplies */
- ICMP6_MIB_OUTROUTERSOLICITS, /* OutRouterSolicits */
- ICMP6_MIB_OUTNEIGHBORSOLICITS, /* OutNeighborSolicits */
- ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS, /* OutNeighborAdvertisements */
- ICMP6_MIB_OUTREDIRECTS, /* OutRedirects */
- ICMP6_MIB_OUTGROUPMEMBRESPONSES, /* OutGroupMembResponses */
- ICMP6_MIB_OUTGROUPMEMBREDUCTIONS, /* OutGroupMembReductions */
- __ICMP6_MIB_MAX
-};
-
-/* tcp mib definitions */
-/*
- * RFC 1213: MIB-II TCP group
- * RFC 2012 (updates 1213): SNMPv2-MIB-TCP
- */
-enum
-{
- TCP_MIB_NUM = 0,
- TCP_MIB_RTOALGORITHM, /* RtoAlgorithm */
- TCP_MIB_RTOMIN, /* RtoMin */
- TCP_MIB_RTOMAX, /* RtoMax */
- TCP_MIB_MAXCONN, /* MaxConn */
- TCP_MIB_ACTIVEOPENS, /* ActiveOpens */
- TCP_MIB_PASSIVEOPENS, /* PassiveOpens */
- TCP_MIB_ATTEMPTFAILS, /* AttemptFails */
- TCP_MIB_ESTABRESETS, /* EstabResets */
- TCP_MIB_CURRESTAB, /* CurrEstab */
- TCP_MIB_INSEGS, /* InSegs */
- TCP_MIB_OUTSEGS, /* OutSegs */
- TCP_MIB_RETRANSSEGS, /* RetransSegs */
- TCP_MIB_INERRS, /* InErrs */
- TCP_MIB_OUTRSTS, /* OutRsts */
- __TCP_MIB_MAX
-};
-
-/* udp mib definitions */
-/*
- * RFC 1213: MIB-II UDP group
- * RFC 2013 (updates 1213): SNMPv2-MIB-UDP
- */
-enum
-{
- UDP_MIB_NUM = 0,
- UDP_MIB_INDATAGRAMS, /* InDatagrams */
- UDP_MIB_NOPORTS, /* NoPorts */
- UDP_MIB_INERRORS, /* InErrors */
- UDP_MIB_OUTDATAGRAMS, /* OutDatagrams */
- __UDP_MIB_MAX
-};
-
-/* sctp mib definitions */
-/*
- * draft-ietf-sigtran-sctp-mib-07.txt
- */
-enum
-{
- SCTP_MIB_NUM = 0,
- SCTP_MIB_CURRESTAB, /* CurrEstab */
- SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */
- SCTP_MIB_PASSIVEESTABS, /* PassiveEstabs */
- SCTP_MIB_ABORTEDS, /* Aborteds */
- SCTP_MIB_SHUTDOWNS, /* Shutdowns */
- SCTP_MIB_OUTOFBLUES, /* OutOfBlues */
- SCTP_MIB_CHECKSUMERRORS, /* ChecksumErrors */
- SCTP_MIB_OUTCTRLCHUNKS, /* OutCtrlChunks */
- SCTP_MIB_OUTORDERCHUNKS, /* OutOrderChunks */
- SCTP_MIB_OUTUNORDERCHUNKS, /* OutUnorderChunks */
- SCTP_MIB_INCTRLCHUNKS, /* InCtrlChunks */
- SCTP_MIB_INORDERCHUNKS, /* InOrderChunks */
- SCTP_MIB_INUNORDERCHUNKS, /* InUnorderChunks */
- SCTP_MIB_FRAGUSRMSGS, /* FragUsrMsgs */
- SCTP_MIB_REASMUSRMSGS, /* ReasmUsrMsgs */
- SCTP_MIB_OUTSCTPPACKS, /* OutSCTPPacks */
- SCTP_MIB_INSCTPPACKS, /* InSCTPPacks */
- SCTP_MIB_RTOALGORITHM, /* RtoAlgorithm */
- SCTP_MIB_RTOMIN, /* RtoMin */
- SCTP_MIB_RTOMAX, /* RtoMax */
- SCTP_MIB_RTOINITIAL, /* RtoInitial */
- SCTP_MIB_VALCOOKIELIFE, /* ValCookieLife */
- SCTP_MIB_MAXINITRETR, /* MaxInitRetr */
- __SCTP_MIB_MAX
-};
-
-/* linux mib definitions */
-enum
-{
- LINUX_MIB_NUM = 0,
- LINUX_MIB_SYNCOOKIESSENT, /* SyncookiesSent */
- LINUX_MIB_SYNCOOKIESRECV, /* SyncookiesRecv */
- LINUX_MIB_SYNCOOKIESFAILED, /* SyncookiesFailed */
- LINUX_MIB_EMBRYONICRSTS, /* EmbryonicRsts */
- LINUX_MIB_PRUNECALLED, /* PruneCalled */
- LINUX_MIB_RCVPRUNED, /* RcvPruned */
- LINUX_MIB_OFOPRUNED, /* OfoPruned */
- LINUX_MIB_OUTOFWINDOWICMPS, /* OutOfWindowIcmps */
- LINUX_MIB_LOCKDROPPEDICMPS, /* LockDroppedIcmps */
- LINUX_MIB_ARPFILTER, /* ArpFilter */
- LINUX_MIB_TIMEWAITED, /* TimeWaited */
- LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */
- LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */
- LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */
- LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */
- LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */
- LINUX_MIB_DELAYEDACKS, /* DelayedACKs */
- LINUX_MIB_DELAYEDACKLOCKED, /* DelayedACKLocked */
- LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */
- LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */
- LINUX_MIB_LISTENDROPS, /* ListenDrops */
- LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */
- LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */
- LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */
- LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */
- LINUX_MIB_TCPHPHITS, /* TCPHPHits */
- LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */
- LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */
- LINUX_MIB_TCPHPACKS, /* TCPHPAcks */
- LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */
- LINUX_MIB_TCPSACKRECOVERY, /* TCPSackRecovery */
- LINUX_MIB_TCPSACKRENEGING, /* TCPSACKReneging */
- LINUX_MIB_TCPFACKREORDER, /* TCPFACKReorder */
- LINUX_MIB_TCPSACKREORDER, /* TCPSACKReorder */
- LINUX_MIB_TCPRENOREORDER, /* TCPRenoReorder */
- LINUX_MIB_TCPTSREORDER, /* TCPTSReorder */
- LINUX_MIB_TCPFULLUNDO, /* TCPFullUndo */
- LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */
- LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */
- LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */
- LINUX_MIB_TCPLOSS, /* TCPLoss */
- LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */
- LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */
- LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
- LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */
- LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */
- LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */
- LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */
- LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */
- LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */
- LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */
- LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */
- LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */
- LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */
- LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
- LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */
- LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */
- LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */
- LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */
- LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */
- LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */
- LINUX_MIB_TCPABORTONTIMEOUT, /* TCPAbortOnTimeout */
- LINUX_MIB_TCPABORTONLINGER, /* TCPAbortOnLinger */
- LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */
- LINUX_MIB_TCPMEMORYPRESSURES, /* TCPMemoryPressures */
- __LINUX_MIB_MAX
-};
-
-#endif /* _LINUX_SNMP_H */
#define SOL_NETBEUI 267
#define SOL_LLC 268
-/* PlanetLab PL2525: reset the context ID of an existing socket */
-#define SO_SETXID SO_PEERCRED
-
/* IPX options */
#define IPX_TYPE 1
extern int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct socket;
-struct file * sock_map_file(struct socket *sock);
-extern int sock_map_fd(struct socket *sock);
-extern struct socket *sockfd_lookup(int fd, int *err);
-
#endif
#endif /* not kernel and not glibc */
#endif /* _LINUX_SOCKET_H */
struct auth_cred {
uid_t uid;
gid_t gid;
- xid_t xid;
struct group_info *group_info;
};
cl_autobind : 1,/* use getport() */
cl_droppriv : 1,/* enable NFS suid hack */
cl_oneshot : 1,/* dispose after use */
- cl_dead : 1,/* abandoned */
- cl_tagxid : 1;/* do xid tagging */
+ cl_dead : 1;/* abandoned */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
struct rpc_portmap * cl_pmap; /* port mapping */
* read responses (that have a header, and some data pages, and possibly
* a tail) and means we can share some client side routines.
*
- * The xdr_buf.head kvec always points to the first page in the rq_*pages
+ * The xdr_buf.head iovec always points to the first page in the rq_*pages
* list. The xdr_buf.pages pointer points to the second page on that
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
*/
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2)
-static inline u32 svc_getu32(struct kvec *iov)
+static inline u32 svc_getu32(struct iovec *iov)
{
u32 val, *vp;
vp = iov->iov_base;
iov->iov_len -= sizeof(u32);
return val;
}
-static inline void svc_putu32(struct kvec *iov, u32 val)
+static inline void svc_putu32(struct iovec *iov, u32 val)
{
u32 *vp = iov->iov_base + iov->iov_len;
*vp = val;
xdr_argsize_check(struct svc_rqst *rqstp, u32 *p)
{
char *cp = (char *)p;
- struct kvec *vec = &rqstp->rq_arg.head[0];
+ struct iovec *vec = &rqstp->rq_arg.head[0];
return cp - (char*)vec->iov_base <= vec->iov_len;
}
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, u32 *p)
{
- struct kvec *vec = &rqstp->rq_res.head[0];
+ struct iovec *vec = &rqstp->rq_res.head[0];
char *cp = (char*)p;
vec->iov_len = cp - (char*)vec->iov_base;
* operations and/or has a need for scatter/gather involving pages.
*/
struct xdr_buf {
- struct kvec head[1], /* RPC header + non-page data */
+ struct iovec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
struct page ** pages; /* Array of contiguous pages */
}
/*
- * Adjust kvec to reflect end of xdr'ed data (RPC client XDR)
+ * Adjust iovec to reflect end of xdr'ed data (RPC client XDR)
*/
static inline int
-xdr_adjust_iovec(struct kvec *iov, u32 *p)
+xdr_adjust_iovec(struct iovec *iov, u32 *p)
{
return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
}
-void xdr_shift_iovec(struct kvec *, int, size_t);
+void xdr_shift_iovec(struct iovec *, int, size_t);
/*
* Maximum number of iov's we use.
/*
* XDR buffer helper functions
*/
-extern int xdr_kmap(struct kvec *, struct xdr_buf *, size_t);
+extern int xdr_kmap(struct iovec *, struct xdr_buf *, size_t);
extern void xdr_kunmap(struct xdr_buf *, size_t);
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void _copy_from_pages(char *, struct page **, size_t, size_t);
-extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
+extern void xdr_buf_from_iov(struct iovec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len);
struct xdr_buf *buf; /* XDR buffer to read/write */
uint32_t *end; /* end of available buffer space */
- struct kvec *iov; /* pointer to the current kvec */
+ struct iovec *iov; /* pointer to the current iovec */
};
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */
-extern void out_of_memory(int gfp_mask);
+extern void out_of_memory(void);
/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
unsigned long addr);
-/* linux/mm/thrash.c */
-#ifdef CONFIG_SWAP
-extern struct mm_struct * swap_token_mm;
-extern void grab_swap_token(void);
-extern void __put_swap_token(struct mm_struct *);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
- if (has_swap_token(mm))
- __put_swap_token(mm);
-}
-#else /* CONFIG_SWAP */
-#define put_swap_token(x) do { } while(0)
-#define grab_swap_token do { } while(0)
-#define has_swap_token 0
-#endif /* CONFIG_SWAP */
/* linux/mm/swapfile.c */
extern long total_swap_pages;
VM_BLOCK_DUMP=24, /* block dump mode */
VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
- VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */
};
NET_DECNET=15,
NET_ECONET=16,
NET_SCTP=17,
- NET_TUX=18,
};
/* /proc/sys/kernel/random */
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
};
-/* /proc/sys/net/tux/ */
-enum {
- NET_TUX_DOCROOT = 1,
- NET_TUX_LOGFILE = 2,
- NET_TUX_EXTCGI = 3,
- NET_TUX_STOP = 4,
- NET_TUX_CLIENTPORT = 5,
- NET_TUX_LOGGING = 6,
- NET_TUX_SERVERPORT = 7,
- NET_TUX_THREADS = 8,
- NET_TUX_KEEPALIVE_TIMEOUT = 9,
- NET_TUX_MAX_KEEPALIVE_BW = 10,
- NET_TUX_DEFER_ACCEPT = 11,
- NET_TUX_MAX_FREE_REQUESTS = 12,
- NET_TUX_MAX_CONNECT = 13,
- NET_TUX_MAX_BACKLOG = 14,
- NET_TUX_MODE_FORBIDDEN = 15,
- NET_TUX_MODE_ALLOWED = 16,
- NET_TUX_MODE_USERSPACE = 17,
- NET_TUX_MODE_CGI = 18,
- NET_TUX_CGI_UID = 19,
- NET_TUX_CGI_GID = 20,
- NET_TUX_CGIROOT = 21,
- NET_TUX_LOGENTRY_ALIGN_ORDER = 22,
- NET_TUX_NONAGLE = 23,
- NET_TUX_ACK_PINGPONG = 24,
- NET_TUX_PUSH_ALL = 25,
- NET_TUX_ZEROCOPY_PARSE = 26,
- NET_CONFIG_TUX_DEBUG_BLOCKING = 27,
- NET_TUX_PAGE_AGE_START = 28,
- NET_TUX_PAGE_AGE_ADV = 29,
- NET_TUX_PAGE_AGE_MAX = 30,
- NET_TUX_VIRTUAL_SERVER = 31,
- NET_TUX_MAX_OBJECT_SIZE = 32,
- NET_TUX_COMPRESSION = 33,
- NET_TUX_NOID = 34,
- NET_TUX_CGI_INHERIT_CPU = 35,
- NET_TUX_CGI_CPU_MASK = 36,
- NET_TUX_ZEROCOPY_HEADER = 37,
- NET_TUX_ZEROCOPY_SENDFILE = 38,
- NET_TUX_ALL_USERSPACE = 39,
- NET_TUX_REDIRECT_LOGGING = 40,
- NET_TUX_REFERER_LOGGING = 41,
- NET_TUX_MAX_HEADER_LEN = 42,
- NET_TUX_404_PAGE = 43,
- NET_TUX_MAX_KEEPALIVES = 44,
- NET_TUX_IGNORE_QUERY = 45,
-};
-
/* CTL_PROC names: */
/* CTL_FS names: */
void **context);
typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
extern int proc_dostring(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_bset(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
- struct file *, void __user *, size_t *, loff_t *);
+ struct file *, void __user *, size_t *);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
#ifndef _SYSFS_H_
#define _SYSFS_H_
-#define SYSFS_SUPER_MAGIC 0x62656572
-
struct kobject;
struct module;
#include <linux/types.h>
struct task_delay_info {
-#if defined CONFIG_DELAY_ACCT
+#ifdef CONFIG_DELAY_ACCT
/* delay statistics in usecs */
uint64_t waitcpu_total;
uint64_t runcpu_total;
uint32_t runs;
uint32_t num_iowaits;
uint32_t num_memwaits;
-#endif
+#endif
};
#endif // _LINUX_TASKDELAYS_H
#ifndef CONFIG_ACCEPT_QUEUES
struct open_request *accept_queue_tail;
#endif
+
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
__u32 cnt; /* increase cwnd by 1 after this number of ACKs */
__u32 last_max_cwnd; /* last maximium snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
- __u32 last_stamp; /* time when updated last_cwnd */
} bictcp;
#ifdef CONFIG_ACCEPT_QUEUES
#include "vserver/context.h"
+// #define VX_DEBUG
+
+
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
#define vx_task_xid(t) ((t)->xid)
#define _VX_VS_CONTEXT_H
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
-#include "vserver/debug.h"
+
+#undef vxdprintk
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
extern int proc_pid_vx_info(struct task_struct *, char *);
{
if (!vxi)
return NULL;
- vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
+ vxdprintk("get_vx_info(%p[#%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
_file, _line);
atomic_inc(&vxi->vx_usecnt);
{
if (!vxi)
return;
- vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
+ vxdprintk("put_vx_info(%p[#%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0, vxi?atomic_read(&vxi->vx_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&vxi->vx_usecnt))
BUG_ON(*vxp);
if (!vxi)
return;
- vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d.%d])",
+ vxdprintk("set_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
vxi, vxi?vxi->vx_id:0,
vxi?atomic_read(&vxi->vx_usecnt):0,
vxi?atomic_read(&vxi->vx_refcnt):0,
if (!vxo)
return;
- vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d.%d])",
+ vxdprintk("clr_vx_info(%p[#%d.%d.%d])\t%s:%d\n",
vxo, vxo?vxo->vx_id:0,
vxo?atomic_read(&vxo->vx_usecnt):0,
vxo?atomic_read(&vxo->vx_refcnt):0,
struct vx_info *vxi;
task_lock(p);
- vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
- p, _file, _line);
vxi = __get_vx_info(p->vx_info, _file, _line);
task_unlock(p);
return vxi;
}
+#undef vxdprintk
+#define vxdprintk(x...)
+
#else
#warning duplicate inclusion
#endif
#ifndef _VX_VS_CVIRT_H
#define _VX_VS_CVIRT_H
+
+// #define VX_DEBUG
+
#include "vserver/cvirt.h"
-#include "vserver/debug.h"
#include "vs_base.h"
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
/* utsname virtualization */
char *file, int line)
{
if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
- vxlprintk(VXD_CBIT(cvirt, 2),
- "vx_map_tgid: %p/%llx: %d -> %d",
+ vxdprintk("vx_map_tgid: %p/%llx: %d -> %d in %s:%d\n",
vxi, vxi->vx_flags, pid,
(pid == vxi->vx_initpid)?1:pid,
file, line);
char *file, int line)
{
if (vxi && __vx_flags(vxi->vx_flags, VXF_INFO_INIT, 0)) {
- vxlprintk(VXD_CBIT(cvirt, 2),
- "vx_rmap_tgid: %p/%llx: %d -> %d",
+ vxdprintk("vx_rmap_tgid: %p/%llx: %d -> %d in %s:%d\n",
vxi, vxi->vx_flags, pid,
(pid == 1)?vxi->vx_initpid:pid,
file, line);
return pid;
}
+#undef vxdprintk
+#define vxdprintk(x...)
#else
#warning duplicate inclusion
#ifndef _VX_VS_DLIMIT_H
#define _VX_VS_DLIMIT_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/dlimit.h"
-#include "vserver/debug.h"
+
+#if defined(VX_DEBUG)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
#define get_dl_info(i) __get_dl_info(i,__FILE__,__LINE__)
{
if (!dli)
return NULL;
- vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
+ vxdprintk("get_dl_info(%p[#%d.%d])\t%s:%d\n",
dli, dli?dli->dl_xid:0, dli?atomic_read(&dli->dl_usecnt):0,
_file, _line);
atomic_inc(&dli->dl_usecnt);
#define put_dl_info(i) __put_dl_info(i,__FILE__,__LINE__)
-static inline void __put_dl_info(struct dl_info *dli,
- const char *_file, int _line)
+static inline void __put_dl_info(struct dl_info *dli, const char *_file, int _line)
{
if (!dli)
return;
- vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
+ vxdprintk("put_dl_info(%p[#%d.%d])\t%s:%d\n",
dli, dli?dli->dl_xid:0, dli?atomic_read(&dli->dl_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&dli->dl_usecnt))
}
+extern int vx_debug_dlimit;
+
#define __dlimit_char(d) ((d)?'*':' ')
static inline int __dl_alloc_space(struct super_block *sb,
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 1),
- "ALLOC (%p,#%d)%c %lld bytes (%d)",
- sb, xid, __dlimit_char(dli), nr, ret, file, line);
+ if (vx_debug_dlimit)
+ printk("ALLOC (%p,#%d)%c %lld bytes (%d)@ %s:%d\n",
+ sb, xid, __dlimit_char(dli), nr, ret, file, line);
return ret;
}
static inline void __dl_free_space(struct super_block *sb,
- xid_t xid, dlsize_t nr, const char *_file, int _line)
+ xid_t xid, dlsize_t nr, const char *file, int line)
{
struct dl_info *dli = NULL;
goto out;
spin_lock(&dli->dl_lock);
- if (dli->dl_space_used > nr)
- dli->dl_space_used -= nr;
- else
- dli->dl_space_used = 0;
+ dli->dl_space_used -= nr;
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 1),
- "FREE (%p,#%d)%c %lld bytes",
- sb, xid, __dlimit_char(dli), nr, _file, _line);
+ if (vx_debug_dlimit)
+ printk("FREE (%p,#%d)%c %lld bytes @ %s:%d\n",
+ sb, xid, __dlimit_char(dli), nr, file, line);
}
static inline int __dl_alloc_inode(struct super_block *sb,
- xid_t xid, const char *_file, int _line)
+ xid_t xid, const char *file, int line)
{
struct dl_info *dli;
int ret = 0;
ret = (dli->dl_inodes_used >= dli->dl_inodes_total);
if (!ret)
dli->dl_inodes_used++;
-#if 0
- else
- printk("VSW: DLIMIT hit (%p,#%d), inode %d>=%d @ %s:%d\n",
- sb, xid,
- dli->dl_inodes_used, dli->dl_inodes_total,
- file, line);
-#endif
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 0),
- "ALLOC (%p,#%d)%c inode (%d)",
- sb, xid, __dlimit_char(dli), ret, _file, _line);
+ if (vx_debug_dlimit)
+ printk("ALLOC (%p,#%d)%c inode (%d)@ %s:%d\n",
+ sb, xid, __dlimit_char(dli), ret, file, line);
return ret;
}
static inline void __dl_free_inode(struct super_block *sb,
- xid_t xid, const char *_file, int _line)
+ xid_t xid, const char *file, int line)
{
struct dl_info *dli;
goto out;
spin_lock(&dli->dl_lock);
- if (dli->dl_inodes_used > 1)
- dli->dl_inodes_used--;
- else
- dli->dl_inodes_used = 0;
+ dli->dl_inodes_used--;
spin_unlock(&dli->dl_lock);
put_dl_info(dli);
out:
- vxlprintk(VXD_CBIT(dlim, 0),
- "FREE (%p,#%d)%c inode",
- sb, xid, __dlimit_char(dli), _file, _line);
+ if (vx_debug_dlimit)
+ printk("FREE (%p,#%d)%c inode @ %s:%d\n",
+ sb, xid, __dlimit_char(dli), file, line);
}
-static inline void __dl_adjust_block(struct super_block *sb, xid_t xid,
- unsigned int *free_blocks, unsigned int *root_blocks,
- const char *_file, int _line)
-{
- struct dl_info *dli;
- uint64_t broot, bfree;
-
- dli = locate_dl_info(sb, xid);
- if (!dli)
- return;
-
- spin_lock(&dli->dl_lock);
- broot = (dli->dl_space_total -
- (dli->dl_space_total >> 10) * dli->dl_nrlmult)
- >> sb->s_blocksize_bits;
- bfree = (dli->dl_space_total - dli->dl_space_used)
- >> sb->s_blocksize_bits;
- spin_unlock(&dli->dl_lock);
-
- vxlprintk(VXD_CBIT(dlim, 2),
- "ADJUST: %lld,%lld on %d,%d [mult=%d]",
- bfree, broot, *free_blocks, *root_blocks,
- dli->dl_nrlmult, _file, _line);
- if (free_blocks) {
- if (*free_blocks > bfree)
- *free_blocks = bfree;
- }
- if (root_blocks) {
- if (*root_blocks > broot)
- *root_blocks = broot;
- }
- put_dl_info(dli);
-}
#define DLIMIT_ALLOC_BLOCK(sb, xid, nr) \
__dl_free_inode(sb, xid, __FILE__, __LINE__ )
-#define DLIMIT_ADJUST_BLOCK(sb, xid, fb, rb) \
- __dl_adjust_block(sb, xid, fb, rb, __FILE__, __LINE__ )
+#define DLIMIT_ADJUST_BLOCK(sb, xid, fb, rb)
#else
#ifndef _VX_VS_LIMIT_H
#define _VX_VS_LIMIT_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/limit.h"
-#include "vserver/debug.h"
/* file limits */
+#define VX_DEBUG_ACC_FILE 0
+#define VX_DEBUG_ACC_OPENFD 0
+
+#if (VX_DEBUG_ACC_FILE) || (VX_DEBUG_ACC_OPENFD)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
+
+#define vx_acc_cres(v,d,r) \
+ __vx_acc_cres((v), (r), (d), __FILE__, __LINE__)
static inline void __vx_acc_cres(struct vx_info *vxi,
- int res, int dir, void *_data, char *_file, int _line)
+ int res, int dir, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_NOFILE) ||
- VXD_RLIMIT(res, RLIMIT_NPROC) ||
- VXD_RLIMIT(res, VLIMIT_NSOCK))
- vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5d%s (%p)",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- (dir>0)?"++":"--", _data, _file, _line);
if (vxi) {
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_acc_cres[%5d,%2d]: %5d%s in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.rcur[res]):0),
+ (dir>0)?"++":"--", file, line);
if (dir > 0)
atomic_inc(&vxi->limit.rcur[res]);
else
}
}
-#define vx_acc_cres(v,d,p,r) \
- __vx_acc_cres((v), (r), (d), (p), __FILE__, __LINE__)
+#define vx_nproc_inc(p) vx_acc_cres(current->vx_info, 1, RLIMIT_NPROC)
+#define vx_nproc_dec(p) vx_acc_cres(current->vx_info,-1, RLIMIT_NPROC)
-#define vx_nproc_inc(p) \
- vx_acc_cres(current->vx_info, 1, (p), RLIMIT_NPROC)
-#define vx_nproc_dec(p) \
- vx_acc_cres(current->vx_info,-1, (p), RLIMIT_NPROC)
+#define vx_files_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_NOFILE)
+#define vx_files_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_NOFILE)
-#define vx_files_inc(f) \
- vx_acc_cres(current->vx_info, 1, (f), RLIMIT_NOFILE)
-#define vx_files_dec(f) \
- vx_acc_cres(current->vx_info,-1, (f), RLIMIT_NOFILE)
+#define vx_openfd_inc(f) vx_acc_cres(current->vx_info, 1, RLIMIT_OPENFD)
+#define vx_openfd_dec(f) vx_acc_cres(current->vx_info,-1, RLIMIT_OPENFD)
/*
#define vx_openfd_inc(f) do { \
__vx_cres_avail((v), (r), (n), __FILE__, __LINE__)
static inline int __vx_cres_avail(struct vx_info *vxi,
- int res, int num, char *_file, int _line)
+ int res, int num, char *file, int line)
{
unsigned long value;
- if (VXD_RLIMIT(res, RLIMIT_NOFILE) ||
- VXD_RLIMIT(res, RLIMIT_NPROC) ||
- VXD_RLIMIT(res, VLIMIT_NSOCK))
- vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
+ if ((res == RLIMIT_NOFILE && VX_DEBUG_ACC_FILE) ||
+ (res == RLIMIT_OPENFD && VX_DEBUG_ACC_OPENFD))
+ printk("vx_cres_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
(vxi?vxi->limit.rlim[res]:1),
(vxi?atomic_read(&vxi->limit.rcur[res]):0),
- num, _file, _line);
+ num, file, line);
if (!vxi)
return 1;
value = atomic_read(&vxi->limit.rcur[res]);
#define vx_files_avail(n) \
vx_cres_avail(current->vx_info, (n), RLIMIT_NOFILE)
+#define vx_openfd_avail(n) \
+ vx_cres_avail(current->vx_info, (n), RLIMIT_OPENFD)
+
/* socket limits */
-#define vx_sock_inc(s) \
- vx_acc_cres((s)->sk_vx_info, 1, (s), VLIMIT_NSOCK)
-#define vx_sock_dec(s) \
- vx_acc_cres((s)->sk_vx_info,-1, (s), VLIMIT_NSOCK)
+#define vx_sock_inc(f) vx_acc_cres(current->vx_info, 1, VLIMIT_SOCK)
+#define vx_sock_dec(f) vx_acc_cres(current->vx_info,-1, VLIMIT_SOCK)
#define vx_sock_avail(n) \
- vx_cres_avail(current->vx_info, (n), VLIMIT_NSOCK)
+ vx_cres_avail(current->vx_info, (n), VLIMIT_SOCK)
+
#else
#warning duplicate inclusion
#ifndef _VX_VS_MEMORY_H
#define _VX_VS_MEMORY_H
+
+// #define VX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/context.h"
#include "vserver/limit.h"
-#include "vserver/debug.h"
+#define VX_DEBUG_ACC_RSS 0
+#define VX_DEBUG_ACC_VM 0
+#define VX_DEBUG_ACC_VML 0
+
+#if (VX_DEBUG_ACC_RSS) || (VX_DEBUG_ACC_VM) || (VX_DEBUG_ACC_VML)
+#define vxdprintk(x...) printk("vxd: " x)
+#else
+#define vxdprintk(x...)
+#endif
+
#define vx_acc_page(m, d, v, r) \
__vx_acc_page(&(m->v), m->mm_vx_info, r, d, __FILE__, __LINE__)
static inline void __vx_acc_page(unsigned long *v, struct vx_info *vxi,
int res, int dir, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_acc_page[%5d,%s,%2d]: %5d%s",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- (dir?"++":"--"), file, line);
if (v) {
if (dir > 0)
++(*v);
__vx_acc_pages(&(m->v), m->mm_vx_info, r, p, __FILE__, __LINE__)
static inline void __vx_acc_pages(unsigned long *v, struct vx_info *vxi,
- int res, int pages, char *_file, int _line)
+ int res, int pages, char *file, int line)
{
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_acc_pages[%5d,%s,%2d]: %5d += %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
- (vxi?atomic_read(&vxi->limit.rcur[res]):0),
- pages, _file, _line);
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ vxdprintk("vx_acc_pages [%5d,%2d]: %5d += %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
+ (vxi?atomic_read(&vxi->limit.rcur[res]):0),
+ pages, file, line);
if (pages == 0)
return;
if (v)
#define vx_acc_rsspages(m,p) vx_acc_pages(m, p, rss, RLIMIT_RSS)
#define vx_pages_add(s,r,p) __vx_acc_pages(0, s, r, p, __FILE__, __LINE__)
-#define vx_pages_sub(s,r,p) vx_pages_add(s, r, -(p))
+#define vx_pages_sub(s,r,p) vx_pages_add(s, r, -(p))
#define vx_vmpages_inc(m) vx_acc_vmpage(m, 1)
#define vx_vmpages_dec(m) vx_acc_vmpage(m,-1)
__vx_pages_avail((m)->mm_vx_info, (r), (p), __FILE__, __LINE__)
static inline int __vx_pages_avail(struct vx_info *vxi,
- int res, int pages, char *_file, int _line)
+ int res, int pages, char *file, int line)
{
unsigned long value;
- if (VXD_RLIMIT(res, RLIMIT_RSS) ||
- VXD_RLIMIT(res, RLIMIT_AS) ||
- VXD_RLIMIT(res, RLIMIT_MEMLOCK))
- vxlprintk(1, "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d",
- (vxi?vxi->vx_id:-1), vlimit_name[res], res,
+ if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
+ (res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
+ (res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
+ printk("vx_pages_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
+ (vxi?vxi->vx_id:-1), res,
(vxi?vxi->limit.rlim[res]:1),
(vxi?atomic_read(&vxi->limit.rcur[res]):0),
- pages, _file, _line);
+ pages, file, line);
if (!vxi)
return 1;
value = atomic_read(&vxi->limit.rcur[res]);
#ifndef _NX_VS_NETWORK_H
#define _NX_VS_NETWORK_H
+
+// #define NX_DEBUG
+
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "vserver/network.h"
-#include "vserver/debug.h"
+
+#if defined(NX_DEBUG)
+#define nxdprintk(x...) printk("nxd: " x)
+#else
+#define nxdprintk(x...)
+#endif
extern int proc_pid_nx_info(struct task_struct *, char *);
{
if (!nxi)
return NULL;
- vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+ nxdprintk("get_nx_info(%p[#%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
atomic_inc(&nxi->nx_usecnt);
}
-#define free_nx_info(i) \
- call_rcu(&i->nx_rcu, rcu_free_nx_info);
+#define free_nx_info(nxi) \
+ call_rcu(&nxi->nx_rcu, rcu_free_nx_info);
#define put_nx_info(i) __put_nx_info(i,__FILE__,__LINE__)
{
if (!nxi)
return;
- vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+ nxdprintk("put_nx_info(%p[#%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0, nxi?atomic_read(&nxi->nx_usecnt):0,
_file, _line);
if (atomic_dec_and_test(&nxi->nx_usecnt))
BUG_ON(*nxp);
if (!nxi)
return;
- vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d.%d])",
+ nxdprintk("set_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
nxi, nxi?nxi->nx_id:0,
nxi?atomic_read(&nxi->nx_usecnt):0,
nxi?atomic_read(&nxi->nx_refcnt):0,
if (!nxo)
return;
- vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d.%d])",
+ nxdprintk("clr_nx_info(%p[#%d.%d.%d])\t%s:%d\n",
nxo, nxo?nxo->nx_id:0,
nxo?atomic_read(&nxo->nx_usecnt):0,
nxo?atomic_read(&nxo->nx_refcnt):0,
task_lock(p);
nxi = __get_nx_info(p->nx_info, _file, _line);
- vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
- p, _file, _line);
task_unlock(p);
return nxi;
}
#define nx_weak_check(c,m) ((m) ? nx_check(c,m) : 1)
+#undef nxdprintk
+#define nxdprintk(x...)
+
#define __nx_flags(v,m,f) (((v) & (m)) ^ (f))
-#ifndef _VX_VS_SOCKET_H
-#define _VX_VS_SOCKET_H
+#ifndef _VX_VS_LIMIT_H
+#define _VX_VS_LIMIT_H
// #define VX_DEBUG
#include "vserver/context.h"
#include "vserver/network.h"
-#include "vserver/debug.h"
/* socket accounting */
#define VX_ATR_MASK 0x0F00
-struct rcu_head;
-
extern void rcu_free_vx_info(struct rcu_head *);
extern void unhash_vx_info(struct vx_info *);
-/* _VX_CVIRT_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <linux/utsname.h>
struct _vx_cvirt {
int max_threads;
+ unsigned int bias_cswtch;
struct timespec bias_idle;
+ struct timespec bias_tp;
uint64_t bias_jiffies;
struct new_utsname utsname;
{
uint64_t idle_jiffies = vx_idle_jiffies();
+ // new->virt.bias_cswtch = kstat.context_swtch;
cvirt->bias_jiffies = get_jiffies_64();
+
jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
+ do_posix_clock_monotonic_gettime(&cvirt->bias_tp);
down_read(&uts_sem);
cvirt->utsname = system_utsname;
static inline int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
{
int length = 0;
- length += sprintf(buffer + length,
- "BiasJiffies:\t%lld\n", (long long int)cvirt->bias_jiffies);
- length += sprintf(buffer + length,
- "SysName:\t%.*s\n"
- "NodeName:\t%.*s\n"
- "Release:\t%.*s\n"
- "Version:\t%.*s\n"
- "Machine:\t%.*s\n"
- "DomainName:\t%.*s\n"
- ,__NEW_UTS_LEN, cvirt->utsname.sysname
- ,__NEW_UTS_LEN, cvirt->utsname.nodename
- ,__NEW_UTS_LEN, cvirt->utsname.release
- ,__NEW_UTS_LEN, cvirt->utsname.version
- ,__NEW_UTS_LEN, cvirt->utsname.machine
- ,__NEW_UTS_LEN, cvirt->utsname.domainname
- );
return length;
}
+++ /dev/null
-#ifndef _VX_DEBUG_H
-#define _VX_DEBUG_H
-
-
-extern unsigned int vx_debug_switch;
-extern unsigned int vx_debug_xid;
-extern unsigned int vx_debug_nid;
-extern unsigned int vx_debug_net;
-extern unsigned int vx_debug_limit;
-extern unsigned int vx_debug_dlim;
-extern unsigned int vx_debug_cvirt;
-
-
-#define VXD_CBIT(n,m) (vx_debug_ ## n & (1 << (m)))
-#define VXD_CMIN(n,m) (vx_debug_ ## n > (m))
-#define VXD_MASK(n,m) (vx_debug_ ## n & (m))
-
-// #define VXD_HERE __FILE__, __LINE__
-
-
-#ifdef CONFIG_VSERVER_DEBUG
-
-#define VX_LOGLEVEL "vxD: "
-
-#define vxdprintk(c,f,x...) \
- do { \
- if (c) \
- printk(VX_LOGLEVEL f "\n", x); \
- } while (0)
-
-#define vxlprintk(c,f,x...) \
- do { \
- if (c) \
- printk(VX_LOGLEVEL f " @%s:%d\n", x); \
- } while (0)
-
-#else
-
-#define vxdprintk(x...) do { } while (0)
-#define vxlprintk(x...) do { } while (0)
-
-#endif
-
-
-
-#endif /* _VX_DEBUG_H */
unsigned int dl_nrlmult; /* non root limit mult */
};
-struct rcu_head;
-
extern void rcu_free_dl_info(struct rcu_head *);
extern void unhash_dl_info(struct dl_info *);
#define IATTR_IMMUTABLE 0x00040000
-#ifdef CONFIG_VSERVER_PROC_SECURE
+#ifdef CONFIG_PROC_SECURE
#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE )
#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
#else
extern int vc_get_iattr(uint32_t, void __user *);
extern int vc_set_iattr(uint32_t, void __user *);
-extern int vc_iattr_ioctl(struct dentry *de,
- unsigned int cmd,
- unsigned long arg);
-
#endif /* __KERNEL__ */
/* inode ioctls */
#define FIOC_GETXFLG _IOR('x', 5, long)
#define FIOC_SETXFLG _IOW('x', 6, long)
-#define FIOC_GETIATTR _IOR('x', 7, long)
-#define FIOC_SETIATTR _IOR('x', 8, long)
-
#endif /* _VX_INODE_H */
-/* _VX_LIMIT_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <asm/atomic.h>
/* context sub struct */
-#define NUM_LIMITS 20
+#define RLIMIT_OPENFD 12
+
+#define NUM_RLIMITS 16
-#define VLIMIT_NSOCK 16
+#define VLIMIT_SOCK 16
-extern const char *vlimit_name[NUM_LIMITS];
struct _vx_limit {
atomic_t ticks;
- unsigned long rlim[NUM_LIMITS]; /* Context limit */
- unsigned long rmax[NUM_LIMITS]; /* Context maximum */
- atomic_t rcur[NUM_LIMITS]; /* Current value */
- atomic_t lhit[NUM_LIMITS]; /* Limit hits */
+ unsigned long rlim[NUM_RLIMITS]; /* Context limit */
+ unsigned long rmax[NUM_RLIMITS]; /* Context maximum */
+ atomic_t rcur[NUM_RLIMITS]; /* Current value */
+ atomic_t lhit[NUM_RLIMITS]; /* Limit hits */
};
static inline void vx_info_init_limit(struct _vx_limit *limit)
{
int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
limit->rlim[lim] = RLIM_INFINITY;
limit->rmax[lim] = 0;
atomic_set(&limit->rcur[lim], 0);
}
}
+extern unsigned int vx_debug_limit;
+
static inline void vx_info_exit_limit(struct _vx_limit *limit)
{
-#ifdef CONFIG_VSERVER_DEBUG
unsigned long value;
unsigned int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ if (!vx_debug_limit)
+ return;
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
value = atomic_read(&limit->rcur[lim]);
if (value)
- printk("!!! limit: %p[%s,%d] = %ld on exit.\n",
- limit, vlimit_name[lim], lim, value);
+ printk("!!! limit: %p[%d] = %ld on exit.\n",
+ limit, lim, value);
}
-#endif
}
static inline void vx_limit_fixup(struct _vx_limit *limit)
unsigned long value;
unsigned int lim;
- for (lim=0; lim<NUM_LIMITS; lim++) {
+ for (lim=0; lim<NUM_RLIMITS; lim++) {
value = atomic_read(&limit->rcur[lim]);
if (value > limit->rmax[lim])
limit->rmax[lim] = value;
"VML" VX_LIMIT_FMT
"RSS" VX_LIMIT_FMT
"FILES" VX_LIMIT_FMT
- "SOCK" VX_LIMIT_FMT
+ "OFD" VX_LIMIT_FMT
VX_LIMIT_ARG(RLIMIT_NPROC)
VX_LIMIT_ARG(RLIMIT_AS)
VX_LIMIT_ARG(RLIMIT_MEMLOCK)
VX_LIMIT_ARG(RLIMIT_RSS)
VX_LIMIT_ARG(RLIMIT_NOFILE)
- VX_LIMIT_ARG(VLIMIT_NSOCK)
+ VX_LIMIT_ARG(RLIMIT_OPENFD)
);
}
#include "switch.h"
-#define VXD_RLIMIT(r,l) (VXD_CBIT(limit, (l)) && ((r) == (l)))
-
/* rlimit vserver commands */
#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
#endif /* _VX_LIMIT_H */
#endif
-
-
};
-struct rcu_head;
-
extern void rcu_free_nx_info(struct rcu_head *);
extern void unhash_nx_info(struct nx_info *);
-/* _VX_SCHED_H defined below */
-
#if defined(__KERNEL__) && defined(_VX_INFO_DEF_)
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <linux/cpumask.h>
#include <asm/atomic.h>
#include <asm/param.h>
+#include <linux/cpumask.h>
/* context sub struct */
/* interface version */
-#define VCI_VERSION 0x00010020
+#define VCI_VERSION 0x00010016
/* query version */
-#ifndef _VX_XID_H
-#define _VX_XID_H
-
-
-#define XID_TAG(in) (!(in) || \
- (((struct inode *)in)->i_sb && \
- (((struct inode *)in)->i_sb->s_flags & MS_TAGXID)))
-
+#ifndef _LINUX_XID_H_
+#define _LINUX_XID_H_
#ifdef CONFIG_INOXID_NONE
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) (0)
+#define INOXID_XID(uid, gid, xid) (0)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0x0000FFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (((gid) >> 16) & 0xFFFF) : 0)
+#define INOXID_XID(uid, gid, xid) (((gid) >> 16) & 0xFFFF)
+
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (((gid) & 0xFFFF) | ((xid) << 16))
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFF) | ((xid) << 16)) : (gid))
#endif
-#ifdef CONFIG_INOXID_UGID24
+#ifdef CONFIG_INOXID_GID24
#define MAX_UID 0x00FFFFFF
#define MAX_GID 0x00FFFFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+#define INOXID_XID(uid, gid, xid) ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF))
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFFFF) | (((xid) & 0xFF00) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) \
- ((tag) ? (((gid) & 0xFFFFFF) | (((xid) & 0x00FF) << 24)) : (gid))
+#define XIDINO_UID(uid, xid) (((uid) & 0xFFFFFF) | (((xid) & 0xFF00) << 16))
+#define XIDINO_GID(gid, xid) (((gid) & 0xFFFFFF) | (((xid) & 0x00FF) << 24))
#endif
-#ifdef CONFIG_INOXID_UID16
-
-#define MAX_UID 0x0000FFFF
-#define MAX_GID 0xFFFFFFFF
-
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? ((uid) >> 16) & 0xFFFF) : 0)
-
-#define XIDINO_UID(tag, uid, xid) \
- ((tag) ? (((uid) & 0xFFFF) | ((xid) << 16)) : (uid))
-#define XIDINO_GID(tag, gid, xid) (gid)
-
-#endif
-
-
-#ifdef CONFIG_INOXID_INTERN
+#ifdef CONFIG_INOXID_GID32
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) \
- ((tag) ? (xid) : 0)
+#define INOXID_XID(uid, gid, xid) (xid)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
#define MAX_UID 0xFFFFFFFF
#define MAX_GID 0xFFFFFFFF
-#define INOXID_XID(tag, uid, gid, xid) (0)
+#define INOXID_XID(uid, gid, xid) (0)
-#define XIDINO_UID(tag, uid, xid) (uid)
-#define XIDINO_GID(tag, gid, xid) (gid)
+#define XIDINO_UID(uid, xid) (uid)
+#define XIDINO_GID(gid, xid) (gid)
#endif
-#define INOXID_UID(tag, uid, gid) \
- ((tag) ? ((uid) & MAX_UID) : (uid))
-#define INOXID_GID(tag, uid, gid) \
- ((tag) ? ((gid) & MAX_GID) : (gid))
-
+#define INOXID_UID(uid, gid) ((uid) & MAX_UID)
+#define INOXID_GID(uid, gid) ((gid) & MAX_GID)
static inline uid_t vx_map_uid(uid_t uid)
{
#define FIOC_SETXIDJ _IOW('x', 3, long)
#endif
-#endif /* _VX_XID_H */
+#endif /* _LINUX_XID_H_ */
void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(int currcons, struct console_font_op *op);
-int con_font_set(int currcons, struct console_font_op *op);
-int con_font_get(int currcons, struct console_font_op *op);
-int con_font_default(int currcons, struct console_font_op *op);
-int con_font_copy(int currcons, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
void scrollback(int);
struct ctl_table;
struct file;
int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
struct mtd_oob_buf {
uint32_t start;
uint32_t length;
- unsigned char __user *ptr;
+ unsigned char *ptr;
};
#define MTD_ABSENT 0
inet6_ifa_finish_destroy(ifp);
}
-#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt)
-#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt)
+#define __in6_ifa_put(idev) atomic_dec(&(idev)->refcnt)
+#define in6_ifa_hold(idev) atomic_inc(&(idev)->refcnt)
extern void addrconf_forwarding_on(void);
* This will include the IEEE address token on links that support it.
*/
- word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
- word ^= (word >> 16);
+ word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
+ word ^= (word>>16);
word ^= (word >> 8);
return ((word ^ (word >> 4)) & 0x0f);
__u8 pscan_period_mode;
__u8 dev_class[3];
__u16 clock_offset;
- __s8 rssi;
+ __u8 rssi;
} __attribute__ ((packed));
#define HCI_EV_CONN_COMPLETE 0x03
#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
+#define ICMP_INC_STATS_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[!in_softirq()],\
+ smp_processor_id()) + offt)))++
+#define ICMP_INC_STATS_BH_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[0], \
+ smp_processor_id()) + offt)))++
+#define ICMP_INC_STATS_USER_FIELD(offt) \
+ (*((unsigned long *) ((void *) \
+ per_cpu_ptr(icmp_statistics[1], \
+ smp_processor_id()) + offt)))++
extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
extern int icmp_rcv(struct sk_buff *skb);
#include <linux/ip.h>
-enum {
- INET_ECN_NOT_ECT = 0,
- INET_ECN_ECT_1 = 1,
- INET_ECN_ECT_0 = 2,
- INET_ECN_CE = 3,
- INET_ECN_MASK = 3,
-};
-
static inline int INET_ECN_is_ce(__u8 dsfield)
{
- return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
+ return (dsfield&3) == 3;
}
static inline int INET_ECN_is_not_ce(__u8 dsfield)
{
- return (dsfield & INET_ECN_MASK) == INET_ECN_ECT_0;
+ return (dsfield&3) == 2;
}
static inline int INET_ECN_is_capable(__u8 dsfield)
{
- return (dsfield & INET_ECN_ECT_0);
+ return (dsfield&2);
}
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
- outer &= ~INET_ECN_MASK;
+ outer &= ~3;
if (INET_ECN_is_capable(inner))
- outer |= (inner & INET_ECN_MASK);
+ outer |= (inner & 3);
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define INET_ECN_dontxmit(sk) \
- do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= 2; } while (0)
+#define INET_ECN_dontxmit(sk) do { inet_sk(sk)->tos &= ~3; } while (0)
-#define IP6_ECN_flow_init(label) do { \
- (label) &= ~htonl(INET_ECN_MASK << 20); \
+#define IP6_ECN_flow_init(label) do { \
+ (label) &= ~htonl(3<<20); \
} while (0)
-#define IP6_ECN_flow_xmit(sk, label) do { \
- if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
- (label) |= __constant_htons(INET_ECN_ECT_0 << 4); \
+#define IP6_ECN_flow_xmit(sk, label) do { \
+ if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+ (label) |= __constant_htons(2 << 4); \
} while (0)
static inline void IP_ECN_set_ce(struct iphdr *iph)
u32 check = iph->check;
check += __constant_htons(0xFFFE);
iph->check = check + (check>=0xFFFF);
- iph->tos |= INET_ECN_CE;
+ iph->tos |= 1;
}
static inline void IP_ECN_clear(struct iphdr *iph)
{
- iph->tos &= ~INET_ECN_MASK;
+ iph->tos &= ~3;
}
struct ipv6hdr;
static inline void IP6_ECN_set_ce(struct ipv6hdr *iph)
{
- *(u32*)iph |= htonl(INET_ECN_CE << 20);
+ *(u32*)iph |= htonl(1<<20);
}
static inline void IP6_ECN_clear(struct ipv6hdr *iph)
{
- *(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
+ *(u32*)iph &= ~htonl(3<<20);
}
#define ip6_get_dsfield(iph) ((ntohs(*(u16*)(iph)) >> 4) & 0xFF)
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
-/* datagram.c */
-extern int ip4_datagram_connect(struct sock *sk,
- struct sockaddr *uaddr, int addr_len);
/*
* Map a multicast IP onto multicast MAC for type Token Ring.
}
struct ip_reply_arg {
- struct kvec iov[1];
- u32 csum;
- int csumoffset; /* u16 offset of csum in iov[0].iov_base */
- /* -1 if not needed */
+ struct iovec iov[1];
+ u32 csum;
+ int csumoffset; /* u16 offset of csum in iov[0].iov_base */
+ /* -1 if not needed */
};
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
*/
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ size_t *lenp);
int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
/*
* Store a destination cache entry in a socket
+ * For UDP/RAW sockets this is done on udp_connect.
*/
+
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct in6_addr *daddr)
{
extern void ipv6_packet_cleanup(void);
-extern int ip6_datagram_connect(struct sock *sk,
- struct sockaddr *addr, int addr_len);
-
extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, u16 port,
u32 info, u8 *payload);
return(irlap_is_primary(self->lsap->lap->irlap));
}
+extern struct irttp_cb *irttp;
+
#endif /* IRTTP_H */
int write,
struct file * filp,
void __user *buffer,
- size_t *lenp,
- loff_t *ppos);
+ size_t *lenp);
#endif
extern void inet6_ifinfo_notify(int event,
* nr_node & nr_neigh lists, refcounting and locking
*********************************************************************/
+extern struct hlist_head nr_node_list;
+extern struct hlist_head nr_neigh_list;
+
#define nr_node_hold(__nr_node) \
atomic_inc(&((__nr_node)->refcount))
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
+#define PSCHED_GETTIMEOFDAY 1
+#define PSCHED_JIFFIES 2
+#define PSCHED_CPU 3
+
+#define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES
+
#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
+#ifdef CONFIG_X86_TSC
+#include <asm/msr.h>
+#endif
+
+
struct rtattr;
struct Qdisc;
#define TCQ_F_BUILTIN 1
#define TCQ_F_THROTTLED 2
#define TCQ_F_INGRES 4
- int padded;
struct Qdisc_ops *ops;
+ struct Qdisc *next;
u32 handle;
atomic_t refcnt;
struct sk_buff_head q;
struct net_device *dev;
- struct list_head list;
struct tc_stats stats;
spinlock_t *stats_lock;
* and it will live until better solution will be invented.
*/
struct Qdisc *__parent;
-};
-
-#define QDISC_ALIGN 32
-#define QDISC_ALIGN_CONST (QDISC_ALIGN - 1)
-static inline void *qdisc_priv(struct Qdisc *q)
-{
- return (char *)q + ((sizeof(struct Qdisc) + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
-}
+ char data[0];
+};
struct qdisc_rate_table
{
int refcnt;
};
-extern void qdisc_lock_tree(struct net_device *dev);
-extern void qdisc_unlock_tree(struct net_device *dev);
+static inline void sch_tree_lock(struct Qdisc *q)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&q->dev->queue_lock);
+}
+
+static inline void sch_tree_unlock(struct Qdisc *q)
+{
+ spin_unlock_bh(&q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+static inline void tcf_tree_lock(struct tcf_proto *tp)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&tp->q->dev->queue_lock);
+}
+
+static inline void tcf_tree_unlock(struct tcf_proto *tp)
+{
+ spin_unlock_bh(&tp->q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+
+static inline unsigned long
+cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
+{
+ unsigned long old_cl;
-#define sch_tree_lock(q) qdisc_lock_tree((q)->dev)
-#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev)
-#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev)
-#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev)
+ tcf_tree_lock(tp);
+ old_cl = *clp;
+ *clp = cl;
+ tcf_tree_unlock(tp);
+ return old_cl;
+}
-#define cls_set_class(tp, clp, cl) tcf_set_class(tp, clp, cl)
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
The reason is that, when it is not the same thing as
gettimeofday, it returns invalid timestamp, which is
not updated, when net_bh is active.
+
+ So, use PSCHED_CLOCK_SOURCE = PSCHED_CPU on alpha and pentiums
+ with rtdsc. And PSCHED_JIFFIES on all other architectures, including [34]86
+ and pentiums without rtdsc.
+ You can use PSCHED_GETTIMEOFDAY on another architectures,
+ which have fast and precise clock source, but it is too expensive.
*/
/* General note about internal clock.
Any clock source returns time intervals, measured in units
- close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
+ close to 1usec. With source PSCHED_GETTIMEOFDAY it is precisely
microseconds, otherwise something close but different chosen to minimize
arithmetic cost. Ratio usec/internal untis in form nominator/denominator
may be read from /proc/net/psched.
*/
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
typedef struct timeval psched_time_t;
typedef long psched_tdiff_t;
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
-#ifdef CONFIG_NET_SCH_CLK_JIFFIES
+extern psched_time_t psched_time_base;
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
#if HZ < 96
#define PSCHED_JSCALE 14
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
-#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
-#ifdef CONFIG_NET_SCH_CLK_CPU
-#include <asm/timex.h>
+#elif PSCHED_CLOCK_SOURCE == PSCHED_CPU
extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
-extern psched_time_t psched_time_base;
-extern cycles_t psched_time_mark;
-
-#define PSCHED_GET_TIME(stamp) \
-do { \
- cycles_t cur = get_cycles(); \
- if (sizeof(cycles_t) == sizeof(u32)) { \
- if (cur <= psched_time_mark) \
- psched_time_base += 0x100000000ULL; \
- psched_time_mark = cur; \
- (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
- } else { \
- (stamp) = cur>>psched_clock_scale; \
- } \
-} while (0)
+
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
-#endif /* CONFIG_NET_SCH_CLK_CPU */
+#ifdef CONFIG_X86_TSC
+
+#define PSCHED_GET_TIME(stamp) \
+({ u64 __cur; \
+ rdtscll(__cur); \
+ (stamp) = __cur>>psched_clock_scale; \
+})
+
+#elif defined (__alpha__)
+
+#define PSCHED_WATCHER u32
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+extern PSCHED_WATCHER psched_time_mark;
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#define PSCHED_GET_TIME(stamp) \
+({ u32 __res; \
+ __asm__ __volatile__ ("rpcc %0" : "=r"(__res)); \
+ if (__res <= psched_time_mark) psched_time_base += 0x100000000UL; \
+ psched_time_mark = __res; \
+ (stamp) = (psched_time_base + __res)>>psched_clock_scale; \
+})
+
+#else
+
+#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
+
+#endif /* ARCH */
+
+#endif /* PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES */
+
+#endif /* PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY */
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#define PSCHED_TDIFF(tv1, tv2) \
({ \
int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#else
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#endif
struct tcf_police
{
extern int tcf_act_police(struct sk_buff **skb, struct tc_action *a);
#endif
-extern unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
- unsigned long cl);
extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t *lock);
extern void tcf_police_destroy(struct tcf_police *p);
extern int qdisc_restart(struct net_device *dev);
+static inline void qdisc_run(struct net_device *dev)
+{
+ while (!netif_queue_stopped(dev) &&
+ qdisc_restart(dev)<0)
+ /* NOTHING */;
+}
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
#include <linux/route.h>
#include <linux/ip.h>
#include <linux/cache.h>
-#include <linux/vs_base.h>
-#include <linux/vs_context.h>
-#include <linux/vs_network.h>
#ifndef __KERNEL__
#warning This file is not supposed to be used outside of kernel.
SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
- SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
- SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
- SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
SCTP_CMD_LAST
} sctp_verb_t;
SCTP_IERROR_BAD_TAG,
SCTP_IERROR_BIG_GAP,
SCTP_IERROR_DUP_TSN,
- SCTP_IERROR_HIGH_TSN,
- SCTP_IERROR_IGNORE_TSN,
- SCTP_IERROR_NO_DATA,
- SCTP_IERROR_BAD_STREAM,
} sctp_ierror_t;
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
-int sctp_eat_data(const struct sctp_association *asoc,
- struct sctp_chunk *chunk,
- sctp_cmd_seq_t *commands);
/* 3rd level prototypes */
__u32 sctp_generate_tag(const struct sctp_endpoint *);
#define _SNMP_H
#include <linux/cache.h>
-#include <linux/snmp.h>
-
-/*
- * Mibs are stored in array of unsigned long.
- */
+
/*
- * struct snmp_mib{}
- * - list of entries for particular API (such as /proc/net/snmp)
- * - name of entries.
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
*/
-struct snmp_mib {
+
+/*
+ * The rule for padding:
+ * Best is power of two because then the right structure can be found by a simple
+ * shift. The structure should be always cache line aligned.
+ * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add instructions
+ * to emulate multiply in case it is not power-of-two. Currently n is always <=3 for
+ * all sizes so simple cache line alignment is enough.
+ *
+ * The best solution would be a global CPU local area, especially on 64- and
+ * 128-byte cacheline machines it makes a *lot* of sense -AK
+ */
+
+struct snmp_item {
char *name;
- int entry;
+ int offset;
};
-#define SNMP_MIB_ITEM(_name,_entry) { \
- .name = _name, \
- .entry = _entry, \
+#define SNMP_ITEM(mib,entry,procname) { \
+ .name = procname, \
+ .offset = offsetof(mib, entry), \
}
-#define SNMP_MIB_SENTINEL { \
- .name = NULL, \
- .entry = 0, \
+#define SNMP_ITEM_SENTINEL { \
+ .name = NULL, \
+ .offset = 0, \
}
/*
- * We use all unsigned longs. Linux will soon be so reliable that even
- * these will rapidly get too small 8-). Seriously consider the IpInReceives
- * count on the 20Gb/s + networks people expect in a few years time!
+ * RFC 1213: MIB-II
+ * RFC 2011 (updates 1213): SNMPv2-MIB-IP
+ * RFC 2863: Interfaces Group MIB
+ * RFC 2465: IPv6 MIB: General Group
+ * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables
*/
+struct ipstats_mib
+{
+ unsigned long InReceives;
+ unsigned long InHdrErrors;
+ unsigned long InTooBigErrors;
+ unsigned long InNoRoutes;
+ unsigned long InAddrErrors;
+ unsigned long InUnknownProtos;
+ unsigned long InTruncatedPkts;
+ unsigned long InDiscards;
+ unsigned long InDelivers;
+ unsigned long OutForwDatagrams;
+ unsigned long OutRequests;
+ unsigned long OutDiscards;
+ unsigned long OutNoRoutes;
+ unsigned long ReasmTimeout;
+ unsigned long ReasmReqds;
+ unsigned long ReasmOKs;
+ unsigned long ReasmFails;
+ unsigned long FragOKs;
+ unsigned long FragFails;
+ unsigned long FragCreates;
+ unsigned long InMcastPkts;
+ unsigned long OutMcastPkts;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II ICMP Group
+ * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group
+ */
+struct icmp_mib
+{
+ unsigned long IcmpInMsgs;
+ unsigned long IcmpInErrors;
+ unsigned long IcmpInDestUnreachs;
+ unsigned long IcmpInTimeExcds;
+ unsigned long IcmpInParmProbs;
+ unsigned long IcmpInSrcQuenchs;
+ unsigned long IcmpInRedirects;
+ unsigned long IcmpInEchos;
+ unsigned long IcmpInEchoReps;
+ unsigned long IcmpInTimestamps;
+ unsigned long IcmpInTimestampReps;
+ unsigned long IcmpInAddrMasks;
+ unsigned long IcmpInAddrMaskReps;
+ unsigned long IcmpOutMsgs;
+ unsigned long IcmpOutErrors;
+ unsigned long IcmpOutDestUnreachs;
+ unsigned long IcmpOutTimeExcds;
+ unsigned long IcmpOutParmProbs;
+ unsigned long IcmpOutSrcQuenchs;
+ unsigned long IcmpOutRedirects;
+ unsigned long IcmpOutEchos;
+ unsigned long IcmpOutEchoReps;
+ unsigned long IcmpOutTimestamps;
+ unsigned long IcmpOutTimestampReps;
+ unsigned long IcmpOutAddrMasks;
+ unsigned long IcmpOutAddrMaskReps;
+ unsigned long dummy;
+ unsigned long __pad[0];
+};
-/*
- * The rule for padding:
- * Best is power of two because then the right structure can be found by a
- * simple shift. The structure should be always cache line aligned.
- * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
- * instructions to emulate multiply in case it is not power-of-two.
- * Currently n is always <=3 for all sizes so simple cache line alignment
- * is enough.
- *
- * The best solution would be a global CPU local area , especially on 64
- * and 128byte cacheline machine it makes a *lot* of sense -AK
- */
+/*
+ * RFC 2466: ICMPv6-MIB
+ */
+struct icmpv6_mib
+{
+ unsigned long Icmp6InMsgs;
+ unsigned long Icmp6InErrors;
+
+ unsigned long Icmp6InDestUnreachs;
+ unsigned long Icmp6InPktTooBigs;
+ unsigned long Icmp6InTimeExcds;
+ unsigned long Icmp6InParmProblems;
+
+ unsigned long Icmp6InEchos;
+ unsigned long Icmp6InEchoReplies;
+ unsigned long Icmp6InGroupMembQueries;
+ unsigned long Icmp6InGroupMembResponses;
+ unsigned long Icmp6InGroupMembReductions;
+ unsigned long Icmp6InRouterSolicits;
+ unsigned long Icmp6InRouterAdvertisements;
+ unsigned long Icmp6InNeighborSolicits;
+ unsigned long Icmp6InNeighborAdvertisements;
+ unsigned long Icmp6InRedirects;
+
+ unsigned long Icmp6OutMsgs;
+
+ unsigned long Icmp6OutDestUnreachs;
+ unsigned long Icmp6OutPktTooBigs;
+ unsigned long Icmp6OutTimeExcds;
+ unsigned long Icmp6OutParmProblems;
+
+ unsigned long Icmp6OutEchoReplies;
+ unsigned long Icmp6OutRouterSolicits;
+ unsigned long Icmp6OutNeighborSolicits;
+ unsigned long Icmp6OutNeighborAdvertisements;
+ unsigned long Icmp6OutRedirects;
+ unsigned long Icmp6OutGroupMembResponses;
+ unsigned long Icmp6OutGroupMembReductions;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II TCP group
+ * RFC 2012 (updates 1213): SNMPv2-MIB-TCP
+ */
+struct tcp_mib
+{
+ unsigned long TcpRtoAlgorithm;
+ unsigned long TcpRtoMin;
+ unsigned long TcpRtoMax;
+ unsigned long TcpMaxConn;
+ unsigned long TcpActiveOpens;
+ unsigned long TcpPassiveOpens;
+ unsigned long TcpAttemptFails;
+ unsigned long TcpEstabResets;
+ unsigned long TcpCurrEstab;
+ unsigned long TcpInSegs;
+ unsigned long TcpOutSegs;
+ unsigned long TcpRetransSegs;
+ unsigned long TcpInErrs;
+ unsigned long TcpOutRsts;
+ unsigned long __pad[0];
+};
+
+/*
+ * RFC 1213: MIB-II UDP group
+ * RFC 2013 (updates 1213): SNMPv2-MIB-UDP
+ */
+struct udp_mib
+{
+ unsigned long UdpInDatagrams;
+ unsigned long UdpNoPorts;
+ unsigned long UdpInErrors;
+ unsigned long UdpOutDatagrams;
+ unsigned long __pad[0];
+};
+
+/* draft-ietf-sigtran-sctp-mib-07.txt */
+struct sctp_mib
+{
+ unsigned long SctpCurrEstab;
+ unsigned long SctpActiveEstabs;
+ unsigned long SctpPassiveEstabs;
+ unsigned long SctpAborteds;
+ unsigned long SctpShutdowns;
+ unsigned long SctpOutOfBlues;
+ unsigned long SctpChecksumErrors;
+ unsigned long SctpOutCtrlChunks;
+ unsigned long SctpOutOrderChunks;
+ unsigned long SctpOutUnorderChunks;
+ unsigned long SctpInCtrlChunks;
+ unsigned long SctpInOrderChunks;
+ unsigned long SctpInUnorderChunks;
+ unsigned long SctpFragUsrMsgs;
+ unsigned long SctpReasmUsrMsgs;
+ unsigned long SctpOutSCTPPacks;
+ unsigned long SctpInSCTPPacks;
+ unsigned long SctpRtoAlgorithm;
+ unsigned long SctpRtoMin;
+ unsigned long SctpRtoMax;
+ unsigned long SctpRtoInitial;
+ unsigned long SctpValCookieLife;
+ unsigned long SctpMaxInitRetr;
+ unsigned long __pad[0];
+};
-#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
-
-/* IPstats */
-#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
-struct ipstats_mib {
- unsigned long mibs[IPSTATS_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* ICMP */
-#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
-#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
-
-struct icmp_mib {
- unsigned long mibs[ICMP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* ICMP6 (IPv6-ICMP) */
-#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
-struct icmpv6_mib {
- unsigned long mibs[ICMP6_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* TCP */
-#define TCP_MIB_MAX __TCP_MIB_MAX
-struct tcp_mib {
- unsigned long mibs[TCP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* UDP */
-#define UDP_MIB_MAX __UDP_MIB_MAX
-struct udp_mib {
- unsigned long mibs[UDP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* SCTP */
-#define SCTP_MIB_MAX __SCTP_MIB_MAX
-struct sctp_mib {
- unsigned long mibs[SCTP_MIB_MAX];
-} __SNMP_MIB_ALIGN__;
-
-/* Linux */
-#define LINUX_MIB_MAX __LINUX_MIB_MAX
-struct linux_mib {
- unsigned long mibs[LINUX_MIB_MAX];
+struct linux_mib
+{
+ unsigned long SyncookiesSent;
+ unsigned long SyncookiesRecv;
+ unsigned long SyncookiesFailed;
+ unsigned long EmbryonicRsts;
+ unsigned long PruneCalled;
+ unsigned long RcvPruned;
+ unsigned long OfoPruned;
+ unsigned long OutOfWindowIcmps;
+ unsigned long LockDroppedIcmps;
+ unsigned long ArpFilter;
+ unsigned long TimeWaited;
+ unsigned long TimeWaitRecycled;
+ unsigned long TimeWaitKilled;
+ unsigned long PAWSPassiveRejected;
+ unsigned long PAWSActiveRejected;
+ unsigned long PAWSEstabRejected;
+ unsigned long DelayedACKs;
+ unsigned long DelayedACKLocked;
+ unsigned long DelayedACKLost;
+ unsigned long ListenOverflows;
+ unsigned long ListenDrops;
+ unsigned long TCPPrequeued;
+ unsigned long TCPDirectCopyFromBacklog;
+ unsigned long TCPDirectCopyFromPrequeue;
+ unsigned long TCPPrequeueDropped;
+ unsigned long TCPHPHits;
+ unsigned long TCPHPHitsToUser;
+ unsigned long TCPPureAcks;
+ unsigned long TCPHPAcks;
+ unsigned long TCPRenoRecovery;
+ unsigned long TCPSackRecovery;
+ unsigned long TCPSACKReneging;
+ unsigned long TCPFACKReorder;
+ unsigned long TCPSACKReorder;
+ unsigned long TCPRenoReorder;
+ unsigned long TCPTSReorder;
+ unsigned long TCPFullUndo;
+ unsigned long TCPPartialUndo;
+ unsigned long TCPDSACKUndo;
+ unsigned long TCPLossUndo;
+ unsigned long TCPLoss;
+ unsigned long TCPLostRetransmit;
+ unsigned long TCPRenoFailures;
+ unsigned long TCPSackFailures;
+ unsigned long TCPLossFailures;
+ unsigned long TCPFastRetrans;
+ unsigned long TCPForwardRetrans;
+ unsigned long TCPSlowStartRetrans;
+ unsigned long TCPTimeouts;
+ unsigned long TCPRenoRecoveryFail;
+ unsigned long TCPSackRecoveryFail;
+ unsigned long TCPSchedulerFailed;
+ unsigned long TCPRcvCollapsed;
+ unsigned long TCPDSACKOldSent;
+ unsigned long TCPDSACKOfoSent;
+ unsigned long TCPDSACKRecv;
+ unsigned long TCPDSACKOfoRecv;
+ unsigned long TCPAbortOnSyn;
+ unsigned long TCPAbortOnData;
+ unsigned long TCPAbortOnClose;
+ unsigned long TCPAbortOnMemory;
+ unsigned long TCPAbortOnTimeout;
+ unsigned long TCPAbortOnLinger;
+ unsigned long TCPAbortFailed;
+ unsigned long TCPMemoryPressures;
+ unsigned long __pad[0];
};
/*
- * FIXME: On x86 and some other CPUs the split into user and softirq parts
- * is not needed because addl $1,memory is atomic against interrupts (but
- * atomic_inc would be overkill because of the lock cycles). Wants new
- * nonlocked_atomic_inc() primitives -AK
+ * FIXME: On x86 and some other CPUs the split into user and softirq parts is not needed because
+ * addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because of the lock
+ * cycles). Wants new nonlocked_atomic_inc() primitives -AK
*/
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) *name[2]
#define SNMP_STAT_USRPTR(name) (name[1])
#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[0], smp_processor_id())->field++)
#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field + (offset)]++)
+ ((*((&per_cpu_ptr(mib[0], smp_processor_id())->field) + (offset)))++)
#define SNMP_INC_STATS_USER(mib, field) \
- (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[1], smp_processor_id())->field++)
#define SNMP_INC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]++)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)
#define SNMP_DEC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]--)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field--)
#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field] += addend)
+ (per_cpu_ptr(mib[0], smp_processor_id())->field += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
- (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field] += addend)
-
+ (per_cpu_ptr(mib[1], smp_processor_id())->field += addend)
+
#endif
*/
/* Define this to get the sk->sk_debug debugging facility. */
-//#define SOCK_DEBUGGING
+#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
printk(KERN_DEBUG msg); } while (0)
* @sk_timer - sock cleanup timer
* @sk_stamp - time stamp of last packet received
* @sk_socket - Identd and reporting IO signals
- * @sk_user_data - RPC and Tux layer private data
+ * @sk_user_data - RPC layer private data
* @sk_owner - module that owns this socket
* @sk_sndmsg_page - cached page for sendmsg
* @sk_sndmsg_off - cached offset for sendmsg
* @sk_data_ready - callback to indicate there is data to be processed
* @sk_write_space - callback to indicate there is bf sending space available
* @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
- * @sk_create_child - callback to get new socket events
* @sk_backlog_rcv - callback to process the backlog
* @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
*/
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
- void (*sk_create_child)(struct sock *sk, struct sock *newsk);
void (*sk_destruct)(struct sock *sk);
};
* packet.
*/
if (inet_stream_ops.bind != inet_bind &&
- (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid) {
- err = -EPERM;
+ (int) sk->sk_xid >= 0 && sk->sk_xid != skb->xid)
goto out;
- }
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
extern void tcp_push_one(struct sock *, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
-extern void cleanup_rbuf(struct sock *sk, int copied);
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
/* Return 0, if packet can be sent now without violation Nagle's rules:
1. It is full sized.
2. Or it contains FIN.
- 3. Or higher layers meant to force a packet boundary, hence the PSH bit.
- 4. Or TCP_NODELAY was set.
- 5. Or TCP_CORK is not set, and all sent packets are ACKed.
+ 3. Or TCP_NODELAY was set.
+ 4. Or TCP_CORK is not set, and all sent packets are ACKed.
With Minshall's modification: all sent small packets are ACKed.
*/
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+ NET_INC_STATS_BH(TCPPrequeueDropped);
}
tp->ucopy.memory = 0;
switch (state) {
case TCP_ESTABLISHED:
if (oldstate != TCP_ESTABLISHED)
- TCP_INC_STATS(TCP_MIB_CURRESTAB);
+ TCP_INC_STATS(TcpCurrEstab);
break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
- TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+ TCP_INC_STATS(TcpEstabResets);
sk->sk_prot->unhash(sk);
if (tcp_sk(sk)->bind_hash &&
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
- TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+ TCP_DEC_STATS(TcpCurrEstab);
}
/* Change state AFTER socket is unhashed to avoid closed
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+ TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
+ TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(TcpMaxConn, -1);
}
/* /proc */
+++ /dev/null
-#ifndef _NET_TUX_H
-#define _NET_TUX_H
-
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * tux.h: main structure definitions and function prototypes
- */
-
-#define __KERNEL_SYSCALLS__
-
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/wait.h>
-#include <linux/namei.h>
-#include <linux/file.h>
-#include <linux/mman.h>
-#include <linux/swap.h>
-#include <linux/ctype.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/unistd.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/pagemap.h>
-#include <linux/vmalloc.h>
-#include <linux/utsname.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel_stat.h>
-#include <linux/kernel_stat.h>
-#include <linux/time.h>
-#include <asm/div64.h>
-#include <asm/unaligned.h>
-#include <linux/compiler.h>
-#include <linux/mount.h>
-#include <linux/zlib.h>
-
-#include <net/tcp.h>
-#include <net/tux_u.h>
-
-/* Maximum number of threads: */
-#define CONFIG_TUX_NUMTHREADS 8
-
-/* Number of cachemiss/IO threads: */
-#define NR_IO_THREADS 32
-
-/* Maximum number of listen sockets per thread: */
-#define CONFIG_TUX_NUMSOCKETS 16
-
-extern spinlock_t tux_module_lock;
-extern struct module *tux_module;
-extern asmlinkage long (*sys_tux_ptr) (unsigned int action, user_req_t *u_info);
-
-#undef Dprintk
-
-extern int tux_TDprintk;
-extern int tux_Dprintk;
-
-#define TUX_DEBUG CONFIG_TUX_DEBUG
-#if CONFIG_TUX_DEBUG
-# define TUX_BUG() BUG()
-
-# define TUX_DPRINTK 1
-# define TDprintk(x...) do { if (tux_TDprintk) { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } } while (0)
-# define Dprintk(x...) do { if (tux_Dprintk == 1) TDprintk(x); } while (0)
-#else
-# define TUX_DPRINTK 0
-# define Dprintk(x...) do { } while (0)
-# define TDprintk(x...) do { } while (0)
-//# define TUX_BUG() BUG()
-# define TUX_BUG() do { } while (0)
-#endif
-
-#if 1
-# define INC_STAT(x) do { } while (0)
-# define DEC_STAT(x) do { } while (0)
-# define ADD_STAT(x,y) do { } while (0)
-# define SUB_STAT(x,y) do { } while (0)
-#else
-# define INC_STAT(x) atomic_inc((atomic_t *)&kstat.x)
-# define DEC_STAT(x) atomic_dec((atomic_t *)&kstat.x)
-# define ADD_STAT(y,x) atomic_add(y,(atomic_t *)&kstat.x)
-# define SUB_STAT(y,x) atomic_sub(y,(atomic_t *)&kstat.x)
-#endif
-
-// lru needs this:
-
-# define DEBUG_DEL_LIST(x...) do { INIT_LIST_HEAD((x)); } while (0)
-
-
-#define LOG_LEN (8*1024*1024UL)
-
-struct tux_req_struct;
-typedef struct tux_req_struct tux_req_t;
-typedef struct tux_threadinfo threadinfo_t;
-
-extern struct address_space_operations url_aops;
-
-typedef struct tcapi_template_s {
- char *vfs_name;
- struct list_head modules;
- int (*query) (tux_req_t *req);
- struct module *mod;
- unsigned int userspace_id;
-} tcapi_template_t;
-
-typedef struct mimetype_s {
- struct list_head list;
-
- char *ext;
- unsigned int ext_len;
- char *type;
- unsigned int type_len;
- char *expire_str;
- unsigned int expire_str_len;
-
- unsigned int special;
-} mimetype_t;
-
-typedef struct tux_attribute_s {
- mimetype_t *mime;
- tcapi_template_t *tcapi;
-} tux_attribute_t;
-
-#define MAX_TUX_ATOMS 8
-
-typedef void (atom_func_t)(tux_req_t *req, int cachemiss);
-
-typedef struct tux_proto_s
-{
- unsigned int defer_accept;
- unsigned int can_redirect;
- void (*got_request) (tux_req_t *req);
- int (*parse_message) (tux_req_t *req, const int total_len);
- atom_func_t *illegal_request;
- atom_func_t *request_timeout;
- void (*pre_log) (tux_req_t *req);
- int (*check_req_err) (tux_req_t *req, int cachemiss);
- char * (*print_dir_line) (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode);
- const char *name;
- struct nameidata main_docroot;
-} tux_proto_t;
-
-typedef struct tux_socket_s {
- tux_proto_t *proto;
- unsigned int ip;
- unsigned short port;
- struct proc_dir_entry *entry;
-} tux_socket_t;
-
-extern tux_socket_t tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS];
-
-
-typedef struct abuf_s {
- struct page *page;
- char *buf;
- unsigned int size;
- unsigned int max_len;
- unsigned int offset;
- unsigned int left;
- unsigned long flags;
-} abuf_t;
-
-struct linux_dirent64 {
- u64 d_ino;
- s64 d_off;
- unsigned short d_reclen;
- unsigned char d_type;
- char d_name[0];
-};
-
-struct getdents_callback64 {
- struct linux_dirent64 * current_dir;
- struct linux_dirent64 * previous;
- int count;
- int error;
-};
-
-#define TUX_MAGIC 0x12457801
-
-#define MAX_TUX_ATOMS 8
-
-struct tux_req_struct
-{
- tux_proto_t *proto;
-
- int atom_idx;
- atom_func_t *atoms [MAX_TUX_ATOMS];
- struct list_head work;
-
- struct list_head all;
- struct list_head free;
- struct list_head lru;
-
- unsigned long idle_input;
- unsigned long wait_output_space;
-
- struct socket *sock;
- struct dentry *dentry;
- struct vfsmount *mnt;
- struct dentry *docroot_dentry;
- struct vfsmount *docroot_mnt;
- struct dentry *cwd_dentry;
- struct vfsmount *cwd_mnt;
-
- struct file in_file;
- int fd;
- read_descriptor_t desc;
- u32 client_addr;
- u32 client_port;
- unsigned int virtual;
-
- loff_t total_file_len;
- unsigned int lendigits;
- loff_t offset_start;
- loff_t offset_end;
- loff_t output_len;
-
- loff_t ftp_offset_start;
-
- time_t mtime;
- unsigned int etaglen;
- char etag [40];
-
- char usermode;
- unsigned int usermodule_idx;
- struct dentry *module_dentry;
- struct vfsmount *module_mnt;
- char *userbuf;
- unsigned int userlen;
-
- tux_attribute_t *attr;
-
- threadinfo_t *ti;
- wait_queue_t sleep;
- wait_queue_t ftp_sleep;
-
- abuf_t abuf;
- /*
- * Parsed request fields. In-line strings are zero-delimited.
- */
- const char *headers;
- unsigned int headers_len;
-
- unsigned int parsed_len;
-
- // FTP part
- ftp_command_t ftp_command;
- u32 ftp_user_addr;
- u16 ftp_user_port;
-
- struct socket *data_sock;
- unsigned int prev_pos;
-
- // ls handing:
- struct linux_dirent64 *dirp0;
- unsigned int curroff, total;
-
-#define MAX_USERNAME_LEN 16
- char username[MAX_USERNAME_LEN];
- unsigned int username_len;
-
- // HTTP part
- http_method_t method;
- const char *method_str;
- unsigned int method_len;
-
- http_version_t version;
- const char *version_str;
- unsigned int version_len;
-
- /* requested URI: */
-
- const char *uri_str;
- unsigned int uri_len;
-
- /* Objectname (filename/scriptname) this URI refers to: */
-
-#define MAX_OBJECTNAME_LEN 256
- char objectname[MAX_OBJECTNAME_LEN + 4]; // space for .gz as well
- unsigned int objectname_len;
-
- /* Query string within the URI: */
-
- const char *query_str;
- unsigned int query_len;
-
- /* Cookies: */
-
- const char *cookies_str;
- unsigned int cookies_len;
- unsigned int parse_cookies;
-
- /* Content-TYpe */
- const char *content_type_str;
- unsigned int content_type_len;
-
- /* Content-Length: */
-
- const char *contentlen_str;
- unsigned int contentlen_len;
- unsigned int content_len;
-
- /* User-Agent: */
-
- const char *user_agent_str;
- unsigned int user_agent_len;
-
- /* Accept: */
-
- const char *accept_str;
- unsigned int accept_len;
-
- /* Accept-Charset: */
-
- const char *accept_charset_str;
- unsigned int accept_charset_len;
-
- /* Accept-Language: */
-
- const char *accept_language_str;
- unsigned int accept_language_len;
-
- /* Cache-Control: */
-
- const char *cache_control_str;
- unsigned int cache_control_len;
-
- /* If-Modified-Since: */
-
- const char *if_modified_since_str;
- unsigned int if_modified_since_len;
-
- /* If-None-Match: */
- const char *if_none_match_str;
- unsigned int if_none_match_len;
-
- /* If-Range: */
-
- const char *if_range_str;
- unsigned int if_range_len;
-
- /* Negotiate: */
-
- const char *negotiate_str;
- unsigned int negotiate_len;
-
- /* Pragma: */
-
- const char *pragma_str;
- unsigned int pragma_len;
-
- /* Referer: */
-
- const char *referer_str;
- unsigned int referer_len;
-
- /* Accept-Encoding: */
-
- const char *accept_encoding_str;
- unsigned int accept_encoding_len;
- unsigned int may_send_gzip;
- unsigned int content_gzipped;
-
- /* Host */
-
-#define MAX_HOST_LEN 128
- char host[MAX_HOST_LEN];
- unsigned int host_len;
-
- /* POSTed data: */
-
- const char *post_data_str;
- unsigned int post_data_len;
-
- unsigned int status;
-
- /* the file being sent */
-
- unsigned int bytes_sent;
-#if CONFIG_TUX_DEBUG
- unsigned int bytes_expected;
-#endif
- unsigned long first_timestamp;
- unsigned int body_len;
-
- unsigned int user_error;
-
- char error;
- char postponed;
-
- char had_cachemiss;
- char lookup_dir;
- char lookup_404;
-
- char keep_alive;
- struct timer_list keepalive_timer;
- unsigned int total_bytes;
- struct timer_list output_timer;
-
- unsigned int nr_keepalives;
-
- unsigned int event;
- u64 private;
-
- unsigned int magic;
- void (*real_data_ready)(struct sock *sk, int space);
- void (*real_state_change)(struct sock *sk);
- void (*real_write_space)(struct sock *sk);
- void (*real_error_report)(struct sock *sk);
- void (*real_destruct)(struct sock *sk);
-
- void (*ftp_real_data_ready)(struct sock *sk, int space);
- void (*ftp_real_state_change)(struct sock *sk);
- void (*ftp_real_write_space)(struct sock *sk);
- void (*ftp_real_error_report)(struct sock *sk);
- void (*ftp_real_create_child)(struct sock *sk, struct sock *newsk);
- void (*ftp_real_destruct)(struct sock *sk);
-
-#if CONFIG_TUX_EXTENDED_LOG
- unsigned long accept_timestamp;
- unsigned long parse_timestamp;
- unsigned long output_timestamp;
- unsigned long flush_timestamp;
-# define SET_TIMESTAMP(x) do { (x) = jiffies; } while (0)
-#else
-# define SET_TIMESTAMP(x) do { } while (0)
-#endif
-
-};
-
-extern void add_tux_atom (tux_req_t *req, atom_func_t *event_done);
-extern void del_tux_atom (tux_req_t *req);
-extern void tux_schedule_atom (tux_req_t *req, int cachemiss);
-extern void add_req_to_workqueue (tux_req_t *req);
-
-
-typedef struct iothread_s
-{
- spinlock_t async_lock;
- threadinfo_t *ti;
- struct list_head async_queue;
- wait_queue_head_t async_sleep;
- unsigned int nr_async_pending;
- unsigned int threads;
- unsigned int shutdown;
- wait_queue_head_t wait_shutdown;
-} iothread_t;
-
-typedef struct tux_listen_s
-{
- tux_proto_t *proto;
- struct socket *sock;
- unsigned int cloned;
-} tux_listen_t;
-
-struct tux_threadinfo
-{
- tux_req_t *userspace_req;
- unsigned int started;
- struct task_struct *thread;
- iothread_t *iot;
- wait_queue_t wait_event [CONFIG_TUX_NUMSOCKETS];
- wait_queue_t stop;
- unsigned int pid;
-
- struct page *header_cache;
- unsigned int header_offset;
-
- unsigned int nr_requests;
- struct list_head all_requests;
-
- unsigned int nr_free_requests;
- spinlock_t free_requests_lock;
- struct list_head free_requests;
-
- spinlock_t work_lock;
- struct list_head work_pending;
- struct list_head lru;
- unsigned int nr_lru;
-
- unsigned int listen_error;
- tux_listen_t listen[CONFIG_TUX_NUMSOCKETS];
-
- struct semaphore gzip_sem;
- z_stream gzip_state;
-
- unsigned int cpu;
- unsigned int __padding[16];
-};
-
-typedef enum special_mimetypes {
- NORMAL_MIME_TYPE,
- MIME_TYPE_REDIRECT,
- MIME_TYPE_CGI,
- MIME_TYPE_MODULE,
-} special_mimetypes_t;
-
-#if CONFIG_TUX_DEBUG
-#if 0
-extern inline void url_hist_hit (int size)
-{
- unsigned int idx = size/1024;
-
- if (idx >= URL_HIST_SIZE)
- idx = URL_HIST_SIZE-1;
- kstat.url_hist_hits[idx]++;
-}
-extern inline void url_hist_miss (int size)
-{
- unsigned int idx = size/1024;
-
- if (idx >= URL_HIST_SIZE)
- idx = URL_HIST_SIZE-1;
- kstat.url_hist_misses[idx]++;
-}
-#endif
-extern void __check_req_list (tux_req_t *req, struct list_head *list);
-# define check_req_list __check_req_list
-#else
-# define check_req_list(req, list) do { } while (0)
-#endif
-
-#define url_hist_hit(size) do { } while (0)
-#define url_hist_miss(size) do { } while (0)
-
-extern char tux_common_docroot[200];
-extern char tux_http_subdocroot[200];
-extern char tux_ftp_subdocroot[200];
-extern char tux_logfile[200];
-extern char tux_cgiroot[200];
-extern char tux_404_page[200];
-extern char tux_default_vhost[200];
-extern char tux_extra_html_header[600];
-extern unsigned int tux_extra_html_header_size;
-extern int tux_cgi_uid;
-extern int tux_cgi_gid;
-extern unsigned int tux_clientport;
-extern unsigned int tux_logging;
-extern unsigned int tux_threads;
-extern unsigned int tux_keepalive_timeout;
-extern unsigned int tux_max_output_bandwidth;
-extern unsigned int tux_max_backlog;
-extern unsigned int tux_max_connect;
-extern unsigned int tux_mode_forbidden;
-extern unsigned int tux_mode_allowed;
-extern unsigned int tux_logentry_align_order;
-extern unsigned int tux_nonagle;
-extern unsigned int tux_ack_pingpong;
-extern unsigned int tux_push_all;
-extern unsigned int tux_zerocopy_parse;
-extern unsigned int tux_generate_etags;
-extern unsigned int tux_generate_last_mod;
-extern unsigned int tux_generate_cache_control;
-extern unsigned int tux_ip_logging;
-extern unsigned int tux_ftp_wait_close;
-extern unsigned int tux_ftp_log_retr_only;
-extern unsigned int tux_hide_unreadable;
-
-typedef enum virtual_server {
- TUX_VHOST_NONE,
- TUX_VHOST_HOST,
- TUX_VHOST_IP,
- TUX_VHOST_IP_HOST,
-} virtual_server_t;
-
-extern unsigned int tux_virtual_server;
-extern unsigned int mass_hosting_hash;
-extern unsigned int strip_host_tail;
-extern unsigned int tux_ftp_virtual_server;
-
-extern unsigned int tux_max_object_size;
-extern unsigned int tux_max_free_requests;
-extern unsigned int tux_defer_accept;
-
-extern struct socket * start_listening(tux_socket_t *listen, int nr);
-extern void stop_listening(struct socket **sock);
-extern void start_sysctl(void);
-extern void end_sysctl(void);
-extern void flush_request (tux_req_t *req, int cachemiss);
-extern void unlink_tux_socket (tux_req_t *req);
-extern void unlink_tux_data_socket (tux_req_t *req);
-extern void unlink_tux_listen_socket (tux_req_t *req);
-extern void link_tux_ftp_accept_socket (tux_req_t *req, struct socket *sock);
-extern void link_tux_data_socket (tux_req_t *req, struct socket *sock);
-extern void tux_push_req (tux_req_t *req);
-extern int send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags);
-extern void __send_async_message (tux_req_t *req, const char *message, int status, unsigned int size, int push);
-#define send_async_message(req,str,status,push) \
- __send_async_message(req,str,status,strlen(str),push)
-
-extern void send_success (tux_req_t *req, struct socket *sock);
-extern void send_async_err_not_found (tux_req_t *req);
-extern void send_async_timed_out (tux_req_t *req);
-
-extern void kfree_req (tux_req_t *req);
-extern int accept_requests (threadinfo_t *ti);
-extern int process_requests (threadinfo_t *ti, tux_req_t **user_req);
-extern int flush_freequeue (threadinfo_t * ti);
-extern int tux_flush_workqueue (threadinfo_t *ti);
-extern tux_req_t * pick_userspace_req (threadinfo_t *ti);
-extern atom_func_t redirect_request;
-extern atom_func_t parse_request;
-extern void queue_cachemiss (tux_req_t *req);
-extern int start_cachemiss_threads (threadinfo_t *ti);
-extern void stop_cachemiss_threads (threadinfo_t *ti);
-struct file * tux_open_file(char *filename, int mode);
-extern void start_log_thread (void);
-extern void stop_log_thread (void);
-extern void add_mimetype (char *new_ext, char *new_type, char *new_expire);
-extern void free_mimetypes (void);
-extern int lookup_object (tux_req_t *req, const unsigned int flag);
-extern int handle_gzip_req (tux_req_t *req, unsigned int flags);
-extern struct dentry * tux_lookup (tux_req_t *req, const char *filename, const unsigned int flag, struct vfsmount **mnt);
-extern tcapi_template_t * lookup_tuxmodule (const char *filename);
-extern int register_tuxmodule (tcapi_template_t *tcapi);
-extern tcapi_template_t * unregister_tuxmodule (char *vfs_name);
-extern tcapi_template_t * get_first_usermodule (void);
-extern int user_register_module (user_req_t *u_info);
-extern int user_unregister_module (user_req_t *u_info);
-extern void unregister_all_tuxmodules (void);
-
-typedef struct exec_param_s {
- char *command;
- char **argv;
- char **envp;
- unsigned int pipe_fds;
-} exec_param_t;
-
-extern pid_t tux_exec_process (char *command, char **argv, char **envp, int pipe_fds, exec_param_t *param, int wait);
-
-extern void start_external_cgi (tux_req_t *req);
-extern tcapi_template_t extcgi_tcapi;
-
-extern void queue_output_req (tux_req_t *req, threadinfo_t *ti);
-extern void queue_userspace_req (tux_req_t *req, threadinfo_t *ti);
-
-
-extern void __log_request (tux_req_t *req);
-extern inline void log_request (tux_req_t *req)
-{
- if (tux_logging)
- __log_request(req);
-}
-
-extern int __connection_too_fast (tux_req_t *req);
-
-#define connection_too_fast(req) \
- ({ \
- int __ret = 1; \
- if (unlikely(tux_max_output_bandwidth)) \
- __ret = __connection_too_fast(req); \
- __ret; \
- })
-
-extern void trunc_headers (tux_req_t *req);
-extern int generic_send_file (tux_req_t *req, struct socket *sock, int cachemiss);
-extern int tux_fetch_file (tux_req_t *req, int nonblock);
-
-extern void postpone_request (tux_req_t *req);
-extern int continue_request (int fd);
-extern void tux_push_pending (struct sock *sk);
-extern void zap_request (tux_req_t *req, int cachemiss);
-extern int add_output_space_event (tux_req_t *req, struct socket *sock);
-
-extern void reap_kids (void);
-extern void unuse_frag (struct sk_buff *skb, skb_frag_t *frag);
-extern skb_frag_t * build_dynbuf_frag (tux_req_t *req, unsigned int size);
-extern int tux_permission (struct inode *inode);
-extern void flush_all_signals (void);
-
-#define D() Dprintk("{%s:%d}\n", __FILE__, __LINE__)
-
-extern int nr_async_io_pending (void);
-
-extern void __add_keepalive_timer (tux_req_t *req);
-#define add_keepalive_timer(req) \
-do { \
- if (tux_keepalive_timeout) { \
- Dprintk("add_keepalive_timer(%p).\n", (req)); \
- __add_keepalive_timer(req); \
- } \
-} while (0)
-extern void __del_keepalive_timer (tux_req_t *req);
-#define del_keepalive_timer(req) \
-do { \
- if (tux_keepalive_timeout) { \
- Dprintk("del_keepalive_timer(%p).\n", (req)); \
- __del_keepalive_timer(req); \
- } \
-} while (0)
-
-extern void del_output_timer (tux_req_t *req);
-extern void output_timeout (tux_req_t *req);
-
-extern void print_req (tux_req_t *req);
-
-extern char tux_date [DATE_LEN];
-
-
-extern int nr_async_io_pending (void);
-extern void tux_exit (void);
-extern char * get_abuf (tux_req_t *req, unsigned int max_size);
-extern void send_abuf (tux_req_t *req, unsigned int size, unsigned long flags);
-
-
-extern int idle_event (tux_req_t *req);
-extern int output_space_event (tux_req_t *req);
-extern unsigned int log_cpu_mask;
-extern unsigned int tux_compression;
-extern unsigned int tux_noid;
-extern unsigned int tux_cgi_inherit_cpu;
-extern unsigned int tux_zerocopy_header;
-extern unsigned int tux_zerocopy_sendfile;
-extern unsigned int tux_cgi_cpu_mask;
-extern tux_proto_t tux_proto_http;
-extern tux_proto_t tux_proto_ftp;
-extern unsigned int tux_all_userspace;
-extern unsigned int tux_ignore_query;
-extern unsigned int tux_redirect_logging;
-extern unsigned int tux_referer_logging;
-extern unsigned int tux_log_incomplete;
-extern unsigned int tux_max_header_len;
-extern unsigned int tux_cpu_offset;
-extern unsigned int tux_ftp_login_message;
-
-extern void drop_permissions (void);
-extern int query_extcgi (tux_req_t *req);
-extern int tux_chroot (char *dir);
-
-extern void install_req_dentry (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt);
-extern void release_req_dentry (tux_req_t *req);
-extern void unidle_req (tux_req_t *req);
-extern int nr_requests_used (void);
-
-#define req_err(req) do { (req)->error = 1; Dprintk("request %p error at %s:%d.\n", req, __FILE__, __LINE__); } while (0)
-
-#define enough_wspace(sk) (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
-#define clear_keepalive(req) do { (req)->keep_alive = 0; Dprintk("keepalive cleared for req %p.\n", req); } while (0)
-
-extern int print_all_requests (threadinfo_t *ti);
-extern unsigned int tux_max_keepalives;
-extern int time_unix2ls (time_t zulu, char *buf);
-extern void last_mod_time(char * curr, const time_t t);
-extern int mdtm_time(char * curr, const time_t t);
-extern time_t parse_time(const char *str, const int str_len);
-
-extern unsigned int nr_tux_threads;
-extern threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS];
-
-#define switch_docroot(req) do { if (((req)->docroot_dentry != current->fs->root) || ((req)->docroot_mnt != current->fs->rootmnt)) __switch_docroot(req); } while (0)
-extern void __switch_docroot(tux_req_t *req);
-extern void list_directory (tux_req_t *req, int cachemiss);
-extern char * tux_print_path (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt, char *buf, unsigned int max_len);
-
-extern unsigned int tux_http_dir_indexing;
-
-int tux_gzip_compress (tux_req_t *req, unsigned char *data_in, unsigned char *data_out, __u32 *in_len, __u32 *out_len);
-
-struct dentry * __tux_lookup (tux_req_t *req, const char *filename,
- struct nameidata *base, struct vfsmount **mnt);
-
-/* error codes for req->error */
-#define TUX_ERROR_REDIRECT 1
-#define TUX_ERROR_UNUSED 2
-#define TUX_ERROR_CONN_CLOSE 3
-#define TUX_ERROR_CONN_TIMEOUT 4
-
-extern void __put_data_sock (tux_req_t *req);
-
-static inline void put_data_sock (tux_req_t *req)
-{
- if (req->data_sock)
- __put_data_sock(req);
-}
-
-#define socket_input(sock) \
- (!skb_queue_empty(&(sock)->sk->sk_receive_queue) || \
- !skb_queue_empty(&(sock)->sk->sk_error_queue))
-
-#define tux_kmalloc(size) \
-({ \
- void *__ptr; \
- \
- while (!(__ptr = kmalloc(size, GFP_KERNEL))) { \
- if (net_ratelimit()) \
- printk(KERN_WARNING "tux: OOM at %s:%d (%d bytes).\n", \
- __FILE__, __LINE__, size); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule_timeout(1); \
- } \
- __ptr; \
-})
-
-extern long tux_close(unsigned int fd);
-
-#endif
+++ /dev/null
-#ifndef _NET_TUX_U_H
-#define _NET_TUX_U_H
-
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * tux_u.h: HTTP module API - HTTP interface to user-space
- */
-
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define TUX_MAJOR_VERSION 3
-#define TUX_MINOR_VERSION 0
-#define TUX_PATCHLEVEL_VERSION 0
-
-#define __KERNEL_SYSCALLS__
-
-typedef enum http_versions {
- HTTP_1_0,
- HTTP_1_1
-} http_version_t;
-
-/*
- * Request methods known to HTTP:
- */
-typedef enum http_methods {
- METHOD_NONE,
- METHOD_GET,
- METHOD_HEAD,
- METHOD_POST,
- METHOD_PUT,
- NR_METHODS
-} http_method_t;
-
-enum user_req {
- TUX_ACTION_STARTUP = 1,
- TUX_ACTION_SHUTDOWN = 2,
- TUX_ACTION_STARTTHREAD = 3,
- TUX_ACTION_STOPTHREAD = 4,
- TUX_ACTION_EVENTLOOP = 5,
- TUX_ACTION_GET_OBJECT = 6,
- TUX_ACTION_SEND_OBJECT = 7,
- TUX_ACTION_READ_OBJECT = 8,
- TUX_ACTION_FINISH_REQ = 9,
- TUX_ACTION_FINISH_CLOSE_REQ = 10,
- TUX_ACTION_REGISTER_MODULE = 11,
- TUX_ACTION_UNREGISTER_MODULE = 12,
- TUX_ACTION_CURRENT_DATE = 13,
- TUX_ACTION_REGISTER_MIMETYPE = 14,
- TUX_ACTION_READ_HEADERS = 15,
- TUX_ACTION_POSTPONE_REQ = 16,
- TUX_ACTION_CONTINUE_REQ = 17,
- TUX_ACTION_REDIRECT_REQ = 18,
- TUX_ACTION_READ_POST_DATA = 19,
- TUX_ACTION_SEND_BUFFER = 20,
- TUX_ACTION_WATCH_PROXY_SOCKET = 21,
- TUX_ACTION_WAIT_PROXY_SOCKET = 22,
- TUX_ACTION_QUERY_VERSION = 23,
- MAX_TUX_ACTION
-};
-
-enum tux_ret {
- TUX_ERROR = -1,
- TUX_RETURN_USERSPACE_REQUEST = 0,
- TUX_RETURN_EXIT = 1,
- TUX_RETURN_SIGNAL = 2,
- TUX_CONTINUE_EVENTLOOP = 3,
-};
-
-#define MAX_URI_LEN 256
-#define MAX_COOKIE_LEN 128
-#define MAX_FIELD_LEN 64
-#define DATE_LEN 30
-
-typedef struct user_req_s {
- u32 version_major;
- u32 version_minor;
- u32 version_patch;
- u32 http_version;
- u32 http_method;
- u32 http_status;
-
- u32 sock;
- u32 event;
- u32 error;
- u32 thread_nr;
- u32 bytes_sent;
- u32 client_host;
- u32 objectlen;
- u32 module_index;
- u32 keep_alive;
- u32 cookies_len;
-
- u64 id;
- u64 priv;
- u64 object_addr;
-
- u8 query[MAX_URI_LEN];
- u8 objectname[MAX_URI_LEN];
- u8 cookies[MAX_COOKIE_LEN];
- u8 content_type[MAX_FIELD_LEN];
- u8 user_agent[MAX_FIELD_LEN];
- u8 accept[MAX_FIELD_LEN];
- u8 accept_charset[MAX_FIELD_LEN];
- u8 accept_encoding[MAX_FIELD_LEN];
- u8 accept_language[MAX_FIELD_LEN];
- u8 cache_control[MAX_FIELD_LEN];
- u8 if_modified_since[MAX_FIELD_LEN];
- u8 negotiate[MAX_FIELD_LEN];
- u8 pragma[MAX_FIELD_LEN];
- u8 referer[MAX_FIELD_LEN];
- u8 new_date[DATE_LEN];
- u8 pad[2];
-
-} user_req_t;
-
-typedef enum ftp_commands {
- FTP_COMM_NONE,
- FTP_COMM_USER,
- FTP_COMM_PASS,
- FTP_COMM_ACCT,
- FTP_COMM_CWD,
- FTP_COMM_CDUP,
- FTP_COMM_SMNT,
- FTP_COMM_QUIT,
- FTP_COMM_REIN,
- FTP_COMM_PORT,
- FTP_COMM_PASV,
- FTP_COMM_TYPE,
- FTP_COMM_STRU,
- FTP_COMM_MODE,
- FTP_COMM_RETR,
- FTP_COMM_SIZE,
- FTP_COMM_MDTM,
- FTP_COMM_STOR,
- FTP_COMM_STOU,
- FTP_COMM_APPE,
- FTP_COMM_ALLO,
- FTP_COMM_REST,
- FTP_COMM_RNFR,
- FTP_COMM_RNTO,
- FTP_COMM_ABOR,
- FTP_COMM_DELE,
- FTP_COMM_RMD,
- FTP_COMM_MKD,
- FTP_COMM_PWD,
- FTP_COMM_LIST,
- FTP_COMM_NLST,
- FTP_COMM_SITE,
- FTP_COMM_SYST,
- FTP_COMM_STAT,
- FTP_COMM_HELP,
- FTP_COMM_NOOP,
- FTP_COMM_FEAT,
- FTP_COMM_CLNT,
-} ftp_command_t;
-
-#endif
extern void udp_err(struct sk_buff *, u32);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr *usin, int addr_len);
extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
extern int xfrm4_output(struct sk_buff **pskb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
+extern int xfrm4_tunnel_check_size(struct sk_buff *skb);
extern int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
+extern int xfrm6_tunnel_check_size(struct sk_buff *skb);
extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
-extern int xfrm6_output(struct sk_buff **pskb);
#ifdef CONFIG_XFRM
extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
*/
struct ccs_modesel_head {
- __u8 _r1; /* reserved */
- __u8 medium; /* device-specific medium type */
- __u8 _r2; /* reserved */
- __u8 block_desc_length; /* block descriptor length */
- __u8 density; /* device-specific density code */
- __u8 number_blocks_hi; /* number of blocks in this block desc */
- __u8 number_blocks_med;
- __u8 number_blocks_lo;
- __u8 _r3;
- __u8 block_length_hi; /* block length for blocks in this desc */
- __u8 block_length_med;
- __u8 block_length_lo;
+ u8 _r1; /* reserved */
+ u8 medium; /* device-specific medium type */
+ u8 _r2; /* reserved */
+ u8 block_desc_length; /* block descriptor length */
+ u8 density; /* device-specific density code */
+ u8 number_blocks_hi; /* number of blocks in this block desc */
+ u8 number_blocks_med;
+ u8 number_blocks_lo;
+ u8 _r3;
+ u8 block_length_hi; /* block length for blocks in this desc */
+ u8 block_length_med;
+ u8 block_length_lo;
};
/*
* ScsiLun: 8 byte LUN.
*/
struct scsi_lun {
- __u8 scsi_lun[8];
+ u8 scsi_lun[8];
};
/*
container_of(d, struct Scsi_Host, shost_classdev)
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
-extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
+extern int scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
#ifndef _SCSI_GENERIC_H
#define _SCSI_GENERIC_H
-#include <linux/compiler.h>
-
/*
History:
Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user
http://www.torque.net/sg/p/scsi-generic_long.txt
A version of this document (potentially out of date) may also be found in
the kernel source tree, probably at:
- Documentation/scsi/scsi-generic.txt .
+ /usr/src/linux/Documentation/scsi/scsi-generic.txt .
Utility and test programs are available at the sg web site. They are
bundled as sg_utils (for the lk 2.2 series) and sg3_utils (for the
#include <linux/time.h>
#include <asm/byteorder.h>
-#ifdef __LITTLE_ENDIAN
+#if __LITTLE_ENDIAN == 1234
#define SNDRV_LITTLE_ENDIAN
-#else
-#ifdef __BIG_ENDIAN
+#elif __BIG_ENDIAN == 4321
#define SNDRV_BIG_ENDIAN
#else
#error "Unsupported endian..."
#endif
-#endif
#else /* !__KERNEL__ */
int (*release) (snd_info_entry_t * entry,
unsigned short mode, void *file_private_data);
long (*read) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, char __user *buf,
- unsigned long count, unsigned long pos);
+ struct file * file, char __user *buf, long count);
long (*write) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, const char __user *buf,
- unsigned long count, unsigned long pos);
+ struct file * file, const char __user *buf, long count);
long long (*llseek) (snd_info_entry_t *entry, void *file_private_data,
struct file * file, long long offset, int orig);
unsigned int (*poll) (snd_info_entry_t *entry, void *file_private_data,
/*
* FIXME
* Ugh, we don't have PCI space, so map readb() and friends to use Zorro space
- * for MMIO accesses. This should make cirrusfb work again on Amiga
+ * for MMIO accesses. This should make clgenfb work again on Amiga
*/
-#undef inb_p
-#undef inw_p
-#undef outb_p
-#undef outw
-#undef readb
-#undef writeb
-#undef writew
#define inb_p(port) 0
#define inw_p(port) 0
#define outb_p(port, val) do { } while (0)
If unsure, say Y
+config STANDALONE
+ bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+ default y
+ help
+ Select this option if you don't have magic firmware for drivers that
+ need it.
+
+ If unsure, say Y.
+
config BROKEN
bool
depends on !CLEAN_COMPILE
up to the user level program to do useful things with this
information. This is generally a good idea, so say Y.
-config BSD_PROCESS_ACCT_V3
- bool "BSD Process Accounting version 3 file format"
- depends on BSD_PROCESS_ACCT
- default n
- help
- If you say Y here, the process accounting information is written
- in a new file format that also logs the process IDs of each
- process and it's parent. Note that this file format is incompatible
- with previous v0/v1/v2 file formats, so you will need updated tools
- for processing it. A preliminary version of these tools is available
- at <http://http://www.de.kernel.org/pub/linux/utils/acct/>.
-
menu "Class Based Kernel Resource Management"
config CKRM
config CKRM_CPU_SCHEDULE
bool "CKRM CPU scheduler"
depends on CKRM_TYPE_TASKCLASS
- default y
+ default m
help
Use CKRM CPU scheduler instead of Linux Scheduler
Say N if unsure, Y to use the feature.
-config CKRM_RES_BLKIO
- tristate " Disk I/O Resource Controller"
- depends on CKRM_TYPE_TASKCLASS && IOSCHED_CFQ
+config CKRM_CPU_MONITOR
+	bool "CKRM CPU Resource Monitor"
+ depends on CKRM_CPU_SCHEDULE
default m
help
- Provides a resource controller for best-effort block I/O
- bandwidth control. The controller attempts this by proportional
- servicing of requests in the I/O scheduler. However, seek
- optimizations and reordering by device drivers/disk controllers may
- alter the actual bandwidth delivered to a class.
+ Monitor CPU Resource Usage of the classes
Say N if unsure, Y to use the feature.
-config CKRM_RES_MEM
- bool "Class based physical memory controller"
- default y
- depends on CKRM
- help
- Provide the basic support for collecting physical memory usage information
- among classes. Say Y if you want to know the memory usage of each class.
-
-config CKRM_MEM_LRUORDER_CHANGE
- bool "Change the LRU ordering of scanned pages"
- default n
- depends on CKRM_RES_MEM
- help
- While trying to free pages, by default(n), scanned pages are left were they
- are found if they belong to relatively under-used class. In this case the
- LRU ordering of the memory subsystemis left intact. If this option is chosen,
- then the scanned pages are moved to the tail of the list(active or inactive).
- Changing this to yes reduces the checking overhead but violates the approximate
- LRU order that is maintained by the paging subsystem.
-
config CKRM_TYPE_SOCKETCLASS
bool "Class Manager for socket groups"
depends on CKRM
endmenu
+config BSD_PROCESS_ACCT_V3
+ bool "BSD Process Accounting version 3 file format"
+ depends on BSD_PROCESS_ACCT
+ default n
+ help
+ If you say Y here, the process accounting information is written
+ in a new file format that also logs the process IDs of each
+	  process and its parent. Note that this file format is incompatible
+ with previous v0/v1/v2 file formats, so you will need updated tools
+ for processing it. A preliminary version of these tools is available
+	  at <http://www.de.kernel.org/pub/linux/utils/acct/>.
+
config SYSCTL
bool "Sysctl support"
---help---
This option enables access to the kernel configuration file
through /proc/config.gz.
-config OOM_PANIC
- bool "OOM Panic"
- default y
- ---help---
- This option enables panic() to be called when a system is out of
- memory. This feature along with /proc/sys/kernel/panic allows a
- different behavior on out-of-memory conditions when the standard
- behavior (killing processes in an attempt to recover) does not
- make sense.
-
- If unsure, say N.
-
-config OOM_KILL
- bool
- depends on !OOM_PANIC
- default y
menuconfig EMBEDDED
bool "Configure standard kernel features (for small systems)"
#else
#define init_ckrm_sched_res() ((void)0)
#endif
-//#include <linux/ckrm_sched.h>
/*
* This is one of the first .c files built. Error out early
* printk() and can access its per-cpu storage.
*/
smp_prepare_boot_cpu();
-
/*
* Set up the scheduler prior starting any interrupts (such as the
* timer interrupt). Full topology setup happens at smp_init()
* firmware files.
*/
populate_rootfs();
-
do_basic_setup();
-
init_ckrm_sched_res();
sched_init_smp();
goto out_inode;
}
/* all is ok */
+#warning MEF PLANETLAB: info->user = get_uid(u); is something new in Fedora Core.
info->user = get_uid(u);
} else if (S_ISDIR(mode)) {
inode->i_nlink++;
shm_unlock(shp);
if (!is_file_hugepages(shp->shm_file))
shmem_lock(shp->shm_file, 0, shp->mlock_user);
- else
- user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
- shp->mlock_user);
fput (shp->shm_file);
security_shm_free(shp);
ipc_rcu_free(shp, sizeof(struct shmid_kernel));
shp->shm_perm.key = key;
shp->shm_perm.xid = current->xid;
shp->shm_flags = (shmflg & S_IRWXUGO);
- shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
return error;
}
- if (shmflg & SHM_HUGETLB) {
- /* hugetlb_zero_setup takes care of mlock user accounting */
+ if (shmflg & SHM_HUGETLB)
file = hugetlb_zero_setup(size);
- shp->mlock_user = current->user;
- } else {
+ else {
sprintf (name, "SYSV%08x", key);
file = shmem_file_setup(name, size, VM_ACCOUNT);
}
shp->shm_nattch = 0;
shp->id = shm_buildid(id,shp->shm_perm.seq);
shp->shm_file = file;
+ shp->mlock_user = NULL;
file->f_dentry->d_inode->i_ino = shp->id;
if (shmflg & SHM_HUGETLB)
set_file_hugepages(file);
case SHM_UNLOCK:
{
/* Allow superuser to lock segment in memory */
- if (!can_do_mlock() && cmd == SHM_LOCK) {
+ if (!can_do_mlock()) {
err = -EPERM;
goto out;
}
goto out_unlock;
if(cmd==SHM_LOCK) {
- struct user_struct * user = current->user;
if (!is_file_hugepages(shp->shm_file)) {
- err = shmem_lock(shp->shm_file, 1, user);
- if (!err) {
+ err = shmem_lock(shp->shm_file, 1, current->user);
+ if (!err)
shp->shm_flags |= SHM_LOCKED;
- shp->mlock_user = user;
- }
}
- } else if (!is_file_hugepages(shp->shm_file)) {
- shmem_lock(shp->shm_file, 0, shp->mlock_user);
+ } else {
+ if (!is_file_hugepages(shp->shm_file))
+ shmem_lock(shp->shm_file, 0, shp->mlock_user);
shp->shm_flags &= ~SHM_LOCKED;
- shp->mlock_user = NULL;
}
shm_unlock(shp);
goto out;
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
- !capable(CAP_IPC_OWNER))
- return -1;
+ !capable(CAP_IPC_OWNER)) {
+ if (!can_do_mlock()) {
+ return -1;
+ }
+ }
return security_ipc_permission(ipcp, flag);
}
obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_IKCONFIG_PROC) += configs.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
-obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_classqueue.o ckrm_sched.o
+obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_classqueue.o
+obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_sched.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
+obj-$(CONFIG_KGDB) += kgdbstub.o
+
ifneq ($(CONFIG_IA64),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
*/
memset((caddr_t)&ac, 0, sizeof(acct_t));
- ac.ac_version = ACCT_VERSION | ACCT_BYTEORDER;
+ ac.ac_version = ACCT_VERSION;
strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
elapsed = jiffies_64_to_AHZ(get_jiffies_64() - current->start_time);
old_encode_dev(tty_devnum(current->signal->tty)) : 0;
read_unlock(&tasklist_lock);
- ac.ac_flag = 0;
+ /* ABYTESEX is always set to allow byte order detection */
+ ac.ac_flag = ABYTESEX;
if (current->flags & PF_FORKNOEXEC)
ac.ac_flag |= AFORK;
if (current->flags & PF_SUPERPRIV)
#
ifeq ($(CONFIG_CKRM),y)
- obj-y = ckrm.o ckrmutils.o ckrm_numtasks_stub.o rbce/
+ obj-y = ckrm.o ckrmutils.o ckrm_tasks_stub.o rbce/
endif
obj-$(CONFIG_CKRM_TYPE_TASKCLASS) += ckrm_tc.o
- obj-$(CONFIG_CKRM_RES_NUMTASKS) += ckrm_numtasks.o
+ obj-$(CONFIG_CKRM_RES_NUMTASKS) += ckrm_tasks.o
obj-$(CONFIG_CKRM_TYPE_SOCKETCLASS) += ckrm_sockc.o
- obj-$(CONFIG_CKRM_RES_LISTENAQ) += ckrm_laq.o
- obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_cpu_class.o ckrm_cpu_monitor.o
- obj-$(CONFIG_CKRM_RES_MEM) += ckrm_mem.o
+ obj-$(CONFIG_CKRM_RES_LISTENAQ) += ckrm_listenaq.o
+ obj-$(CONFIG_CKRM_CPU_SCHEDULE) += ckrm_cpu_class.o
+ obj-$(CONFIG_CKRM_CPU_MONITOR) += ckrm_cpu_monitor.o
static inline void set_callbacks_active(struct ckrm_classtype *ctype)
{
- ctype->ce_cb_active = ((atomic_read(&ctype->ce_regd) > 0) &&
+ ctype->ce_cb_active = ((atomic_read(&ctype->ce_nr_users) > 0) &&
(ctype->ce_callbacks.always_callback
|| (ctype->num_classes > 1)));
}
if (ctype == NULL)
return (-ENOENT);
- atomic_inc(&ctype->ce_regd);
-
- /* another engine registered or trying to register ? */
- if (atomic_read(&ctype->ce_regd) != 1) {
- atomic_dec(&ctype->ce_regd);
+ ce_protect(ctype);
+ if (atomic_read(&ctype->ce_nr_users) != 1) {
+		// Some engine is active, deregister it first.
+ ce_release(ctype);
return (-EBUSY);
}
if (!(((ecbs->classify) && (ecbs->class_delete)) || (ecbs->notify)) ||
(ecbs->c_interest && ecbs->classify == NULL) ||
(ecbs->n_interest && ecbs->notify == NULL)) {
- atomic_dec(&ctype->ce_regd);
+ ce_release(ctype);
return (-EINVAL);
}
+ /* Is any other engine registered for this classtype ? */
+ if (ctype->ce_regd) {
+ ce_release(ctype);
+ return (-EINVAL);
+ }
+
+ ctype->ce_regd = 1;
ctype->ce_callbacks = *ecbs;
set_callbacks_active(ctype);
ctype->ce_cb_active = 0;
- if (atomic_read(&ctype->ce_nr_users) > 1) {
+ if (atomic_dec_and_test(&ctype->ce_nr_users) != 1) {
// Somebody is currently using the engine, cannot deregister.
- return (-EAGAIN);
+ atomic_inc(&ctype->ce_nr_users);
+ return (-EBUSY);
}
- atomic_set(&ctype->ce_regd, 0);
+ ctype->ce_regd = 0;
memset(&ctype->ce_callbacks, 0, sizeof(ckrm_eng_callback_t));
return 0;
}
CLS_DEBUG("name %s => %p\n", name ? name : "default", dcore);
if ((dcore != clstype->default_class) && (!ckrm_is_core_valid(parent))){
- printk(KERN_DEBUG "error not a valid parent %p\n", parent);
+ printk("error not a valid parent %p\n", parent);
return -EINVAL;
}
#if 0
(void **)kmalloc(clstype->max_resid * sizeof(void *),
GFP_KERNEL);
if (dcore->res_class == NULL) {
- printk(KERN_DEBUG "error no mem\n");
+ printk("error no mem\n");
return -ENOMEM;
}
}
parent->name);
if (core->delayed) {
/* this core was marked as late */
- printk(KERN_DEBUG "class <%s> finally deleted %lu\n", core->name, jiffies);
+ printk("class <%s> finally deleted %lu\n", core->name, jiffies);
}
if (ckrm_remove_child(core) == 0) {
- printk(KERN_DEBUG "Core class removal failed. Chilren present\n");
+ printk("Core class removal failed. Chilren present\n");
}
for (i = 0; i < clstype->max_resid; i++) {
*/
read_lock(&ckrm_class_lock);
list_for_each_entry(core, &clstype->classes, clslist) {
- printk(KERN_INFO "CKRM .. create res clsobj for resouce <%s>"
+ printk("CKRM .. create res clsobj for resouce <%s>"
"class <%s> par=%p\n", rcbs->res_name,
core->name, core->hnode.parent);
ckrm_alloc_res_class(core, core->hnode.parent, resid);
}
#define ECC_PRINTK(fmt, args...) \
-// printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+// printk("%s: " fmt, __FUNCTION__ , ## args)
void ckrm_invoke_event_cb_chain(enum ckrm_event ev, void *arg)
{
void __init ckrm_init(void)
{
- printk(KERN_DEBUG "CKRM Initialization\n");
+ printk("CKRM Initialization\n");
// register/initialize the Metatypes
#endif
// prepare init_task and then rely on inheritance of properties
ckrm_cb_newtask(&init_task);
- printk(KERN_DEBUG "CKRM Initialization done\n");
+ printk("CKRM Initialization done\n");
}
EXPORT_SYMBOL(ckrm_register_engine);
#include <linux/ckrm_classqueue.h>
#include <linux/seq_file.h>
-struct ckrm_res_ctlr cpu_rcbs;
-/**
- * insert_cpu_class - insert a class to active_cpu_class list
- *
- * insert the class in decreasing order of class weight
- */
-static inline void insert_cpu_class(struct ckrm_cpu_class *cls)
-{
- list_add(&cls->links,&active_cpu_classes);
-}
+struct ckrm_res_ctlr cpu_rcbs;
/*
* initialize a class object and its local queues
*/
-void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares)
+ static void init_cpu_class(struct ckrm_cpu_class *cls,ckrm_shares_t* shares)
{
int i,j,k;
prio_array_t *array;
- ckrm_lrq_t* queue;
-
- cls->shares = *shares;
- cls->cnt_lock = SPIN_LOCK_UNLOCKED;
- ckrm_cpu_stat_init(&cls->stat);
- ckrm_usage_init(&cls->usage);
- cls->magic = CKRM_CPU_CLASS_MAGIC;
+ struct ckrm_local_runqueue* queue;
for (i = 0 ; i < NR_CPUS ; i++) {
queue = &cls->local_queues[i];
queue->top_priority = MAX_PRIO;
cq_node_init(&queue->classqueue_linkobj);
queue->local_cvt = 0;
- queue->lrq_load = 0;
- queue->local_weight = cpu_class_weight(cls);
+ queue->uncounted_cvt = 0;
queue->uncounted_ns = 0;
- queue->savings = 0;
queue->magic = 0x43FF43D7;
}
+ cls->shares = *shares;
+ cls->global_cvt = 0;
+ cls->cnt_lock = SPIN_LOCK_UNLOCKED;
+ ckrm_cpu_stat_init(&cls->stat);
+
// add to class list
write_lock(&class_list_lock);
- insert_cpu_class(cls);
+ list_add(&cls->links,&active_cpu_classes);
write_unlock(&class_list_lock);
}
static inline void set_default_share(ckrm_shares_t *shares)
{
shares->my_guarantee = 0;
- shares->total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
shares->my_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
+ shares->total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
shares->max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
- shares->cur_max_limit = 0;
+ shares->unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
+ shares->cur_max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
}
-struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core)
-{
- struct ckrm_cpu_class * cls;
- cls = ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
- if (valid_cpu_class(cls))
- return cls;
- else
- return NULL;
+struct ckrm_cpu_class * ckrm_get_cpu_class(struct ckrm_core_class *core) {
+ return ckrm_get_res_class(core, cpu_rcbs.resid, struct ckrm_cpu_class);
}
struct ckrm_cpu_class *cls;
if (! parent) /*root class*/
- cls = get_default_cpu_class();
+ cls = default_cpu_class;
else
cls = (struct ckrm_cpu_class *) kmalloc(sizeof(struct ckrm_cpu_class),GFP_ATOMIC);
cls->parent = parent;
}
} else
- printk(KERN_ERR"alloc_cpu_class failed\n");
+ printk("alloc_cpu_class failed GFP_ATOMIC\n");
return cls;
}
return;
/*the default class can't be freed*/
- if (cls == get_default_cpu_class())
+ if (cls == default_cpu_class)
return;
// Assuming there will be no children when this function is called
write_unlock(&class_list_lock);
kfree(cls);
-
- //call ckrm_cpu_monitor after class removed
- ckrm_cpu_monitor(0);
}
/*
parres = NULL;
}
- /*
- * hzheng: CKRM_SHARE_DONTCARE should be handled
- */
- if (new_share->my_guarantee == CKRM_SHARE_DONTCARE)
- new_share->my_guarantee = 0;
-
rc = set_shares(new_share, cur, par);
- if (cur->my_limit == CKRM_SHARE_DONTCARE)
- cur->my_limit = cur->max_limit;
-
spin_unlock(&cls->cnt_lock);
if (cls->parent) {
spin_unlock(&parres->cnt_lock);
}
-
- //call ckrm_cpu_monitor after changes are changed
- ckrm_cpu_monitor(0);
-
return rc;
}
+/*
+ * translate the global_CVT to ticks
+ */
static int ckrm_cpu_get_share(void *my_res,
struct ckrm_shares *shares)
{
int ckrm_cpu_get_stats(void *my_res, struct seq_file * sfile)
{
struct ckrm_cpu_class *cls = my_res;
- struct ckrm_cpu_class_stat* stat = &cls->stat;
- ckrm_lrq_t* lrq;
- int i;
if (!cls)
return -EINVAL;
seq_printf(sfile, "-------- CPU Class Status Start---------\n");
- seq_printf(sfile, "Share:\n\tgrt= %d limit= %d total_grt= %d max_limit= %d\n",
+ seq_printf(sfile, " gua= %d limit= %d\n",
cls->shares.my_guarantee,
- cls->shares.my_limit,
+ cls->shares.my_limit);
+ seq_printf(sfile, " total_gua= %d limit= %d\n",
cls->shares.total_guarantee,
cls->shares.max_limit);
- seq_printf(sfile, "\tunused_grt= %d cur_max_limit= %d\n",
+ seq_printf(sfile, " used_gua= %d cur_limit= %d\n",
cls->shares.unused_guarantee,
cls->shares.cur_max_limit);
- seq_printf(sfile, "Effective:\n\tegrt= %d\n",stat->egrt);
- seq_printf(sfile, "\tmegrt= %d\n",stat->megrt);
- seq_printf(sfile, "\tehl= %d\n",stat->ehl);
- seq_printf(sfile, "\tmehl= %d\n",stat->mehl);
- seq_printf(sfile, "\teshare= %d\n",stat->eshare);
- seq_printf(sfile, "\tmeshare= %d\n",cpu_class_weight(cls));
- seq_printf(sfile, "\tmax_demand= %lu\n",stat->max_demand);
- seq_printf(sfile, "\ttotal_ns= %llu\n",stat->total_ns);
- seq_printf(sfile, "\tusage(2,10,60)= %d %d %d\n",
- get_ckrm_usage(cls,2*HZ),
- get_ckrm_usage(cls,10*HZ),
- get_ckrm_usage(cls,60*HZ)
- );
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(cls,i);
- seq_printf(sfile, "\tlrq %d demand= %lu weight= %d lrq_load= %lu cvt= %llu sav= %llu\n",i,stat->local_stats[i].cpu_demand,local_class_weight(lrq),lrq->lrq_load,lrq->local_cvt,lrq->savings);
- }
-
+ seq_printf(sfile, " Share= %d\n",cpu_class_weight(cls));
+ seq_printf(sfile, " cvt= %llu\n",cls->local_queues[0].local_cvt);
+ seq_printf(sfile, " total_ns= %llu\n",cls->stat.total_ns);
+ seq_printf(sfile, " prio= %d\n",cls->local_queues[0].classqueue_linkobj.prio);
+ seq_printf(sfile, " index= %d\n",cls->local_queues[0].classqueue_linkobj.index);
+ seq_printf(sfile, " run= %llu\n",cls->stat.local_stats[0].run);
+ seq_printf(sfile, " total= %llu\n",cls->stat.local_stats[0].total);
+ seq_printf(sfile, " cpu_demand= %lu\n",cls->stat.cpu_demand);
+
+ seq_printf(sfile, " effective_guarantee= %d\n",cls->stat.effective_guarantee);
+ seq_printf(sfile, " effective_limit= %d\n",cls->stat.effective_limit);
+ seq_printf(sfile, " effective_share= %d\n",cls->stat.effective_share);
seq_printf(sfile, "-------- CPU Class Status END ---------\n");
+
return 0;
}
/*
* task will remain in the same cpu but on a different local runqueue
*/
-void ckrm_cpu_change_class(void *task, void *old, void *new)
+static void ckrm_cpu_change_class(void *task, void *old, void *new)
{
struct task_struct *tsk = task;
struct ckrm_cpu_class *newcls = new;
+ unsigned long flags;
+ struct runqueue *rq;
+ prio_array_t *array;
/*sanity checking*/
if (!task || ! old || !new)
return;
- _ckrm_cpu_change_class(tsk,newcls);
+ rq = task_rq_lock(tsk,&flags);
+ array = tsk->array;
+ if (array) {
+ dequeue_task(tsk,array);
+ tsk->cpu_class = newcls;
+ enqueue_task(tsk,rq_active(tsk,rq));
+ } else {
+ tsk->cpu_class = newcls;
+ }
+ task_rq_unlock(rq,&flags);
}
/*dummy function, not used*/
if (!cls)
return -EINVAL;
- printk(KERN_DEBUG "ckrm_cpu config='%s'\n",cfgstr);
+ printk("ckrm_cpu config='%s'\n",cfgstr);
return 0;
}
struct ckrm_res_ctlr cpu_rcbs = {
- .res_name = "cpu",
+ .res_name = "CKRM CPU Class",
.res_hdepth = 1,
.resid = -1,
.res_alloc = ckrm_alloc_cpu_class,
if (resid == -1) { /*not registered */
resid = ckrm_register_res_ctlr(clstype,&cpu_rcbs);
- printk(KERN_DEBUG "........init_ckrm_sched_res , resid= %d\n",resid);
+ printk("........init_ckrm_sched_res , resid= %d\n",resid);
}
return 0;
}
//init classqueues for each processor
for (i=0; i < NR_CPUS; i++)
classqueue_init(get_cpu_classqueue(i));
-
- /*
- * hzheng: initialize the default cpu class
- * required for E14/E15 since ckrm_init is called after sched_init
- */
+/*
+ * hzheng: initialize the default cpu class
+ * required for E14 since ckrm_init is called after sched_init
+ */
ckrm_alloc_cpu_class(NULL,NULL);
}
#include <asm/div64.h>
#include <linux/ckrm_sched.h>
-#define CPU_MONITOR_INTERVAL (HZ) /*how often do we adjust the shares*/
+#define CPU_MONITOR_INTERVAL (4*HZ) /*how often do we adjust the shares*/
+#define CKRM_SHARE_ACCURACY 7
#define CKRM_SHARE_MAX (1<<CKRM_SHARE_ACCURACY)
-#define CKRM_CPU_DEMAND_RUN 0
-#define CKRM_CPU_DEMAND_SLEEP 1
-//sample task cpu demand every 64ms
-#define CPU_DEMAND_TASK_RECALC (64000000LL)
-#define CPU_DEMAND_CLASS_RECALC (256000000LL)
-#define CPU_DEMAND_TP_CLASS 0
-#define CPU_DEMAND_TP_TASK 1
-
extern struct ckrm_cpu_class *ckrm_get_cpu_class(struct ckrm_core_class *core);
-void update_ckrm_idle(unsigned long surplus);
-
-/*interface to share definition*/
-static inline int get_soft_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.my_limit;
-}
-
-static inline int get_mysoft_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-static inline int get_hard_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-static inline int get_myhard_limit(struct ckrm_cpu_class *cls)
-{
- return cls->shares.total_guarantee;
-}
-
-
-static inline void cpu_demand_stat_init(struct ckrm_cpu_demand_stat* local_stat, int type)
-{
- unsigned long long now = sched_clock();
-
- local_stat->run = 0;
- local_stat->total = 0;
- local_stat->last_sleep = now;
- switch (type) {
- case CPU_DEMAND_TP_CLASS:
- local_stat->recalc_interval = CPU_DEMAND_CLASS_RECALC;
- local_stat->cpu_demand = 0;
- break;
- case CPU_DEMAND_TP_TASK:
- local_stat->recalc_interval = CPU_DEMAND_TASK_RECALC;
- //for task, the init cpu_demand is copied from its parent
- break;
- default:
- BUG();
- }
-}
void ckrm_cpu_stat_init(struct ckrm_cpu_class_stat *stat)
{
int i;
+ struct ckrm_cpu_class_local_stat* local_stat;
+ unsigned long long now = sched_clock();
stat->stat_lock = SPIN_LOCK_UNLOCKED;
stat->total_ns = 0;
- stat->max_demand = 0;
+ stat->cpu_demand = 0;
for (i=0; i< NR_CPUS; i++) {
- cpu_demand_stat_init(&stat->local_stats[i],CPU_DEMAND_TP_CLASS);
+ local_stat = &stat->local_stats[i];
+ local_stat->run = 0;
+ local_stat->total = 0;
+ local_stat->last_sleep = now;
+ local_stat->cpu_demand = 0;
}
- stat->egrt = 0;
- stat->megrt = 0;
- stat->ehl = CKRM_SHARE_MAX; /*default: no limit*/
- stat->mehl = CKRM_SHARE_MAX; /*default: no limit */
-
- stat->eshare = CKRM_SHARE_MAX;
- stat->meshare = CKRM_SHARE_MAX;
+ stat->effective_guarantee = 0;
+ stat->effective_limit = 0;
+ stat->glut = 0;
+ stat->effective_share = 100;
+ stat->self_effective_share = 100;
}
-
/**********************************************/
/* cpu demand */
/**********************************************/
*/
/**
- * update_cpu_demand_stat -
+ * update_cpu_demand - update a state change
*
- * should be called whenever the state of a task/task local queue changes
+ * should be called whenever the state of a local queue changes
* -- when deschedule : report how much run
* -- when enqueue: report how much sleep
*
- * how often should we recalculate the cpu demand
- * the number is in ns
+ * to deal with excessive long run/sleep state
+ * -- whenever the the ckrm_cpu_monitor is called, check if the class is in sleep state, if yes, then update sleep record
*/
-static inline void update_cpu_demand_stat(struct ckrm_cpu_demand_stat* local_stat,int state, unsigned long long len)
+#define CKRM_CPU_DEMAND_RUN 0
+#define CKRM_CPU_DEMAND_SLEEP 1
+//how often should we recalculate the cpu demand, in ns
+#define CPU_DEMAND_CAL_THRESHOLD (1000000000LL)
+static inline void update_local_cpu_demand(struct ckrm_cpu_class_local_stat* local_stat,int state, unsigned long long len)
{
local_stat->total += len;
if (state == CKRM_CPU_DEMAND_RUN)
local_stat->run += len;
- if (local_stat->total >= local_stat->recalc_interval) {
+ if (local_stat->total >= CPU_DEMAND_CAL_THRESHOLD) {
local_stat->total >>= CKRM_SHARE_ACCURACY;
- if (unlikely(local_stat->run > 0xFFFFFFFF))
- local_stat->run = 0xFFFFFFFF;
-
- if (local_stat->total > 0xFFFFFFFF)
+ if (local_stat->total > 0xFFFFFFFF)
local_stat->total = 0xFFFFFFFF;
-
- do_div(local_stat->run,(unsigned long)local_stat->total);
- if (local_stat->total > 0xFFFFFFFF) //happens after very long sleep
- local_stat->cpu_demand = local_stat->run;
- else {
- local_stat->cpu_demand += local_stat->run;
- local_stat->cpu_demand >>= 1;
- }
+ do_div(local_stat->run,(unsigned long)local_stat->total);
+ local_stat->cpu_demand +=local_stat->run;
+ local_stat->cpu_demand >>= 1;
local_stat->total = 0;
local_stat->run = 0;
}
}
+static inline void cpu_demand_update_run(struct ckrm_cpu_class_local_stat* local_stat, unsigned long long len)
+{
+ update_local_cpu_demand(local_stat,CKRM_CPU_DEMAND_RUN,len);
+}
+
+static inline void cpu_demand_update_sleep(struct ckrm_cpu_class_local_stat* local_stat, unsigned long long len)
+{
+ update_local_cpu_demand(local_stat,CKRM_CPU_DEMAND_SLEEP,len);
+}
+
+#define CPU_DEMAND_ENQUEUE 0
+#define CPU_DEMAND_DEQUEUE 1
+#define CPU_DEMAND_DESCHEDULE 2
+
/**
* cpu_demand_event - and cpu_demand event occured
* @event: one of the following three events:
* CPU_DEMAND_DESCHEDULE: one task belong a certain local class deschedule
* @len: valid only for CPU_DEMAND_DESCHEDULE, how long the task has been run
*/
-void cpu_demand_event(struct ckrm_cpu_demand_stat* local_stat, int event, unsigned long long len)
+void cpu_demand_event(struct ckrm_cpu_class_local_stat* local_stat, int event, unsigned long long len)
{
switch (event) {
case CPU_DEMAND_ENQUEUE:
len = sched_clock() - local_stat->last_sleep;
local_stat->last_sleep = 0;
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_SLEEP,len);
+ cpu_demand_update_sleep(local_stat,len);
break;
case CPU_DEMAND_DEQUEUE:
- if (! local_stat->last_sleep) {
- local_stat->last_sleep = sched_clock();
- }
+ local_stat->last_sleep = sched_clock();
break;
case CPU_DEMAND_DESCHEDULE:
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_RUN,len);
- break;
- case CPU_DEMAND_INIT: //for task init only
- cpu_demand_stat_init(local_stat,CPU_DEMAND_TP_TASK);
+ cpu_demand_update_run(local_stat,len);
break;
default:
BUG();
/**
* check all the class local queue
- *
- * to deal with excessive long run/sleep state
- * -- whenever the the ckrm_cpu_monitor is called, check if the class is in sleep state, if yes, then update sleep record
+ * if the local queue is not in a runqueue, then it is in sleep state
+ * if compared to the last sleep,
*/
static inline void cpu_demand_check_sleep(struct ckrm_cpu_class_stat *stat, int cpu)
{
- struct ckrm_cpu_demand_stat * local_stat = &stat->local_stats[cpu];
+ struct ckrm_cpu_class_local_stat * local_stat = &stat->local_stats[cpu];
unsigned long long sleep,now;
if (local_stat->last_sleep) {
now = sched_clock();
sleep = now - local_stat->last_sleep;
local_stat->last_sleep = now;
- update_cpu_demand_stat(local_stat,CKRM_CPU_DEMAND_SLEEP,sleep);
+ cpu_demand_update_sleep(local_stat,sleep);
}
}
*
* self_cpu_demand = sum(cpu demand of all local queues)
*/
-static inline unsigned long get_self_cpu_demand(struct ckrm_cpu_class_stat *stat)
+static unsigned long get_self_cpu_demand(struct ckrm_cpu_class_stat
+ *stat)
{
int cpu_demand = 0;
int i;
- int cpuonline = 0;
for_each_online_cpu(i) {
cpu_demand_check_sleep(stat,i);
cpu_demand += stat->local_stats[i].cpu_demand;
- cpuonline ++;
}
- return (cpu_demand/cpuonline);
+ if (cpu_demand > CKRM_SHARE_MAX)
+ cpu_demand = CKRM_SHARE_MAX;
+ return cpu_demand;
}
/*
- * my max demand = min(cpu_demand, my effective hard limit)
+ * update effective cpu demand for each class
+ * assume the root_core->parent == NULL
*/
-static inline unsigned long get_mmax_demand(struct ckrm_cpu_class_stat* stat)
-{
- unsigned long mmax_demand = get_self_cpu_demand(stat);
- if (mmax_demand > stat->mehl)
- mmax_demand = stat->mehl;
-
- return mmax_demand;
-}
-
-/**
- * update_max_demand: update effective cpu demand for each class
- * return -1 on error
- *
- * Assume: the root_core->parent == NULL
- */
-static int update_max_demand(struct ckrm_core_class *root_core)
+static void update_cpu_demand(struct ckrm_core_class *root_core)
{
struct ckrm_core_class *cur_core, *child_core;
- struct ckrm_cpu_class *cls,*c_cls;
- int ret = -1;
+ struct ckrm_cpu_class *cls;
cur_core = root_core;
child_core = NULL;
-
- repeat:
- if (!cur_core) { //normal exit
- ret = 0;
- goto out;
- }
+ /*
+ * iterate the tree
+ * update cpu_demand of each node
+ */
+ repeat:
+ if (!cur_core)
+ return;
cls = ckrm_get_cpu_class(cur_core);
- if (! cls) //invalid c_cls, abort
- goto out;
-
if (!child_core) //first child
- cls->stat.max_demand = get_mmax_demand(&cls->stat);
+ cls->stat.cpu_demand = get_self_cpu_demand(&cls->stat);
else {
- c_cls = ckrm_get_cpu_class(child_core);
- if (c_cls)
- cls->stat.max_demand += c_cls->stat.max_demand;
- else //invalid c_cls, abort
- goto out;
+ cls->stat.cpu_demand +=
+ ckrm_get_cpu_class(child_core)->stat.cpu_demand;
+ if (cls->stat.cpu_demand > CKRM_SHARE_MAX)
+ cls->stat.cpu_demand = CKRM_SHARE_MAX;
}
- //check class hard limit
- if (cls->stat.max_demand > cls->stat.ehl)
- cls->stat.max_demand = cls->stat.ehl;
-
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
if (child_core) {
cur_core = child_core->hnode.parent;
}
goto repeat;
- out:
- return ret;
}
/**********************************************/
/* effective guarantee & limit */
/**********************************************/
-static inline void set_eshare(struct ckrm_cpu_class_stat *stat,
+static inline void set_effective_share(struct ckrm_cpu_class_stat *stat,
int new_share)
{
if (!new_share)
new_share = 1;
-
- BUG_ON(new_share < 0);
- stat->eshare = new_share;
+ stat->effective_share = new_share;
}
-static inline void set_meshare(struct ckrm_cpu_class_stat *stat,
+static inline void set_self_effective_share(struct ckrm_cpu_class_stat *stat,
int new_share)
{
if (!new_share)
new_share = 1;
-
- BUG_ON(new_share < 0);
- stat->meshare = new_share;
+ stat->self_effective_share = new_share;
}
-/**
- *update_child_effective - update egrt, ehl, mehl for all children of parent
- *@parent: the parent node
- *return -1 if anything wrong
- *
- */
-static int update_child_effective(struct ckrm_core_class *parent)
+static inline void update_child_effective(struct ckrm_core_class *parent)
{
struct ckrm_cpu_class *p_cls = ckrm_get_cpu_class(parent);
- struct ckrm_core_class *child_core;
- int ret = -1;
-
- if (! p_cls)
- return ret;
+ struct ckrm_core_class *child_core = ckrm_get_next_child(parent, NULL);
- child_core = ckrm_get_next_child(parent, NULL);
while (child_core) {
struct ckrm_cpu_class *c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- return ret;
- c_cls->stat.egrt =
- p_cls->stat.egrt *
+ c_cls->stat.effective_guarantee =
+ p_cls->stat.effective_guarantee *
c_cls->shares.my_guarantee / p_cls->shares.total_guarantee;
-
- c_cls->stat.megrt = c_cls->stat.egrt * c_cls->shares.unused_guarantee
- / c_cls->shares.total_guarantee;
-
- c_cls->stat.ehl =
- p_cls->stat.ehl *
- get_hard_limit(c_cls) / p_cls->shares.total_guarantee;
-
- c_cls->stat.mehl =
- c_cls->stat.ehl *
- get_myhard_limit(c_cls) / c_cls->shares.total_guarantee;
-
- set_eshare(&c_cls->stat,c_cls->stat.egrt);
- set_meshare(&c_cls->stat,c_cls->stat.megrt);
-
+ c_cls->stat.effective_limit =
+ p_cls->stat.effective_guarantee * c_cls->shares.my_limit /
+ p_cls->shares.total_guarantee;
child_core = ckrm_get_next_child(parent, child_core);
};
- return 0;
+
}
-/**
- * update_effectives: update egrt, ehl, mehl for the whole tree
+/*
+ * update effective guarantee and effective limit
+ * -- effective share = parent->effective->share * share/parent->total_share
+ * -- effective limit = parent->effective->share * limit/parent->total_share
* should be called only when class structure changed
- *
- * return -1 if anything wrong happened (eg: the structure changed during the process)
*/
-static int update_effectives(struct ckrm_core_class *root_core)
+static void update_effective_guarantee_limit(struct ckrm_core_class *root_core)
{
- struct ckrm_core_class *cur_core, *child_core;
+ struct ckrm_core_class *cur_core, *child_core = NULL;
struct ckrm_cpu_class *cls;
- int ret = -1;
cur_core = root_core;
- child_core = NULL;
cls = ckrm_get_cpu_class(cur_core);
+ cls->stat.effective_guarantee = CKRM_SHARE_MAX;
+ cls->stat.effective_limit = cls->stat.effective_guarantee;
- //initialize the effectives for root
- cls->stat.egrt = CKRM_SHARE_MAX; /*egrt of the root is always 100% */
- cls->stat.megrt = cls->stat.egrt * cls->shares.unused_guarantee
- / cls->shares.total_guarantee;
- cls->stat.ehl = CKRM_SHARE_MAX * get_hard_limit(cls)
- / cls->shares.total_guarantee;
- cls->stat.mehl = cls->stat.ehl * get_myhard_limit(cls)
- / cls->shares.total_guarantee;
- set_eshare(&cls->stat,cls->stat.egrt);
- set_meshare(&cls->stat,cls->stat.megrt);
-
- repeat:
+ repeat:
//check exit
if (!cur_core)
- return 0;
+ return;
- //visit this node only once
- if (! child_core)
- if (update_child_effective(cur_core) < 0)
- return ret; //invalid cur_core node
-
+ //visit this node
+ update_child_effective(cur_core);
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
-
if (child_core) {
- //go down to the next hier
+ //go down
cur_core = child_core;
child_core = NULL;
- } else { //no more child, go back
+ goto repeat;
+ } else { //no more child, go back
child_core = cur_core;
cur_core = child_core->hnode.parent;
}
/**********************************************/
/*
- * surplus = egrt - demand
+ * surplus = my_effective_share - demand
* if surplus < 0, surplus = 0
*/
static inline int get_node_surplus(struct ckrm_cpu_class *cls)
{
- int surplus = cls->stat.egrt - cls->stat.max_demand;
+ int surplus = cls->stat.effective_guarantee - cls->stat.cpu_demand;
if (surplus < 0)
surplus = 0;
return surplus;
}
-static inline int get_my_node_surplus(struct ckrm_cpu_class *cls)
-{
- int surplus = cls->stat.megrt - get_mmax_demand(&cls->stat);
-
- if (surplus < 0)
- surplus = 0;
-
- return surplus;
-}
-
-/**
- * consume_surplus: decides how much surplus a node can consume
- * @ckeck_sl: if check_sl is set, then check soft_limitx
+/*
+ * consume the surplus
* return how much consumed
- *
- * implements all the CKRM Scheduling Requirement
- * assume c_cls is valid
+ * set glut when necessary
*/
-static inline int consume_surplus(int surplus,
- struct ckrm_cpu_class *c_cls,
- struct ckrm_cpu_class *p_cls,
- int check_sl
- )
+static inline int node_surplus_consume(int old_surplus,
+ struct ckrm_core_class *child_core,
+ struct ckrm_cpu_class *p_cls)
{
int consumed = 0;
int inc_limit;
- int total_grt = p_cls->shares.total_guarantee;
- BUG_ON(surplus < 0);
+ struct ckrm_cpu_class *c_cls = ckrm_get_cpu_class(child_core);
- /*can't consume more than demand or hard limit*/
- if (c_cls->stat.eshare >= c_cls->stat.max_demand)
+ if (c_cls->stat.glut)
goto out;
- //the surplus allocation is propotional to grt
- consumed =
- surplus * c_cls->shares.my_guarantee / total_grt;
-
- if (! consumed) //no more share
+ //check demand
+ if (c_cls->stat.effective_share >= c_cls->stat.cpu_demand) {
+ c_cls->stat.glut = 1;
goto out;
-
- //hard limit and demand limit
- inc_limit = c_cls->stat.max_demand - c_cls->stat.eshare;
-
- if (check_sl) {
- int esl = p_cls->stat.eshare * get_soft_limit(c_cls)
- /total_grt;
- if (esl < c_cls->stat.max_demand)
- inc_limit = esl - c_cls->stat.eshare;
}
- if (consumed > inc_limit)
- consumed = inc_limit;
-
- BUG_ON(consumed < 0);
- out:
- return consumed;
-}
-
-/*
- * how much a node can consume for itself?
- */
-static inline int consume_self_surplus(int surplus,
- struct ckrm_cpu_class *p_cls,
- int check_sl
- )
-{
- int consumed = 0;
- int inc_limit;
- int total_grt = p_cls->shares.total_guarantee;
- int max_demand = get_mmax_demand(&p_cls->stat);
-
- BUG_ON(surplus < 0);
-
- /*can't consume more than demand or hard limit*/
- if (p_cls->stat.meshare >= max_demand)
- goto out;
-
- //the surplus allocation is propotional to grt
consumed =
- surplus * p_cls->shares.unused_guarantee / total_grt;
-
- if (! consumed) //no more share
- goto out;
-
- //hard limit and demand limit
- inc_limit = max_demand - p_cls->stat.meshare;
+ old_surplus * c_cls->shares.my_guarantee /
+ p_cls->shares.total_guarantee;
- if (check_sl) {
- int mesl = p_cls->stat.eshare * get_mysoft_limit(p_cls)
- /total_grt;
- if (mesl < max_demand)
- inc_limit = mesl - p_cls->stat.meshare;
- }
-
- if (consumed > inc_limit)
+ //check limit
+ inc_limit = c_cls->stat.effective_limit - c_cls->stat.effective_share;
+ if (inc_limit <= consumed) {
+ c_cls->stat.glut = 1;
consumed = inc_limit;
+ }
- BUG_ON(consumed < 0);
- out:
+ c_cls->stat.effective_share += consumed;
+ out:
return consumed;
}
-
/*
- * allocate surplus to all its children and also its default class
- */
-static int alloc_surplus_single_round(
- int surplus,
- struct ckrm_core_class *parent,
- struct ckrm_cpu_class *p_cls,
- int check_sl)
-{
- struct ckrm_cpu_class *c_cls;
- struct ckrm_core_class *child_core = NULL;
- int total_consumed = 0,consumed;
-
- //first allocate to the default class
- consumed =
- consume_self_surplus(surplus,p_cls,check_sl);
-
- if (consumed > 0) {
- set_meshare(&p_cls->stat,p_cls->stat.meshare + consumed);
- total_consumed += consumed;
- }
-
- do {
- child_core = ckrm_get_next_child(parent, child_core);
- if (child_core) {
- c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- return -1;
-
- consumed =
- consume_surplus(surplus, c_cls,
- p_cls,check_sl);
- if (consumed > 0) {
- set_eshare(&c_cls->stat,c_cls->stat.eshare + consumed);
- total_consumed += consumed;
- }
- }
- } while (child_core);
-
- return total_consumed;
-}
-
-/**
- * alloc_surplus_node: re-allocate the shares for children under parent
- * @parent: parent node
- * return the remaining surplus
- *
+ * re-allocate the shares for all the childs under this node
* task:
* 1. get total surplus
* 2. allocate surplus
* 3. set the effective_share of each node
*/
-static int alloc_surplus_node(struct ckrm_core_class *parent)
+static void alloc_surplus_node(struct ckrm_core_class *parent)
{
- struct ckrm_cpu_class *p_cls,*c_cls;
- int total_surplus,consumed;
- int check_sl;
- int ret = -1;
+ int total_surplus = 0, old_surplus = 0;
+ struct ckrm_cpu_class *p_cls = ckrm_get_cpu_class(parent);
struct ckrm_core_class *child_core = NULL;
-
- p_cls = ckrm_get_cpu_class(parent);
- if (! p_cls)
- goto realloc_out;
+ int self_share;
/*
- * get total surplus
+ * calculate surplus
+ * total_surplus = sum(child_surplus)
+ * reset glut flag
+ * initialize effective_share
*/
- total_surplus = p_cls->stat.eshare - p_cls->stat.egrt;
- BUG_ON(total_surplus < 0);
- total_surplus += get_my_node_surplus(p_cls);
-
do {
child_core = ckrm_get_next_child(parent, child_core);
if (child_core) {
- c_cls = ckrm_get_cpu_class(child_core);
- if (! c_cls)
- goto realloc_out;
+ struct ckrm_cpu_class *c_cls =
+ ckrm_get_cpu_class(child_core);
+ ckrm_stat_t *stat = &c_cls->stat;
total_surplus += get_node_surplus(c_cls);
+ stat->glut = 0;
+ set_effective_share(stat, stat->effective_guarantee);
}
} while (child_core);
-
- if (! total_surplus) {
- ret = 0;
- goto realloc_out;
- }
-
- /*
- * distributing the surplus
- * first with the check_sl enabled
- * once all the tasks has research the soft limit, disable check_sl and try again
- */
-
- check_sl = 1;
+ /*distribute the surplus */
+ child_core = NULL;
do {
- consumed = alloc_surplus_single_round(total_surplus,parent,p_cls,check_sl);
- if (consumed < 0) //something is wrong
- goto realloc_out;
+ if (!child_core) //keep the surplus of last round
+ old_surplus = total_surplus;
- if (! consumed)
- check_sl = 0;
- else
- total_surplus -= consumed;
+ child_core = ckrm_get_next_child(parent, child_core);
+ if (child_core) {
+ total_surplus -=
+ node_surplus_consume(old_surplus, child_core,
+ p_cls);
+ }
+ //start a new round if something is allocated in the last round
+ } while (child_core || (total_surplus != old_surplus));
- } while ((total_surplus > 0) && (consumed || check_sl) );
+ //any remaining surplus goes to the default class
+ self_share = p_cls->stat.effective_share *
+ p_cls->shares.unused_guarantee / p_cls->shares.total_guarantee;
+ self_share += total_surplus;
- ret = 0;
-
- realloc_out:
- return ret;
+ set_self_effective_share(&p_cls->stat, self_share);
}
/**
* alloc_surplus - reallocate unused shares
*
* class A's usused share should be allocated to its siblings
- * the re-allocation goes downward from the top
*/
-static int alloc_surplus(struct ckrm_core_class *root_core)
+static void alloc_surplus(struct ckrm_core_class *root_core)
{
- struct ckrm_core_class *cur_core, *child_core;
- // struct ckrm_cpu_class *cls;
- int ret = -1;
+ struct ckrm_core_class *cur_core, *child_core = NULL;
+ struct ckrm_cpu_class *cls;
- /*initialize*/
cur_core = root_core;
- child_core = NULL;
- // cls = ckrm_get_cpu_class(cur_core);
-
- /*the ckrm idle tasks get all what's remaining*/
- /*hzheng: uncomment the following like for hard limit support */
- // update_ckrm_idle(CKRM_SHARE_MAX - cls->stat.max_demand);
-
- repeat:
+ cls = ckrm_get_cpu_class(cur_core);
+ cls->stat.glut = 0;
+ set_effective_share(&cls->stat, cls->stat.effective_guarantee);
+ repeat:
//check exit
if (!cur_core)
- return 0;
-
- //visit this node only once
- if (! child_core)
- if ( alloc_surplus_node(cur_core) < 0 )
- return ret;
+ return;
+ //visit this node
+ alloc_surplus_node(cur_core);
//next child
child_core = ckrm_get_next_child(cur_core, child_core);
if (child_core) {
goto repeat;
}
-/**********************************************/
-/* CKRM Idle Tasks */
-/**********************************************/
-struct ckrm_cpu_class ckrm_idle_class_obj, *ckrm_idle_class;
-struct task_struct* ckrm_idle_tasks[NR_CPUS];
-
-/*how many ckrm idle tasks should I wakeup*/
-static inline int get_nr_idle(unsigned long surplus)
-{
- int cpu_online = cpus_weight(cpu_online_map);
- int nr_idle = 0;
-
- nr_idle = surplus * cpu_online;
- nr_idle >>= CKRM_SHARE_ACCURACY;
-
- if (surplus)
- nr_idle ++;
-
- if (nr_idle > cpu_online)
- nr_idle = cpu_online;
-
- return nr_idle;
-}
-
-/**
- * update_ckrm_idle: update the status of the idle class according to the new surplus
- * surplus: new system surplus
- *
- * Task:
- * -- update share of the idle class
- * -- wakeup idle tasks according to surplus
- */
-void update_ckrm_idle(unsigned long surplus)
-{
- int nr_idle = get_nr_idle(surplus);
- int i;
- struct task_struct* idle_task;
-
- set_eshare(&ckrm_idle_class->stat,surplus);
- set_meshare(&ckrm_idle_class->stat,surplus);
- /*wake up nr_idle idle tasks*/
- for_each_online_cpu(i) {
- idle_task = ckrm_idle_tasks[i];
- if (unlikely(idle_task->cpu_class != ckrm_idle_class)) {
- ckrm_cpu_change_class(idle_task,
- idle_task->cpu_class,
- ckrm_idle_class);
- }
- if (! idle_task)
- continue;
- if (i < nr_idle) {
- //activate it
- wake_up_process(idle_task);
- } else {
- //deactivate it
- idle_task->state = TASK_INTERRUPTIBLE;
- set_tsk_need_resched(idle_task);
- }
- }
-}
-
-static int ckrm_cpu_idled(void *nothing)
-{
- set_user_nice(current,19);
- daemonize("ckrm_idle_task");
-
- //deactivate it, it will be awakened by ckrm_cpu_monitor
- current->state = TASK_INTERRUPTIBLE;
- schedule();
-
- /*similar to cpu_idle */
- while (1) {
- while (!need_resched()) {
- ckrm_cpu_monitor(1);
- if (current_cpu_data.hlt_works_ok) {
- local_irq_disable();
- if (!need_resched()) {
- set_tsk_need_resched(current);
- safe_halt();
- } else
- local_irq_enable();
- }
- }
- schedule();
- }
- return 0;
-}
-
-/**
- * ckrm_start_ckrm_idle:
- * create the ckrm_idle_class and starts the idle tasks
- *
- */
-void ckrm_start_ckrm_idle(void)
-{
- int i;
- int ret;
- ckrm_shares_t shares;
-
- ckrm_idle_class = &ckrm_idle_class_obj;
- memset(ckrm_idle_class,0,sizeof(shares));
- /*don't care about the shares */
- init_cpu_class(ckrm_idle_class,&shares);
- printk(KERN_INFO"ckrm idle class %x created\n",(int)ckrm_idle_class);
-
- for_each_online_cpu(i) {
- ret = kernel_thread(ckrm_cpu_idled, 0, CLONE_KERNEL);
-
- /*warn on error, but the system should still work without it*/
- if (ret < 0)
- printk(KERN_ERR"Warn: can't start ckrm idle tasks\n");
- else {
- ckrm_idle_tasks[i] = find_task_by_pid(ret);
- if (!ckrm_idle_tasks[i])
- printk(KERN_ERR"Warn: can't find ckrm idle tasks %d\n",ret);
- }
- }
-}
-
-/**********************************************/
-/* Local Weight */
-/**********************************************/
-/**
- * adjust_class_local_weight: adjust the local weight for each cpu
- *
- * lrq->weight = lpr->pressure * class->weight / total_pressure
- */
-static void adjust_lrq_weight(struct ckrm_cpu_class *clsptr, int cpu_online)
-{
- unsigned long total_pressure = 0;
- ckrm_lrq_t* lrq;
- int i;
- unsigned long class_weight;
- unsigned long long lw;
-
- //get total pressure
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(clsptr,i);
- total_pressure += lrq->lrq_load;
- }
-
- if (! total_pressure)
- return;
-
- class_weight = cpu_class_weight(clsptr) * cpu_online;
-
- /*
- * update weight for each cpu, minimun is 1
- */
- for_each_online_cpu(i) {
- lrq = get_ckrm_lrq(clsptr,i);
- if (! lrq->lrq_load)
- /*give idle class a high share to boost interactiveness */
- lw = cpu_class_weight(clsptr);
- else {
- lw = lrq->lrq_load * class_weight;
- do_div(lw,total_pressure);
- if (!lw)
- lw = 1;
- else if (lw > CKRM_SHARE_MAX)
- lw = CKRM_SHARE_MAX;
- }
-
- lrq->local_weight = lw;
- }
-}
-
-/*
- * assume called with class_list_lock read lock held
- */
-void adjust_local_weight(void)
-{
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- struct ckrm_cpu_class *clsptr;
- int cpu_online;
-
- //do nothing if someone already holding the lock
- if (! spin_trylock(&lock))
- return;
-
- cpu_online = cpus_weight(cpu_online_map);
-
- //class status: demand, share,total_ns prio, index
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- adjust_lrq_weight(clsptr,cpu_online);
- }
-
- spin_unlock(&lock);
-}
-
-/**********************************************/
-/* Main */
-/**********************************************/
/**
*ckrm_cpu_monitor - adjust relative shares of the classes based on their progress
- *@check_min: if check_min is set, the call can't be within 100ms of last call
*
* this function is called every CPU_MONITOR_INTERVAL
* it computes the cpu demand of each class
* and re-allocate the un-used shares to other classes
*/
-void ckrm_cpu_monitor(int check_min)
+void ckrm_cpu_monitor(void)
{
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
- static unsigned long long last_check = 0;
- struct ckrm_core_class *root_core = get_default_cpu_class()->core;
- unsigned long long now;
-#define MIN_CPU_MONITOR_INTERVAL 100000000UL
-
+ struct ckrm_core_class *root_core = default_cpu_class->core;
if (!root_core)
return;
- //do nothing if someone already holding the lock
- if (! spin_trylock(&lock))
- return;
-
- read_lock(&class_list_lock);
-
- now = sched_clock();
-
- //consecutive check should be at least 100ms apart
- if (check_min && ((now - last_check) < MIN_CPU_MONITOR_INTERVAL))
- goto outunlock;
-
- last_check = now;
-
- if (update_effectives(root_core) != 0)
- goto outunlock;
-
- if (update_max_demand(root_core) != 0)
- goto outunlock;
-
-#ifndef ALLOC_SURPLUS_SUPPORT
-#warning "MEF taking out alloc_surplus"
-#else
- if (alloc_surplus(root_core) != 0)
- goto outunlock;
-#endif
-
- adjust_local_weight();
-
- outunlock:
- read_unlock(&class_list_lock);
- spin_unlock(&lock);
+ update_effective_guarantee_limit(root_core);
+ update_cpu_demand(root_core);
+ alloc_surplus(root_core);
}
/*****************************************************/
static int ckrm_cpu_monitord(void *nothing)
{
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+
daemonize("ckrm_cpu_ctrld");
for (;;) {
/*sleep for sometime before next try*/
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(CPU_MONITOR_INTERVAL);
- ckrm_cpu_monitor(1);
+ interruptible_sleep_on_timeout(&wait, CPU_MONITOR_INTERVAL);
+ ckrm_cpu_monitor();
if (thread_exit) {
break;
}
}
cpu_monitor_pid = -1;
thread_exit = 2;
- printk(KERN_DEBUG "cpu_monitord exit\n");
+ printk("cpu_monitord exit\n");
return 0;
}
{
cpu_monitor_pid = kernel_thread(ckrm_cpu_monitord, 0, CLONE_KERNEL);
if (cpu_monitor_pid < 0) {
- printk(KERN_DEBUG "ckrm_cpu_monitord for failed\n");
+ printk("ckrm_cpu_monitord for failed\n");
}
}
void ckrm_kill_monitor(void)
{
- printk(KERN_DEBUG "killing process %d\n", cpu_monitor_pid);
+ wait_queue_head_t wait;
+ int interval = HZ;
+ init_waitqueue_head(&wait);
+
+ printk("killing process %d\n", cpu_monitor_pid);
if (cpu_monitor_pid > 0) {
thread_exit = 1;
while (thread_exit != 2) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(CPU_MONITOR_INTERVAL);
+ interruptible_sleep_on_timeout(&wait, interval);
}
}
}
int ckrm_cpu_monitor_init(void)
{
ckrm_start_monitor();
- /*hzheng: uncomment the following like for hard limit support */
- // ckrm_start_ckrm_idle();
return 0;
}
+++ /dev/null
-/* ckrm_socketaq.c - accept queue resource controller
- *
- * Copyright (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/* Changes
- * Initial version
- */
-
-/* Code Description: TBD
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/errno.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/ckrm.h>
-#include <linux/ckrm_rc.h>
-#include <net/tcp.h>
-
-#include <linux/ckrm_net.h>
-
-#define hnode_2_core(ptr) \
- ((ptr) ? container_of(ptr, struct ckrm_core_class, hnode) : NULL)
-
-#define CKRM_SAQ_MAX_DEPTH 3 // 0 => /rcfs
- // 1 => socket_aq
- // 2 => socket_aq/listen_class
- // 3 => socket_aq/listen_class/accept_queues
- // 4 => Not allowed
-
-typedef struct ckrm_laq_res {
- spinlock_t reslock;
- atomic_t refcnt;
- struct ckrm_shares shares;
- struct ckrm_core_class *core;
- struct ckrm_core_class *pcore;
- int my_depth;
- int my_id;
- unsigned int min_ratio;
-} ckrm_laq_res_t;
-
-static int my_resid = -1;
-
-extern struct ckrm_core_class *rcfs_create_under_netroot(char *, int, int);
-extern struct ckrm_core_class *rcfs_make_core(struct dentry *,
- struct ckrm_core_class *);
-
-void laq_res_hold(struct ckrm_laq_res *res)
-{
- atomic_inc(&res->refcnt);
- return;
-}
-
-void laq_res_put(struct ckrm_laq_res *res)
-{
- if (atomic_dec_and_test(&res->refcnt))
- kfree(res);
- return;
-}
-
-/* Initialize rescls values
- */
-static void laq_res_initcls(void *my_res)
-{
- ckrm_laq_res_t *res = my_res;
-
- res->shares.my_guarantee = CKRM_SHARE_DONTCARE;
- res->shares.my_limit = CKRM_SHARE_DONTCARE;
- res->shares.total_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.max_limit = CKRM_SHARE_DFLT_MAX_LIMIT;
- res->shares.unused_guarantee = CKRM_SHARE_DFLT_TOTAL_GUARANTEE;
- res->shares.cur_max_limit = 0;
-}
-
-static int atoi(char *s)
-{
- int k = 0;
- while (*s)
- k = *s++ - '0' + (k * 10);
- return k;
-}
-
-static char *laq_get_name(struct ckrm_core_class *c)
-{
- char *p = (char *)c->name;
-
- while (*p)
- p++;
- while (*p != '/' && p != c->name)
- p--;
-
- return ++p;
-}
-
-static void *laq_res_alloc(struct ckrm_core_class *core,
- struct ckrm_core_class *parent)
-{
- ckrm_laq_res_t *res, *pres;
- int pdepth;
-
- if (parent)
- pres = ckrm_get_res_class(parent, my_resid, ckrm_laq_res_t);
- else
- pres = NULL;
-
- if (core == core->classtype->default_class)
- pdepth = 1;
- else {
- if (!parent)
- return NULL;
- pdepth = 1 + pres->my_depth;
- }
-
- res = kmalloc(sizeof(ckrm_laq_res_t), GFP_ATOMIC);
- if (res) {
- memset(res, 0, sizeof(res));
- spin_lock_init(&res->reslock);
- laq_res_hold(res);
- res->my_depth = pdepth;
- if (pdepth == 2) // listen class
- res->my_id = 0;
- else if (pdepth == 3)
- res->my_id = atoi(laq_get_name(core));
- res->core = core;
- res->pcore = parent;
-
- // rescls in place, now initialize contents other than
- // hierarchy pointers
- laq_res_initcls(res); // acts as initialising value
- }
-
- return res;
-}
-
-static void laq_res_free(void *my_res)
-{
- ckrm_laq_res_t *res = (ckrm_laq_res_t *) my_res;
- ckrm_laq_res_t *parent;
-
- if (!res)
- return;
-
- if (res->my_depth != 3) {
- kfree(res);
- return;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) // Should never happen
- return;
-
- spin_lock(&parent->reslock);
- spin_lock(&res->reslock);
-
- // return child's guarantee to parent node
- // Limits have no meaning for accept queue control
- child_guarantee_changed(&parent->shares, res->shares.my_guarantee, 0);
-
- spin_unlock(&res->reslock);
- laq_res_put(res);
- spin_unlock(&parent->reslock);
- return;
-}
-
-/**************************************************************************
- * SHARES ***
- **************************************************************************/
-
-void laq_set_aq_value(struct ckrm_net_struct *ns, unsigned int *aq_ratio)
-{
- int i;
- struct tcp_opt *tp;
-
- tp = tcp_sk(ns->ns_sk);
- for (i = 0; i < NUM_ACCEPT_QUEUES; i++)
- tp->acceptq[i].aq_ratio = aq_ratio[i];
- return;
-}
-void laq_set_aq_values(ckrm_laq_res_t * parent, unsigned int *aq_ratio)
-{
-
- struct ckrm_net_struct *ns;
- struct ckrm_core_class *core = parent->core;
-
- class_lock(core);
- list_for_each_entry(ns, &core->objlist, ckrm_link) {
- laq_set_aq_value(ns, aq_ratio);
- }
- class_unlock(core);
- return;
-}
-
-static void calculate_aq_ratios(ckrm_laq_res_t * res, unsigned int *aq_ratio)
-{
- struct ckrm_hnode *chnode;
- ckrm_laq_res_t *child;
- unsigned int min;
- int i;
-
- min = aq_ratio[0] = (unsigned int)res->shares.unused_guarantee;
-
- list_for_each_entry(chnode, &res->core->hnode.children, siblings) {
- child = hnode_2_core(chnode)->res_class[my_resid];
-
- aq_ratio[child->my_id] =
- (unsigned int)child->shares.my_guarantee;
- if (aq_ratio[child->my_id] == CKRM_SHARE_DONTCARE)
- aq_ratio[child->my_id] = 0;
- if (aq_ratio[child->my_id] &&
- ((unsigned int)aq_ratio[child->my_id] < min))
- min = (unsigned int)child->shares.my_guarantee;
- }
-
- if (min == 0) {
- min = 1;
- // default takes all if nothing specified
- aq_ratio[0] = 1;
- }
- res->min_ratio = min;
-
- for (i = 0; i < NUM_ACCEPT_QUEUES; i++)
- aq_ratio[i] = aq_ratio[i] / min;
-}
-
-static int laq_set_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_laq_res_t *res = my_res;
- ckrm_laq_res_t *parent;
- unsigned int aq_ratio[NUM_ACCEPT_QUEUES];
- int rc = 0;
-
- if (!res)
- return -EINVAL;
-
- if (!res->pcore) {
- // something is badly wrong
- printk(KERN_ERR "socketaq internal inconsistency\n");
- return -EBADF;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) // socketclass does not have a share interface
- return -EINVAL;
-
- // Ensure that we ignore limit values
- shares->my_limit = CKRM_SHARE_DONTCARE;
- shares->max_limit = CKRM_SHARE_UNCHANGED;
-
- if (res->my_depth == 0) {
- printk(KERN_ERR "socketaq bad entry\n");
- return -EBADF;
- } else if (res->my_depth == 1) {
- // can't be written to. This is an internal default.
- return -EINVAL;
- } else if (res->my_depth == 2) {
- //nothin to inherit
- if (!shares->total_guarantee) {
- return -EINVAL;
- }
- parent = res;
- shares->my_guarantee = CKRM_SHARE_DONTCARE;
- } else if (res->my_depth == 3) {
- // accept queue itself.
- shares->total_guarantee = CKRM_SHARE_UNCHANGED;
- }
-
- ckrm_lock_hier(parent->pcore);
- spin_lock(&parent->reslock);
- rc = set_shares(shares, &res->shares,
- (parent == res) ? NULL : &parent->shares);
- if (rc) {
- spin_unlock(&res->reslock);
- ckrm_unlock_hier(res->pcore);
- return rc;
- }
- calculate_aq_ratios(parent, aq_ratio);
- laq_set_aq_values(parent, aq_ratio);
- spin_unlock(&parent->reslock);
- ckrm_unlock_hier(parent->pcore);
-
- return rc;
-}
-
-static int laq_get_share_values(void *my_res, struct ckrm_shares *shares)
-{
- ckrm_laq_res_t *res = my_res;
-
- if (!res)
- return -EINVAL;
- *shares = res->shares;
- return 0;
-}
-
-/**************************************************************************
- * STATS ***
- **************************************************************************/
-
-void
-laq_print_aq_stats(struct seq_file *sfile, struct tcp_acceptq_info *taq, int i)
-{
- seq_printf(sfile, "Class %d connections:\n\taccepted: %u\n\t"
- "queued: %u\n\twait_time: %u\n",
- i, taq->acceptq_count, taq->acceptq_qcount,
- jiffies_to_msecs(taq->acceptq_wait_time));
-
- if (i)
- return;
-
- for (i = 1; i < NUM_ACCEPT_QUEUES; i++) {
- taq[0].acceptq_wait_time += taq[i].acceptq_wait_time;
- taq[0].acceptq_qcount += taq[i].acceptq_qcount;
- taq[0].acceptq_count += taq[i].acceptq_count;
- }
-
- seq_printf(sfile, "Totals :\n\taccepted: %u\n\t"
- "queued: %u\n\twait_time: %u\n",
- taq->acceptq_count, taq->acceptq_qcount,
- jiffies_to_msecs(taq->acceptq_wait_time));
-
- return;
-}
-
-void
-laq_get_aq_stats(ckrm_laq_res_t * pres, ckrm_laq_res_t * mres,
- struct tcp_acceptq_info *taq)
-{
- struct ckrm_net_struct *ns;
- struct ckrm_core_class *core = pres->core;
- struct tcp_opt *tp;
- int a = mres->my_id;
- int z;
-
- if (a == 0)
- z = NUM_ACCEPT_QUEUES;
- else
- z = a + 1;
-
- // XXX Instead of holding a class_lock introduce a rw
- // lock to be write locked by listen callbacks and read locked here.
- // - VK
- class_lock(pres->core);
- list_for_each_entry(ns, &core->objlist, ckrm_link) {
- tp = tcp_sk(ns->ns_sk);
- for (; a < z; a++) {
- taq->acceptq_wait_time += tp->acceptq[a].aq_wait_time;
- taq->acceptq_qcount += tp->acceptq[a].aq_qcount;
- taq->acceptq_count += tp->acceptq[a].aq_count;
- taq++;
- }
- }
- class_unlock(pres->core);
-}
-
-static int laq_get_stats(void *my_res, struct seq_file *sfile)
-{
- ckrm_laq_res_t *res = my_res;
- ckrm_laq_res_t *parent;
- struct tcp_acceptq_info taq[NUM_ACCEPT_QUEUES];
- int rc = 0;
-
- if (!res)
- return -EINVAL;
-
- if (!res->pcore) {
- // something is badly wrong
- printk(KERN_ERR "socketaq internal inconsistency\n");
- return -EBADF;
- }
-
- parent = ckrm_get_res_class(res->pcore, my_resid, ckrm_laq_res_t);
- if (!parent) { // socketclass does not have a stat interface
- printk(KERN_ERR "socketaq internal fs inconsistency\n");
- return -EINVAL;
- }
-
- memset(taq, 0, sizeof(struct tcp_acceptq_info) * NUM_ACCEPT_QUEUES);
-
- switch (res->my_depth) {
-
- default:
- case 0:
- printk(KERN_ERR "socket class bad entry\n");
- rc = -EBADF;
- break;
-
- case 1: // can't be read from. this is internal default.
- // return -EINVAL
- rc = -EINVAL;
- break;
-
- case 2: // return the default and total
- ckrm_lock_hier(res->core); // block any deletes
- laq_get_aq_stats(res, res, &taq[0]);
- laq_print_aq_stats(sfile, &taq[0], 0);
- ckrm_unlock_hier(res->core); // block any deletes
- break;
-
- case 3:
- ckrm_lock_hier(parent->core); // block any deletes
- laq_get_aq_stats(parent, res, &taq[res->my_id]);
- laq_print_aq_stats(sfile, &taq[res->my_id], res->my_id);
- ckrm_unlock_hier(parent->core); // block any deletes
- break;
- }
-
- return rc;
-}
-
-/*
- * The network connection is reclassified to this class. Update its shares.
- * The socket lock is held.
- */
-static void laq_change_resclass(void *n, void *old, void *r)
-{
- struct ckrm_net_struct *ns = (struct ckrm_net_struct *)n;
- struct ckrm_laq_res *res = (struct ckrm_laq_res *)r;
- unsigned int aq_ratio[NUM_ACCEPT_QUEUES];
-
- if (res->my_depth != 2)
- return;
-
- // a change to my_depth == 3 ie. the accept classes cannot happen.
- // there is no target file
- if (res->my_depth == 2) { // it is one of the socket classes
- ckrm_lock_hier(res->pcore);
- // share rule: hold parent resource lock. then self.
- // However, since my_depth == 1 is a generic class it is not
- // needed here. Self lock is enough.
- spin_lock(&res->reslock);
- calculate_aq_ratios(res, aq_ratio);
- class_lock(res->pcore);
- laq_set_aq_value(ns, aq_ratio);
- class_unlock(res->pcore);
- spin_unlock(&res->reslock);
- ckrm_unlock_hier(res->pcore);
- }
-
- return;
-}
-
-struct ckrm_res_ctlr laq_rcbs = {
- .res_name = "laq",
- .resid = -1, // dynamically assigned
- .res_alloc = laq_res_alloc,
- .res_free = laq_res_free,
- .set_share_values = laq_set_share_values,
- .get_share_values = laq_get_share_values,
- .get_stats = laq_get_stats,
- .change_resclass = laq_change_resclass,
- //.res_initcls = laq_res_initcls, //HUBERTUS: unnecessary !!
-};
-
-int __init init_ckrm_laq_res(void)
-{
- struct ckrm_classtype *clstype;
- int resid;
-
- clstype = ckrm_find_classtype_by_name("socketclass");
- if (clstype == NULL) {
- printk(KERN_INFO " Unknown ckrm classtype<socketclass>");
- return -ENOENT;
- }
-
- if (my_resid == -1) {
- resid = ckrm_register_res_ctlr(clstype, &laq_rcbs);
- if (resid >= 0)
- my_resid = resid;
- printk(KERN_DEBUG "........init_ckrm_listen_aq_res -> %d\n", my_resid);
- }
- return 0;
-
-}
-
-void __exit exit_ckrm_laq_res(void)
-{
- ckrm_unregister_res_ctlr(&laq_rcbs);
- my_resid = -1;
-}
-
-module_init(init_ckrm_laq_res)
- module_exit(exit_ckrm_laq_res)
-
- MODULE_LICENSE("GPL");
static ckrm_mem_res_t *ckrm_mem_root_class;
atomic_t ckrm_mem_real_count = ATOMIC_INIT(0);
EXPORT_SYMBOL(ckrm_mem_real_count);
-static void ckrm_mem_evaluate_all_pages(void);
/* Initialize rescls values
* May be called on each rcfs unmount or as part of error recovery
res->pg_guar = CKRM_SHARE_DONTCARE;
res->pg_limit = CKRM_SHARE_DONTCARE;
- res->pg_unused = 0;
+ res->pg_unused = CKRM_SHARE_DONTCARE;
}
static void *
if (!res)
return;
- res->shares.my_guarantee = 0;
- res->shares.my_limit = 0;
- res->pg_guar = 0;
- res->pg_limit = 0;
- res->pg_unused = 0;
-
parres = ckrm_get_res_class(res->parent, mem_rcbs.resid, ckrm_mem_res_t);
+
// return child's limit/guarantee to parent node
if (parres) {
child_guarantee_changed(&parres->shares, res->shares.my_guarantee, 0);
child_maxlimit_changed_local(parres);
}
- ckrm_mem_evaluate_all_pages();
- res->core = NULL;
-
+ res->shares.my_guarantee = 0;
+ res->shares.my_limit = 0;
spin_lock(&ckrm_mem_lock);
list_del(&res->mcls_list);
spin_unlock(&ckrm_mem_lock);
mem_class_put(res);
+
return;
}
}
}
- spin_unlock(&mm->peertask_lock);
ckrm_mem_evaluate_mm(mm);
- /*
- printk("chg_cls: task <%s:%d> mm %p oldmm %s newmm %s o %s n %s\n",
- task->comm, task->pid, mm, prev_mmcls ? prev_mmcls->core->name:
- "NULL", mm->memclass ? mm->memclass->core->name : "NULL",
- o ? o->core->name: "NULL", n ? n->core->name: "NULL");
- */
+ spin_unlock(&mm->peertask_lock);
return;
}
guar = (res->pg_guar > 0) ? res->pg_guar : 0;
range = res->pg_limit - guar;
- if ((tot_usage > (guar + ((110 * range) / 100))) &&
+ if ((tot_usage > (guar + ((120 * range) / 100))) &&
(res->pg_lent > (guar + ((25 * range) / 100)))) {
set_flags_of_children(res, CLS_PARENT_OVER);
}
res->reclaim_flags |= CLS_OVER_100;
} else if (cls_usage > (guar + ((3 * range) / 4))) {
res->reclaim_flags |= CLS_OVER_75;
- } else if (cls_usage > (guar + (range / 2))) {
- res->reclaim_flags |= CLS_OVER_50;
- } else if (cls_usage > (guar + (range / 4))) {
- res->reclaim_flags |= CLS_OVER_25;
} else if (cls_usage > guar) {
res->reclaim_flags |= CLS_OVER_GUAR;
} else {
{
int i, j, mask = 0;
- if (*flags == 0) {
- *extract = 0;
+ if (*extract == 0 || *flags == 0) {
return;
}
-
if (*flags & CLS_SHRINK) {
*extract = CLS_SHRINK;
*flags = 0;
return;
}
+
i = fls(*flags);
for (j = i-1; j > 0; j--) {
}
void
-ckrm_at_limit(ckrm_mem_res_t *cls)
+ckrm_near_limit(ckrm_mem_res_t *cls)
{
-#ifndef AT_LIMIT_SUPPORT
-#warning "ckrm_at_limit disabled due to problems with memory hog tests"
-#else
struct zone *zone;
unsigned long now = jiffies;
- if (!cls || (cls->pg_limit == CKRM_SHARE_DONTCARE) ||
- ((cls->flags & MEM_AT_LIMIT) == MEM_AT_LIMIT)) {
+ if (!cls || ((cls->flags & MEM_NEAR_LIMIT) == MEM_NEAR_LIMIT)) {
return;
}
if ((cls->last_shrink + (10 * HZ)) < now) { // 10 seconds since last ?
spin_lock(&ckrm_mem_lock);
list_add(&cls->shrink_list, &ckrm_shrink_list);
spin_unlock(&ckrm_mem_lock);
- cls->flags |= MEM_AT_LIMIT;
+ cls->flags |= MEM_NEAR_LIMIT;
for_each_zone(zone) {
wakeup_kswapd(zone);
break; // only once is enough
}
-#endif // AT_LIMIT_SUPPORT
}
-static int unmapped = 0, changed = 0, unchanged = 0, maxnull = 0,
-anovma = 0, fnovma = 0;
-static void
+static int
ckrm_mem_evaluate_page_anon(struct page* page)
{
ckrm_mem_res_t* pgcls = page_class(page);
struct anon_vma *anon_vma = (struct anon_vma *) page->mapping;
struct vm_area_struct *vma;
struct mm_struct* mm;
- int v = 0;
spin_lock(&anon_vma->lock);
BUG_ON(list_empty(&anon_vma->head));
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- v++;
mm = vma->vm_mm;
if (!maxshareclass ||
ckrm_mem_share_compare(maxshareclass, mm->memclass) < 0) {
}
}
spin_unlock(&anon_vma->lock);
- if (!v)
- anovma++;
- if (!maxshareclass)
- maxnull++;
if (maxshareclass && (pgcls != maxshareclass)) {
ckrm_change_page_class(page, maxshareclass);
- changed++;
- } else
- unchanged++;
- return;
+ return 1;
+ }
+ return 0;
}
-static void
+static int
ckrm_mem_evaluate_page_file(struct page* page)
{
ckrm_mem_res_t* pgcls = page_class(page);
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct prio_tree_iter iter;
struct mm_struct* mm;
- int v = 0;
if (!mapping)
- return;
+ return 0;
if (!spin_trylock(&mapping->i_mmap_lock))
- return;
+ return 0;
while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
- v++;
mm = vma->vm_mm;
if (!maxshareclass || ckrm_mem_share_compare(maxshareclass,mm->memclass)<0)
maxshareclass = mm->memclass;
}
spin_unlock(&mapping->i_mmap_lock);
- if (!v)
- fnovma++;
- if (!maxshareclass)
- maxnull++;
-
if (maxshareclass && pgcls != maxshareclass) {
ckrm_change_page_class(page, maxshareclass);
- changed++;
- } else
- unchanged++;
- return;
+ return 1;
+ }
+ return 0;
}
-static void
+static int
ckrm_mem_evaluate_page(struct page* page)
{
+ int changed = 0;
+
if (page->mapping) {
if (PageAnon(page))
- ckrm_mem_evaluate_page_anon(page);
+ changed = ckrm_mem_evaluate_page_anon(page);
else
- ckrm_mem_evaluate_page_file(page);
- } else
- unmapped++;
- return;
-}
-
-static void
-ckrm_mem_evaluate_all_pages()
-{
- struct page *page;
- struct zone *zone;
- int active = 0, inactive = 0, cleared = 0;
- int act_cnt, inact_cnt, idx;
- ckrm_mem_res_t *res;
-
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- res->tmp_cnt = 0;
+ changed = ckrm_mem_evaluate_page_file(page);
}
- spin_unlock(&ckrm_mem_lock);
-
- for_each_zone(zone) {
- spin_lock_irq(&zone->lru_lock);
- list_for_each_entry(page, &zone->inactive_list, lru) {
- ckrm_mem_evaluate_page(page);
- active++;
- page_class(page)->tmp_cnt++;
- if (!test_bit(PG_ckrm_account, &page->flags))
- cleared++;
- }
- list_for_each_entry(page, &zone->active_list, lru) {
- ckrm_mem_evaluate_page(page);
- inactive++;
- page_class(page)->tmp_cnt++;
- if (!test_bit(PG_ckrm_account, &page->flags))
- cleared++;
- }
- spin_unlock_irq(&zone->lru_lock);
- }
- printk(KERN_DEBUG "all_pages: active %d inactive %d cleared %d\n",
- active, inactive, cleared);
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(res, &ckrm_memclass_list, mcls_list) {
- act_cnt = 0; inact_cnt = 0; idx = 0;
- for_each_zone(zone) {
- act_cnt += res->nr_active[idx];
- inact_cnt += res->nr_inactive[idx];
- idx++;
- }
- printk(KERN_DEBUG "all_pages: %s: tmp_cnt %d; act_cnt %d inact_cnt %d\n",
- res->core->name, res->tmp_cnt, act_cnt, inact_cnt);
- }
- spin_unlock(&ckrm_mem_lock);
-
- // check all mm's in the system to see which memclass they are attached
- // to.
- return;
+ return changed;
}
-static /*inline*/ int
+static inline int
class_migrate_pmd(struct mm_struct* mm, struct vm_area_struct* vma,
pmd_t* pmdir, unsigned long address, unsigned long end)
{
- pte_t *pte, *orig_pte;
+ pte_t* pte;
unsigned long pmd_end;
if (pmd_none(*pmdir))
return 0;
BUG_ON(pmd_bad(*pmdir));
- orig_pte = pte = pte_offset_map(pmdir,address);
+ pte = pte_offset_map(pmdir,address);
pmd_end = (address+PMD_SIZE)&PMD_MASK;
if (end>pmd_end)
end = pmd_end;
do {
if (pte_present(*pte)) {
- BUG_ON(mm->memclass == NULL);
- ckrm_change_page_class(pte_page(*pte), mm->memclass);
- // ckrm_mem_evaluate_page(pte_page(*pte));
+ ckrm_mem_evaluate_page(pte_page(*pte));
}
address += PAGE_SIZE;
pte++;
} while(address && (address<end));
- pte_unmap(orig_pte);
return 0;
}
-static /*inline*/ int
+static inline int
class_migrate_pgd(struct mm_struct* mm, struct vm_area_struct* vma,
pgd_t* pgdir, unsigned long address, unsigned long end)
{
return 0;
}
-static /*inline*/ int
+static inline int
class_migrate_vma(struct mm_struct* mm, struct vm_area_struct* vma)
{
pgd_t* pgdir;
maxshareclass = cls;
}
- if (maxshareclass && (mm->memclass != (void *)maxshareclass)) {
+ if (mm->memclass != (void *)maxshareclass) {
+ mem_class_get(maxshareclass);
if (mm->memclass)
mem_class_put(mm->memclass);
mm->memclass = maxshareclass;
- mem_class_get(maxshareclass);
/* Go through all VMA to migrate pages */
down_read(&mm->mmap_sem);
return;
}
+void
+ckrm_mem_evaluate_page_byadd(struct page* page, struct mm_struct* mm)
+{
+ ckrm_mem_res_t *pgcls = page_class(page);
+ ckrm_mem_res_t *chgcls = mm->memclass ? mm->memclass : GET_MEM_CLASS(current);
+
+ if (!chgcls || pgcls == chgcls)
+ return;
+
+ if (!page->mapcount) {
+ ckrm_change_page_class(page, chgcls);
+ return;
+ }
+ if (ckrm_mem_share_compare(pgcls, chgcls) < 0) {
+ ckrm_change_page_class(page, chgcls);
+ return;
+ }
+ return;
+}
+
void
ckrm_init_mm_to_task(struct mm_struct * mm, struct task_struct *task)
{
list_del_init(&task->mm_peers);
}
list_add_tail(&task->mm_peers, &mm->tasklist);
- spin_unlock(&mm->peertask_lock);
if (mm->memclass != GET_MEM_CLASS(task))
ckrm_mem_evaluate_mm(mm);
+ spin_unlock(&mm->peertask_lock);
return;
}
-int
-ckrm_memclass_valid(ckrm_mem_res_t *cls)
-{
- ckrm_mem_res_t *tmp;
-
- spin_lock(&ckrm_mem_lock);
- list_for_each_entry(tmp, &ckrm_memclass_list, mcls_list) {
- if (tmp == cls) {
- spin_unlock(&ckrm_mem_lock);
- return 1;
- }
- }
- spin_unlock(&ckrm_mem_lock);
- return 0;
-}
-
MODULE_LICENSE("GPL");
static struct ckrm_sock_class sockclass_dflt_class = {
};
-#define SOCKET_CLASS_TYPE_NAME "socketclass"
+#define SOCKET_CLASS_TYPE_NAME "socket_class"
const char *dflt_sockclass_name = SOCKET_CLASS_TYPE_NAME;
if (!options)
return -EINVAL;
- if (target == NULL) {
- unsigned long id = simple_strtol(options,NULL,0);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (id != 0)
- return -EINVAL;
- printk(KERN_DEBUG "sock_class: reclassify all not net implemented\n");
- return 0;
- }
-
while ((p = strsep((char **)&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
void __init ckrm_meta_init_sockclass(void)
{
- printk(KERN_DEBUG "...... Initializing ClassType<%s> ........\n",
+ printk("...... Initializing ClassType<%s> ........\n",
CT_sockclass.name);
// intialize the default class
ckrm_init_core_class(&CT_sockclass, class_core(&sockclass_dflt_class),
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/errno.h>
-#include <asm/div64.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ckrm.h>
res = ckrm_get_res_class(core, resid, ckrm_numtasks_t);
if (res == NULL)
return;
- if (unlikely(atomic_read(&res->cnt_cur_alloc) == 0)) {
- printk(KERN_WARNING "numtasks_put_ref: Trying to decrement "
- "counter below 0\n");
- return;
- }
atomic_dec(&res->cnt_cur_alloc);
if (atomic_read(&res->cnt_borrowed) > 0) {
atomic_dec(&res->cnt_borrowed);
parres = ckrm_get_res_class(res->parent, resid, ckrm_numtasks_t);
- if (unlikely(atomic_read(&res->cnt_cur_alloc) < 0)) {
- printk(KERN_WARNING "numtasks_res: counter below 0\n");
- }
- if (unlikely(atomic_read(&res->cnt_cur_alloc) > 0 ||
- atomic_read(&res->cnt_borrowed) > 0)) {
- printk(KERN_WARNING "numtasks_res_free: resource still "
- "alloc'd %p\n", res);
+ if (unlikely(atomic_read(&res->cnt_cur_alloc) != 0 ||
+ atomic_read(&res->cnt_borrowed))) {
+ printk(KERN_ERR
+ "numtasks_res_free: resource still alloc'd %p\n", res);
if ((borrowed = atomic_read(&res->cnt_borrowed)) > 0) {
for (i = 0; i < borrowed; i++) {
numtasks_put_ref_local(parres->core);
if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
res->cnt_guarantee = CKRM_SHARE_DONTCARE;
} else if (par->total_guarantee) {
- u64 temp = (u64) self->my_guarantee * parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- res->cnt_guarantee = (int) temp;
+ res->cnt_guarantee =
+ (self->my_guarantee * parres->cnt_guarantee)
+ / par->total_guarantee;
} else {
res->cnt_guarantee = 0;
}
if (parres->cnt_limit == CKRM_SHARE_DONTCARE) {
res->cnt_limit = CKRM_SHARE_DONTCARE;
} else if (par->max_limit) {
- u64 temp = (u64) self->my_limit * parres->cnt_limit;
- do_div(temp, par->max_limit);
- res->cnt_limit = (int) temp;
+ res->cnt_limit = (self->my_limit * parres->cnt_limit)
+ / par->max_limit;
} else {
res->cnt_limit = 0;
}
if (res->cnt_guarantee == CKRM_SHARE_DONTCARE) {
res->cnt_unused = CKRM_SHARE_DONTCARE;
} else if (self->total_guarantee) {
- u64 temp = (u64) self->unused_guarantee * res->cnt_guarantee;
- do_div(temp, self->total_guarantee);
- res->cnt_unused = (int) temp;
+ res->cnt_unused = (self->unused_guarantee *
+ res->cnt_guarantee) /
+ self->total_guarantee;
} else {
res->cnt_unused = 0;
}
if (parres->cnt_guarantee == CKRM_SHARE_DONTCARE) {
parres->cnt_unused = CKRM_SHARE_DONTCARE;
} else if (par->total_guarantee) {
- u64 temp = (u64) par->unused_guarantee * parres->cnt_guarantee;
- do_div(temp, par->total_guarantee);
- parres->cnt_unused = (int) temp;
+ parres->cnt_unused = (par->unused_guarantee *
+ parres->cnt_guarantee) /
+ par->total_guarantee;
} else {
parres->cnt_unused = 0;
}
#ifdef NUMTASKS_DEBUG
seq_printf(sfile,
"cur_alloc %d; borrowed %d; cnt_guar %d; cnt_limit %d "
- "cnt_unused %d, unused_guarantee %d, cur_max_limit %d\n",
+ "unused_guarantee %d, cur_max_limit %d\n",
atomic_read(&res->cnt_cur_alloc),
atomic_read(&res->cnt_borrowed), res->cnt_guarantee,
- res->cnt_limit, res->cnt_unused,
- res->shares.unused_guarantee,
+ res->cnt_limit, res->shares.unused_guarantee,
res->shares.cur_max_limit);
#endif
if (!res)
return -EINVAL;
- printk(KERN_DEBUG "numtasks config='%s'\n", cfgstr);
+ printk("numtasks config='%s'\n", cfgstr);
return 0;
}
if (resid == -1) {
resid = ckrm_register_res_ctlr(clstype, &numtasks_rcbs);
- printk(KERN_DEBUG "........init_ckrm_numtasks_res -> %d\n", resid);
+ printk("........init_ckrm_numtasks_res -> %d\n", resid);
if (resid != -1) {
ckrm_numtasks_register(numtasks_get_ref_local,
numtasks_put_ref_local);
ckrm_task_unlock(tsk->parent);
}
if (!list_empty(&tsk->taskclass_link))
- printk(KERN_WARNING "BUG in cb_fork.. tsk (%s:%d> already linked\n",
+ printk("BUG in cb_fork.. tsk (%s:%d> already linked\n",
tsk->comm, tsk->pid);
ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_FORK);
* We use a hybrid by comparing ratio nr_threads/pidmax
*/
-static int ckrm_reclassify_all_tasks(void)
+static void ckrm_reclassify_all_tasks(void)
{
extern int pid_max;
int ratio;
int use_bitmap;
- /* Check permissions */
- if ((!capable(CAP_SYS_NICE)) && (!capable(CAP_SYS_RESOURCE))) {
- return -EPERM;
- }
-
ratio = curpidmax / nr_threads;
if (curpidmax <= PID_MAX_DEFAULT) {
use_bitmap = 1;
ce_protect(&CT_taskclass);
retry:
-
if (use_bitmap == 0) {
// go through it in one walk
read_lock(&tasklist_lock);
} else {
read_unlock(&tasklist_lock);
}
- pos++;
}
}
}
ce_release(&CT_taskclass);
- return 0;
+}
+
+int ckrm_reclassify(int pid)
+{
+ struct task_struct *tsk;
+ int rc = 0;
+
+ down(&async_serializer); // protect again race condition
+ if (pid < 0) {
+ // do we want to treat this as process group .. should YES ToDo
+ rc = -EINVAL;
+ } else if (pid == 0) {
+ // reclassify all tasks in the system
+ ckrm_reclassify_all_tasks();
+ } else {
+ // reclassify particular pid
+ read_lock(&tasklist_lock);
+ if ((tsk = find_task_by_pid(pid)) != NULL) {
+ get_task_struct(tsk);
+ read_unlock(&tasklist_lock);
+ CE_CLASSIFY_TASK_PROTECT(CKRM_EVENT_RECLASSIFY, tsk);
+ put_task_struct(tsk);
+ } else {
+ read_unlock(&tasklist_lock);
+ rc = -EINVAL;
+ }
+ }
+ up(&async_serializer);
+ return rc;
}
/*
atomic_read(&cls->core.hnode.parent->refcnt));
// If no CE registered for this classtype, following will be needed
// repeatedly;
- ce_regd = atomic_read(&class_core(cls)->classtype->ce_regd);
+ ce_regd = class_core(cls)->classtype->ce_regd;
cnode = &(class_core(cls)->hnode);
parcls = class_type(ckrm_task_class_t, cnode->parent);
}
/*
- * Change the core class of the given task
+ * Change the core class of the given task.
*/
int ckrm_forced_reclassify_pid(pid_t pid, struct ckrm_task_class *cls)
{
struct task_struct *tsk;
- if (cls && !ckrm_validate_and_grab_core(class_core(cls)))
+ if (!ckrm_validate_and_grab_core(class_core(cls)))
return -EINVAL;
read_lock(&tasklist_lock);
if ((tsk = find_task_by_pid(pid)) == NULL) {
read_unlock(&tasklist_lock);
- if (cls)
- ckrm_core_drop(class_core(cls));
+ ckrm_core_drop(class_core(cls));
return -EINVAL;
}
get_task_struct(tsk);
/* Check permissions */
if ((!capable(CAP_SYS_NICE)) &&
(!capable(CAP_SYS_RESOURCE)) && (current->user != tsk->user)) {
- if (cls)
- ckrm_core_drop(class_core(cls));
+ ckrm_core_drop(class_core(cls));
put_task_struct(tsk);
return -EPERM;
}
- ce_protect(&CT_taskclass);
- if (cls == NULL)
- CE_CLASSIFY_TASK(CKRM_EVENT_RECLASSIFY,tsk);
- else
- ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL);
+ down(&async_serializer); // protect again race condition
+ ce_protect(&CT_taskclass);
+ ckrm_set_taskclass(tsk, cls, NULL, CKRM_EVENT_MANUAL);
ce_release(&CT_taskclass);
put_task_struct(tsk);
+ up(&async_serializer);
return 0;
}
void __init ckrm_meta_init_taskclass(void)
{
- printk(KERN_DEBUG "...... Initializing ClassType<%s> ........\n",
+ printk("...... Initializing ClassType<%s> ........\n",
CT_taskclass.name);
// intialize the default class
ckrm_init_core_class(&CT_taskclass, class_core(&taskclass_dflt_class),
pid_t pid;
int rc = -EINVAL;
- pid = (pid_t) simple_strtol(obj, NULL, 0);
-
- down(&async_serializer); // protect again race condition with reclassify_class
- if (pid < 0) {
- // do we want to treat this as process group .. TBD
- rc = -EINVAL;
- } else if (pid == 0) {
- rc = (target == NULL) ? ckrm_reclassify_all_tasks() : -EINVAL;
- } else {
- struct ckrm_task_class *cls = NULL;
- if (target)
- cls = class_type(ckrm_task_class_t,target);
- rc = ckrm_forced_reclassify_pid(pid,cls);
+ pid = (pid_t) simple_strtoul(obj, NULL, 10);
+ if (pid > 0) {
+ rc = ckrm_forced_reclassify_pid(pid,
+ class_type(ckrm_task_class_t,
+ target));
}
- up(&async_serializer);
return rc;
}
-#if 0
+#if 1
/******************************************************************************
* Debugging Task Classes: Utility functions
class_lock(core);
if (list_empty(&core->objlist)) {
class_lock(core);
- printk(KERN_DEBUG "check_tasklist_sanity: class %s empty list\n",
+ printk("check_tasklist_sanity: class %s empty list\n",
core->name);
return;
}
container_of(lh1, struct task_struct,
taskclass_link);
if (count++ > 20000) {
- printk(KERN_WARNING "list is CORRUPTED\n");
+ printk("list is CORRUPTED\n");
break;
}
if (tsk->taskclass != cls) {
const char *tclsname;
tclsname = (tsk->taskclass) ?
class_core(tsk->taskclass)->name:"NULL";
- printk(KERN_WARNING "sanity: task %s:%d has ckrm_core "
+ printk("sanity: task %s:%d has ckrm_core "
"|%s| but in list |%s|\n", tsk->comm,
tsk->pid, tclsname, core->name);
}
struct task_struct *proc, *thread;
int count = 0;
- printk(KERN_DEBUG "Analyze Error <%s> %d\n",
+ printk("Analyze Error <%s> %d\n",
class_core(tskcls)->name,
atomic_read(&(class_core(tskcls)->refcnt)));
const char *tclsname;
tclsname = (thread->taskclass) ?
class_core(thread->taskclass)->name :"NULL";
- printk(KERN_DEBUG "%d thread=<%s:%d> -> <%s> <%lx>\n", count,
+ printk("%d thread=<%s:%d> -> <%s> <%lx>\n", count,
thread->comm, thread->pid, tclsname,
thread->flags & PF_EXITING);
}
class_unlock(class_core(tskcls));
read_unlock(&tasklist_lock);
- printk(KERN_DEBUG "End Analyze Error <%s> %d\n",
+ printk("End Analyze Error <%s> %d\n",
class_core(tskcls)->name,
atomic_read(&(class_core(tskcls)->refcnt)));
}
return;
}
+
/*
* Caller is responsible for holding any lock to protect the data
* structures passed to this function
// Check total_guarantee for correctness
if (new->total_guarantee <= CKRM_SHARE_DONTCARE) {
+ printk(KERN_ERR "new->total_guarantee %d <= CKRM_SHARE_DONTCARE\n",
+ new->total_guarantee);
goto set_share_err;
} else if (new->total_guarantee == CKRM_SHARE_UNCHANGED) {
; // do nothing
} else if (cur_usage_guar > new->total_guarantee) {
+ printk(KERN_ERR "cur_usage_guar %d > new->total_guarantee %d\n",
+ cur_usage_guar,new->total_guarantee);
goto set_share_err;
}
// Check max_limit for correctness
if (new->max_limit <= CKRM_SHARE_DONTCARE) {
+ printk(KERN_ERR "new->max_limit %d <= CKRM_SHARE_DONTCARE\n",
+ new->max_limit);
goto set_share_err;
} else if (new->max_limit == CKRM_SHARE_UNCHANGED) {
; // do nothing
} else if (cur->cur_max_limit > new->max_limit) {
+ printk(KERN_ERR "cur->cur_max_limit %d > new->max_limit %d\n",
+ cur->cur_max_limit, new->max_limit);
goto set_share_err;
}
// Check my_guarantee for correctness
} else if (new->my_guarantee == CKRM_SHARE_DONTCARE) {
; // do nothing
} else if (par && increase_by > par->unused_guarantee) {
+ printk(KERN_ERR "increase_by %d > par->unused_guarantee %d\n",
+ increase_by, par->unused_guarantee);
goto set_share_err;
}
// Check my_limit for correctness
; // do nothing
} else if (par && new->my_limit > par->max_limit) {
// I can't get more limit than my parent's limit
+ printk(KERN_ERR "new->my_limit %d > par->max_limit %d\n",
+ new->my_limit,par->max_limit);
goto set_share_err;
}
; // do nothing earlier setting would've
// taken care of it
} else if (new->my_guarantee > cur->my_limit) {
+ printk(KERN_ERR "new->my_guarantee %d > cur->my_limit %d\n",
+ new->my_guarantee,par->max_limit);
goto set_share_err;
}
} else { // new->my_limit has a valid value
; // do nothing
} else if (new->my_guarantee == CKRM_SHARE_UNCHANGED) {
if (cur->my_guarantee > new->my_limit) {
+ printk(KERN_ERR "cur->my_guarantee %d > new->my_limit %d\n",
+ cur->my_guarantee,new->my_limit);
goto set_share_err;
}
} else if (new->my_guarantee > new->my_limit) {
+ printk(KERN_ERR "new->my_guarantee %d > new->my_limit %d\n",
+ new->my_guarantee,new->my_limit);
goto set_share_err;
}
}
return;
}
if (vec == NULL) {
- printk(KERN_DEBUG "v<0>-NULL\n");
+ printk("v<0>-NULL\n");
return;
}
- printk(KERN_DEBUG "v<%d>-", sz = vec->size);
+ printk("v<%d>-", sz = vec->size);
for (i = 0; i < sz; i++) {
- printk(KERN_DEBUG "%c", test_bit(i, vec->bits) ? '1' : '0');
+ printk("%c", test_bit(i, vec->bits) ? '1' : '0');
}
return;
}
static char *info =
"1. Magic files\n"
"\t|--rbce_info - read only file detailing how to setup and use RBCE.\n\n"
+ "\t|--rbce_reclassify - contains nothing. Writing a pid to it"
+ "reclassifies\n"
+ "\tthe given task according to the current set of rules.\n"
+ "\tWriting 0 to it reclassifies all tasks in the system according to the \n"
+ "\tcurrent set of rules. This is typically done by the user/sysadmin \n"
+ "\tafter changing/creating rules. \n\n"
"\t|--rbce_state - determines whether RBCE is currently active"
" or inactive.\n"
"\tWriting 1 (0) activates (deactivates) the CE. Reading the file\n"
-/* RCFS API for Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- * (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Module for loading of classification policies and providing
- * a user API for Class-based Kernel Resource Management (CKRM)
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
+/*
+ * This file is released under the GPL.
*/
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/errno.h>
if (*ptr == '\n') {
*ptr = '\0';
}
+#if 0
+ if (!strcmp(file->f_dentry->d_name.name, "rbce_reclassify")) {
+ pid = simple_strtol(line, NULL, 0);
+ rc = reclassify_pid(pid);
+ } else
+#endif
if (!strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
pid = simple_strtol(line, &ptr, 0);
rc = set_tasktag(pid, ptr + 1); // expected syntax "pid tag"
char result[256];
memset(result, 0, 256);
- if (!strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
+ if (!strcmp(file->f_dentry->d_name.name, "rbce_reclassify") ||
+ !strcmp(file->f_dentry->d_name.name, "rbce_tag")) {
return -EPERM;
}
if (!strcmp(file->f_dentry->d_name.name, "rbce_state")) {
{
const char *name = file->f_dentry->d_name.name;
- if (strcmp(name, "rbce_state") &&
+ if (strcmp(name, "rbce_reclassify") &&
+ strcmp(name, "rbce_state") &&
strcmp(name, "rbce_tag") && strcmp(name, "rbce_info")) {
if (!rule_exists(name)) {
struct dentry *pd =
list_entry(dir->i_dentry.next, struct dentry, d_alias);
- // Under /ce only "rbce_state", "rbce_tag" and "rbce_info" are allowed
+ // Under /ce only "rbce_reclassify", "rbce_state", "rbce_tag" and
+ // "rbce_info" are allowed
if (!strcmp(pd->d_name.name, "ce")) {
- if (strcmp(dentry->d_name.name, "rbce_state") &&
+ if (strcmp(dentry->d_name.name, "rbce_reclassify") &&
+ strcmp(dentry->d_name.name, "rbce_state") &&
strcmp(dentry->d_name.name, "rbce_tag") &&
strcmp(dentry->d_name.name, "rbce_info")) {
return -EINVAL;
/******************************* Magic files ********************/
-#define RBCE_NR_MAGF 5
+#define RBCE_NR_MAGF 6
struct rcfs_magf rbce_magf_files[RBCE_NR_MAGF] = {
{
.name = "ce",
.mode = RCFS_DEFAULT_FILE_MODE,
.i_fop = &rbce_file_operations,
},
+ {
+ .name = "rbce_reclassify",
+ .mode = RCFS_DEFAULT_FILE_MODE,
+ .i_fop = &rbce_file_operations,
+ },
{
.name = "rules",
.mode = (RCFS_DEFAULT_DIR_MODE | S_IWUSR),
static void rbce_put_super(struct super_block *sb)
{
module_put(THIS_MODULE);
- printk(KERN_DEBUG "rbce_put_super called\n");
+ printk("rbce_put_super called\n");
}
static struct super_operations rbce_ops = {
-/* Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
+/* Rule-based Classification Engine (RBCE) module
*
* Copyright (C) Hubertus Franke, IBM Corp. 2003
* (C) Chandra Seetharaman, IBM Corp. 2003
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
/* Changes
#include <linux/ckrm_ce.h>
#include <linux/ckrm_net.h>
#include "bitvector.h"
-#include <linux/rbce.h>
+#include "rbce.h"
#define DEBUG
#define POLICY_ACTION_REDO_ALL 0x02 // Recompute all rule flags
#define POLICY_ACTION_PACK_TERMS 0x04 // Time to pack the terms
-const int use_persistent_state = 1;
-
struct ckrm_eng_callback ckrm_ecbs;
// Term vector state
#define DBG_RULE ( 0x20 )
#define DBG_POLICY ( 0x40 )
-#define DPRINTK(x, y...) if (rbcedebug & (x)) printk(KERN_DEBUG y)
+#define DPRINTK(x, y...) if (rbcedebug & (x)) printk(y)
// debugging selectively enabled through /proc/sys/debug/rbce
static void print_context_vectors(void)
return;
}
for (i = 0; i < NUM_TERM_MASK_VECTOR; i++) {
- printk(KERN_DEBUG "%d: ", i);
+ printk("%d: ", i);
bitvector_print(DBG_OPTIMIZATION, gl_mask_vecs[i]);
- printk(KERN_DEBUG "\n");
+ printk("\n");
}
}
#else
}
notify_class_action(cls, 0);
cls->classobj = NULL;
- list_for_each_entry(pos, &rules_list[classtype], link) {
+ list_for_each_entry(pos, &rules_list[cls->classtype], link) {
rule = (struct rbce_rule *)pos;
if (rule->target_class) {
if (!strcmp
}
}
}
+ put_class(cls);
if ((cls = find_class_name(classname)) != NULL) {
printk(KERN_ERR
"rbce ERROR: class %s exists in rbce after "
static struct rbce_private_data *create_private_data(struct rbce_private_data *,
int);
-static inline
-void reset_evaluation(struct rbce_private_data *pdata,int termflag)
+int rbce_ckrm_reclassify(int pid)
{
- /* reset TAG ruleterm evaluation results to pick up
- * on next classification event
- */
- if (use_persistent_state && gl_mask_vecs[termflag]) {
- bitvector_and_not( pdata->eval, pdata->eval,
- gl_mask_vecs[termflag] );
- bitvector_and_not( pdata->true, pdata->true,
- gl_mask_vecs[termflag] );
- }
+ printk("ckrm_reclassify_pid ignored\n");
+ return -EINVAL;
+}
+
+/*
+ * reclassify_pid - re-run RBCE classification for a task.
+ * @pid: 0 requests reclassification of all tasks; a positive pid
+ *       reclassifies that one task. Negative pids (process groups)
+ *       are not handled yet (see FIXME below).
+ *
+ * Returns 0 on success, -EINVAL for negative or unknown pids, and
+ * -ENOMEM if the per-task RBCE private data cannot be allocated.
+ * The return value of rbce_ckrm_reclassify() itself is ignored here.
+ */
+int reclassify_pid(int pid)
+{
+ struct task_struct *tsk;
+
+ // FIXME: Need to treat -pid as process group
+ if (pid < 0) {
+ return -EINVAL;
+ }
+
+ if (pid == 0) {
+ rbce_ckrm_reclassify(0); // just reclassify all tasks.
+ }
+ // NOTE(review): no early return in the pid == 0 branch above, so
+ // control falls through to find_task_by_pid(0), which fails and
+ // yields -EINVAL — confirm this is the intended behavior.
+ // if pid is +ve take control of the task, start evaluating it
+ if ((tsk = find_task_by_pid(pid)) == NULL) {
+ return -EINVAL;
+ }
+
+ // Lazily create the per-task RBCE private data on first use.
+ if (unlikely(!RBCE_DATA(tsk))) {
+ RBCE_DATAP(tsk) = create_private_data(NULL, 0);
+ if (!RBCE_DATA(tsk)) {
+ return -ENOMEM;
+ }
+ }
+ RBCE_DATA(tsk)->evaluate = 1;
+ rbce_ckrm_reclassify(pid);
+ return 0;
 }
-
+
int set_tasktag(int pid, char *tag)
{
char *tp;
- int rc = 0;
struct task_struct *tsk;
struct rbce_private_data *pdata;
- int len;
if (!tag) {
return -EINVAL;
}
- len = strlen(tag) + 1;
- tp = kmalloc(len, GFP_ATOMIC);
- if (!tp) {
- return -ENOMEM;
- }
- strncpy(tp,tag,len);
- read_lock(&tasklist_lock);
if ((tsk = find_task_by_pid(pid)) == NULL) {
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
+ }
+
+ tp = kmalloc(strlen(tag) + 1, GFP_ATOMIC);
+
+ if (!tp) {
+ return -ENOMEM;
}
if (unlikely(!RBCE_DATA(tsk))) {
RBCE_DATAP(tsk) = create_private_data(NULL, 0);
if (!RBCE_DATA(tsk)) {
- rc = -ENOMEM;
- goto out;
+ kfree(tp);
+ return -ENOMEM;
}
}
pdata = RBCE_DATA(tsk);
kfree(pdata->app_tag);
}
pdata->app_tag = tp;
- reset_evaluation(pdata,RBCE_TERMFLAG_TAG);
-
- out:
- read_unlock(&tasklist_lock);
- if (rc != 0)
- kfree(tp);
- return rc;
+ strcpy(pdata->app_tag, tag);
+ rbce_ckrm_reclassify(pid);
+
+ return 0;
}
/*====================== Classification Functions =======================*/
}
}
spin_unlock(&pdata_lock);
- printk(KERN_WARNING "INVALID/CORRUPT PDATA %p\n", pdata);
+ printk("INVALID/CORRUPT PDATA %p\n", pdata);
return 0;
}
while (i < MAX_PDATA) {
if (pdata_arr[pdata_next] == NULL) {
- printk(KERN_DEBUG "storing %p at %d, count %d\n", pdata,
+ printk("storing %p at %d, count %d\n", pdata,
pdata_next, pdata_count);
pdata_arr[pdata_next++] = pdata;
if (pdata_next == MAX_PDATA) {
spin_unlock(&pdata_lock);
}
if (i == MAX_PDATA) {
- printk(KERN_DEBUG "PDATA BUFFER FULL pdata_count %d pdata %p\n",
+ printk("PDATA BUFFER FULL pdata_count %d pdata %p\n",
pdata_count, pdata);
}
}
spin_lock(&pdata_lock);
for (i = 0; i < MAX_PDATA; i++) {
if (pdata_arr[i] == pdata) {
- printk(KERN_DEBUG "unstoring %p at %d, count %d\n", pdata,
+ printk("unstoring %p at %d, count %d\n", pdata,
i, pdata_count);
pdata_arr[i] = NULL;
pdata_count--;
}
spin_unlock(&pdata_lock);
if (i == MAX_PDATA) {
- printk(KERN_DEBUG "pdata %p not found in the stored array\n",
+ printk("pdata %p not found in the stored array\n",
pdata);
}
}
#endif // PDATA_DEBUG
+const int use_persistent_state = 1;
+
/*
* Allocate and initialize a rbce_private_data data structure.
*
// pdata->evaluate = src->evaluate;
// if(src->app_tag) {
// int len = strlen(src->app_tag)+1;
- // printk(KERN_DEBUG "CREATE_PRIVATE: apptag %s len %d\n",
+ // printk("CREATE_PRIVATE: apptag %s len %d\n",
// src->app_tag,len);
// pdata->app_tag = kmalloc(len, GFP_ATOMIC);
// if (pdata->app_tag) {
va_list args;
void *cls = NULL;
struct task_struct *tsk;
- struct rbce_private_data *pdata;
va_start(args, event);
tsk = va_arg(args, struct task_struct *);
* [ CKRM_LATCHABLE_EVENTS .. CKRM_NONLATCHABLE_EVENTS )
*/
- // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
+ // printk("tc_classify %p:%d:%s '%s'\n",tsk,tsk->pid,
// tsk->comm,event_names[event]);
switch (event) {
break;
case CKRM_EVENT_RECLASSIFY:
- if ((pdata = (RBCE_DATA(tsk)))) {
- pdata->evaluate = 1;
- }
cls = rbce_classify(tsk, NULL, RBCE_TERMFLAG_ALL, tc_classtype);
break;
}
- // printk(KERN_DEBUG "tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
+ // printk("tc_classify %p:%d:%s '%s' ==> %p\n",tsk,tsk->pid,
// tsk->comm,event_names[event],cls);
return cls;
#ifndef RBCE_EXTENSION
static void rbce_tc_notify(int event, void *core, struct task_struct *tsk)
{
- printk(KERN_DEBUG "tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
+ printk("tc_manual %p:%d:%s '%s'\n", tsk, tsk->pid, tsk->comm,
event_names[event]);
if (event != CKRM_EVENT_MANUAL)
return;
{NULL}
};
-static void unregister_classtype_engines(void)
- {
+static int register_classtype_engines(void)
+{
int rc;
struct ce_regtable_struct *ceptr = ce_regtable;
while (ceptr->name) {
- if (*ceptr->clsvar >= 0) {
- printk(KERN_DEBUG "ce unregister with <%s>\n",ceptr->name);
- while ((rc = ckrm_unregister_engine(ceptr->name)) == -EAGAIN)
- ;
- printk(KERN_DEBUG "ce unregister with <%s> rc=%d\n",ceptr->name,rc);
- *ceptr->clsvar = -1;
- }
+ rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
+ printk("ce register with <%s> typeId=%d\n", ceptr->name, rc);
+ if ((rc < 0) && (rc != -ENOENT))
+ return (rc);
+ if (rc != -ENOENT)
+ *ceptr->clsvar = rc;
ceptr++;
}
- }
+ return 0;
+}
-static int register_classtype_engines(void)
+static void unregister_classtype_engines(void)
{
int rc;
struct ce_regtable_struct *ceptr = ce_regtable;
while (ceptr->name) {
- rc = ckrm_register_engine(ceptr->name, ceptr->cbs);
- printk(KERN_DEBUG "ce register with <%s> typeId=%d\n",ceptr->name,rc);
- if ((rc < 0) && (rc != -ENOENT)) {
- unregister_classtype_engines();
- return (rc);
+ if (*ceptr->clsvar >= 0) {
+ printk("ce unregister with <%s>\n", ceptr->name);
+ rc = ckrm_unregister_engine(ceptr->name);
+ printk("ce unregister with <%s> rc=%d\n", ceptr->name,
+ rc);
+ *ceptr->clsvar = -1;
}
- if (rc != -ENOENT)
- *ceptr->clsvar = rc;
ceptr++;
}
- return 0;
}
// =========== /proc/sysctl/debug/rbce debug stuff =============
{
int rc, i, line;
- printk(KERN_DEBUG "<1>\nInstalling \'%s\' module\n", modname);
+ printk("<1>\nInstalling \'%s\' module\n", modname);
for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
INIT_LIST_HEAD(&rules_list[i]);
exit_rbce_ext();
out:
- printk(KERN_DEBUG "<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
+ printk("<1>%s: error installing rc=%d line=%d\n", __FUNCTION__, rc,
line);
return rc;
}
{
int i;
- printk(KERN_DEBUG "<1>Removing \'%s\' module\n", modname);
+ printk("<1>Removing \'%s\' module\n", modname);
stop_debug();
exit_rbce_ext();
// Print warnings if lists are not empty, which is a bug
if (!list_empty(&class_list)) {
- printk(KERN_DEBUG "exit_rbce: Class list is not empty\n");
+ printk("exit_rbce: Class list is not empty\n");
}
for (i = 0; i < CKRM_MAX_CLASSTYPES; i++) {
if (!list_empty(&rules_list[i])) {
- printk(KERN_DEBUG "exit_rbce: Rules list for classtype %d"
+ printk("exit_rbce: Rules list for classtype %d"
" is not empty\n", i);
}
}
EXPORT_SYMBOL(change_rule);
EXPORT_SYMBOL(delete_rule);
EXPORT_SYMBOL(rename_rule);
+EXPORT_SYMBOL(reclassify_pid);
EXPORT_SYMBOL(set_tasktag);
module_init(init_rbce);
* Copyright (C) Hubertus Franke, IBM Corp. 2003
*
* Extension to be included into RBCE to collect delay and sample information
- * Requires user daemon e.g. crbcedmn to activate.
+ * requires user daemon <crbcedmn> to activate.
*
* Latest version, more details at http://ckrm.sf.net
*
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
-
/*******************************************************************************
*
* User-Kernel Communication Channel (UKCC)
{
static int readers = 0;
if (fileop == RELAY_FILE_OPEN) {
- // printk(KERN_DEBUG "got fileop_notify RELAY_FILE_OPEN for file %p\n",
+ // printk("got fileop_notify RELAY_FILE_OPEN for file %p\n",
// filp);
if (readers) {
- printk(KERN_DEBUG "only one client allowed, backoff .... \n");
+ printk("only one client allowed, backoff .... \n");
return -EPERM;
}
if (!try_module_get(THIS_MODULE))
client_attached();
} else if (fileop == RELAY_FILE_CLOSE) {
- // printk(KERN_DEBUG "got fileop_notify RELAY_FILE_CLOSE for file %p\n",
+ // printk("got fileop_notify RELAY_FILE_CLOSE for file %p\n",
// filp);
client_detached();
readers--;
channel_flags,
&ukcc_callbacks, 0, 0, 0, 0, 0, 0, NULL, 0);
if (ukcc_channel < 0)
- printk(KERN_DEBUG "crbce: ukcc creation failed, errcode: %d\n",
+ printk("crbce: ukcc creation failed, errcode: %d\n",
ukcc_channel);
else
- printk(KERN_DEBUG "crbce: ukcc created (%u KB)\n",
+ printk("crbce: ukcc created (%u KB)\n",
UKCC_TOTAL_BUFFER_SIZE >> 10);
return ukcc_channel;
}
(r),(l),-1,NULL) > 0); \
chan_state = chan_isok ? UKCC_OK : UKCC_STANDBY; \
if (chan_wasok && !chan_isok) { \
- printk(KERN_DEBUG "Channel stalled\n"); \
+ printk("Channel stalled\n"); \
} else if (!chan_wasok && chan_isok) { \
- printk(KERN_DEBUG "Channel continues\n"); \
+ printk("Channel continues\n"); \
} \
} while (0)
return 0;
pdata = RBCE_DATA(tsk);
if (pdata == NULL) {
- // printk(KERN_DEBUG "send [%d]<%s>: no pdata\n",tsk->pid,tsk->comm);
+ // printk("send [%d]<%s>: no pdata\n",tsk->pid,tsk->comm);
return 0;
}
if (send_forced || (delta_mode == 0)
rec_set_timehdr(&limrec, CRBCE_REC_DATA_DELIMITER, 0, 0);
rec_send(&limrec);
- // printk(KERN_DEBUG "send_task_data mode=%d t#=%d s#=%d\n",
+ // printk("send_task_data mode=%d t#=%d s#=%d\n",
// delta_mode,taskcnt,sendcnt);
}
}
while_each_thread(proc, thread);
read_unlock(&tasklist_lock);
-// printk(KERN_DEBUG "sample_timer: run=%d wait=%d\n",run,wait);
+// printk("sample_timer: run=%d wait=%d\n",run,wait);
start_sample_timer();
}
struct crbce_cmd_done cmdret;
int rc = 0;
-// printk(KERN_DEBUG "ukcc_cmd_deliver: %d %d len=%d:%d\n",cmdrec->type,
+// printk("ukcc_cmd_deliver: %d %d len=%d:%d\n",cmdrec->type,
// cmdrec->cmd,cmdrec->len,len);
cmdrec->len = len; // add this to reflection so the user doesn't
cmdret.hdr.cmd = cmdrec->cmd;
cmdret.rc = rc;
rec_send(&cmdret);
-// printk(KERN_DEBUG "ukcc_cmd_deliver ACK: %d %d rc=%d %d\n",cmdret.hdr.type,
+// printk("ukcc_cmd_deliver ACK: %d %d rc=%d %d\n",cmdret.hdr.type,
// cmdret.hdr.cmd,rc,sizeof(cmdret));
}
static void client_attached(void)
{
- printk(KERN_DEBUG "client [%d]<%s> attached to UKCC\n", current->pid,
+ printk("client [%d]<%s> attached to UKCC\n", current->pid,
current->comm);
relay_reset(ukcc_channel);
}
static void client_detached(void)
{
- printk(KERN_DEBUG "client [%d]<%s> detached to UKCC\n", current->pid,
+ printk("client [%d]<%s> detached to UKCC\n", current->pid,
current->comm);
chan_state = UKCC_STANDBY;
stop_sample_timer();
-/* Tokens for Rule-based Classification Engine (RBCE) and
- * Consolidated RBCE module code (combined)
- *
- * Copyright (C) Hubertus Franke, IBM Corp. 2003
- * (C) Chandra Seetharaman, IBM Corp. 2003
- * (C) Vivek Kashyap, IBM Corp. 2004
- *
- * Latest version, more details at http://ckrm.sf.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- *
- */
-
#include <linux/parser.h>
#include <linux/ctype.h>
nterms = 0;
while (*rp++) {
- if (*rp == '>' || *rp == '<' || *rp == '=' || *rp == '!') {
+ if (*rp == '>' || *rp == '<' || *rp == '=') {
nterms++;
}
}
*term_mask = 0;
} /* else {
for (i = 0; i < nterms; i++) {
- printk(KERN_DEBUG "token: i %d; op %d, operator %d, str %ld\n",
+ printk("token: i %d; op %d, operator %d, str %ld\n",
i, terms[i].op, terms[i].operator, terms[i].u.id);
}
} */
//add to new position, round robin for classes with same priority
list_add_tail(&(node->list), &cq->array.queue[index]);
- __set_bit(index, cq->array.bitmap);
+ __set_bit(index, cq->array.bitmap);
+
node->index = index;
}
-/**
- *classqueue_get_min_prio: return the priority of the last node in queue
- *
- * this function can be called without runqueue lock held
- */
-static inline int classqueue_get_min_prio(struct classqueue_struct *cq)
-{
- cq_node_t *result = NULL;
- int pos;
-
- /*
- * search over the bitmap to get the first class in the queue
- */
- pos = find_next_bit(cq->array.bitmap, CLASSQUEUE_SIZE, cq->base_offset);
- //do circular search from the beginning
- if (pos >= CLASSQUEUE_SIZE)
- pos = find_first_bit(cq->array.bitmap, CLASSQUEUE_SIZE);
-
- if (pos < CLASSQUEUE_SIZE) {
- result = list_entry(cq->array.queue[pos].next, cq_node_t, list);
- if (list_empty(&cq->array.queue[pos]))
- result = NULL;
- }
- if (result)
- return result->prio;
- else
- return 0;
-}
-
-/**
- * this function must be called with runqueue lock held
- */
cq_node_t *classqueue_get_head(struct classqueue_struct *cq)
{
cq_node_t *result = NULL;
* search over the bitmap to get the first class in the queue
*/
pos = find_next_bit(cq->array.bitmap, CLASSQUEUE_SIZE, cq->base_offset);
- //do circular search from the beginning
- if (pos >= CLASSQUEUE_SIZE)
+ if (pos >= CLASSQUEUE_SIZE) { //do circular search from the beginning
pos = find_first_bit(cq->array.bitmap, CLASSQUEUE_SIZE);
+ }
if (pos < CLASSQUEUE_SIZE) {
BUG_ON(list_empty(&cq->array.queue[pos]));
* Moving the end of queue forward
 * the new_base here is logical, we need to translate to the absolute position
*/
-void classqueue_update_base(struct classqueue_struct *cq)
+void classqueue_update_base(struct classqueue_struct *cq, int new_base)
{
- int new_base;
-
- if (! cq_nr_member(cq)) {
+ if (!cq_nr_member(cq)) {
cq->base_offset = -1; //not defined
return;
}
- new_base = classqueue_get_min_prio(cq);
-
+ // assert(new_base >= cq->base);
+
if (new_base > cq->base) {
cq->base_offset = get_index(cq, &new_base);
cq->base = new_base;
#include <linux/init.h>
#include <linux/ckrm_sched.h>
-rwlock_t class_list_lock = RW_LOCK_UNLOCKED;
-LIST_HEAD(active_cpu_classes); // list of active cpu classes; anchor
-
-struct ckrm_cpu_class default_cpu_class_obj;
-
-struct ckrm_cpu_class * get_default_cpu_class(void) {
- return (&default_cpu_class_obj);
-}
-
/*******************************************************/
/* CVT Management */
/*******************************************************/
+#define CVT_WINDOW_SIZE (CLASSQUEUE_SIZE << CLASS_BONUS_RATE)
+static CVT_t max_CVT = CVT_WINDOW_SIZE;
-static inline void check_inactive_class(ckrm_lrq_t * lrq,CVT_t cur_cvt)
+/*
+ * Also ensure that the classes global cvt is upgraded to the
+ * minimum CVT in the system, as a class might not have run for a while
+ */
+static void update_global_cvt(struct ckrm_cpu_class *cpu_class, int cpu)
{
+ struct ckrm_local_runqueue *class_queue =
+ get_ckrm_local_runqueue(cpu_class, cpu);
CVT_t min_cvt;
- CVT_t bonus;
-
- //just a safty measure
- if (unlikely(! cur_cvt))
- return;
+ CVT_t local_cvt_old = class_queue->local_cvt;
-#ifndef INTERACTIVE_BONUS_SUPPORT
-#warning "ACB taking out interactive bonus calculation"
- bonus = 0;
-#else
- /*
- * Always leaving a small bonus for inactive classes
- * allows them to compete for cycles immediately when the become
- * active. This should improve interactive behavior
- */
- bonus = INTERACTIVE_BONUS(lrq);
+ spin_lock(&cvt_lock);
+ if (class_queue->uncounted_cvt) {
+ cpu_class->global_cvt += class_queue->uncounted_cvt;
+ class_queue->uncounted_cvt = 0;
+ }
+ min_cvt = max_CVT - CVT_WINDOW_SIZE;
+ if (cpu_class->global_cvt < min_cvt)
+ cpu_class->global_cvt = min_cvt;
+ else if (cpu_class->global_cvt > max_CVT)
+ max_CVT = cpu_class->global_cvt;
+
+/* update local cvt from global cvt*/
+#if 0
+ class_queue->local_cvt = cpu_class->global_cvt;
#endif
+ spin_unlock(&cvt_lock);
- //cvt can't be negative
- if (cur_cvt > bonus)
- min_cvt = cur_cvt - bonus;
- else
- min_cvt = 0;
-
- if (lrq->local_cvt < min_cvt) {
- CVT_t lost_cvt;
-
- lost_cvt = scale_cvt(min_cvt - lrq->local_cvt,lrq);
- lrq->local_cvt = min_cvt;
-
- /* add what the class lost to its savings*/
- lrq->savings += lost_cvt;
- if (lrq->savings > MAX_SAVINGS)
- lrq->savings = MAX_SAVINGS;
- } else if (lrq->savings) {
- /*
- *if a class saving and falling behind
- * then start to use it saving in a leaking bucket way
- */
- CVT_t savings_used;
-
- savings_used = scale_cvt((lrq->local_cvt - min_cvt),lrq);
- if (savings_used > lrq->savings)
- savings_used = lrq->savings;
-
- if (savings_used > SAVINGS_LEAK_SPEED)
- savings_used = SAVINGS_LEAK_SPEED;
-
- BUG_ON(lrq->savings < savings_used);
- lrq->savings -= savings_used;
- unscale_cvt(savings_used,lrq);
- BUG_ON(lrq->local_cvt < savings_used);
-#ifndef CVT_SAVINGS_SUPPORT
-#warning "ACB taking out cvt saving"
-#else
- lrq->local_cvt -= savings_used;
-#endif
- }
+ if (class_queue->local_cvt != local_cvt_old)
+ update_class_priority(class_queue);
}
/*
- * return the max_cvt of all the classes
- */
-static inline CVT_t get_max_cvt(int this_cpu)
-{
- struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t * lrq;
- CVT_t max_cvt;
-
- max_cvt = 0;
-
- /*update class time, at the same time get max_cvt */
- list_for_each_entry(clsptr, &active_cpu_classes, links) {
- lrq = get_ckrm_lrq(clsptr, this_cpu);
- if (lrq->local_cvt > max_cvt)
- max_cvt = lrq->local_cvt;
- }
-
- return max_cvt;
-}
-
-/**
- * update_class_cputime - updates cvt of inactive classes
- * -- an inactive class shouldn't starve others when it comes back
- * -- the cpu time it lost when it's inactive should be accumulated
- * -- its accumulated saving should be compensated (in a leaky bucket fashion)
- *
* class_list_lock must have been acquired
*/
-void update_class_cputime(int this_cpu)
+void update_global_cvts(int this_cpu)
{
struct ckrm_cpu_class *clsptr;
- ckrm_lrq_t * lrq;
- CVT_t cur_cvt;
-
- /*
- * a class's local_cvt must not be significantly smaller than min_cvt
- * of active classes otherwise, it will starve other classes when it
- * is reactivated.
- *
- * Hence we keep all local_cvt's within a range of the min_cvt off
- * all active classes (approximated by the local_cvt of the currently
- * running class) and account for how many cycles where thus taken
- * from an inactive class building a savings (not to exceed a few seconds)
- * for a class to gradually make up upon reactivation, without
- * starvation of other classes.
- *
- */
- cur_cvt = get_local_cur_cvt(this_cpu);
+ struct ckrm_local_runqueue *class_queue;
- /*
- * cur_cvt == 0 means the system is now idle
- * in this case, we use max_cvt as cur_cvt
- * max_cvt roughly represents the cvt of the class
- * that has just finished running
- *
- * fairness wouldn't be a problem since we account for whatever lost in savings
- * if the system is not busy, the system responsiveness is not a problem.
- * still fine if the sytem is busy, but happened to be idle at this certain point
- * since bias toward interactive classes (class priority) is a more important way to improve system responsiveness
- */
- if (unlikely(! cur_cvt)) {
- cur_cvt = get_max_cvt(this_cpu);
- //return;
- }
-
- /*
- * - check the local cvt of all the classes
- * - update total_ns received by the class
- * - do a usage sampling for the whole class
- */
+ /*for each class*/
list_for_each_entry(clsptr, &active_cpu_classes, links) {
- lrq = get_ckrm_lrq(clsptr, this_cpu);
-
- spin_lock(&clsptr->stat.stat_lock);
- clsptr->stat.total_ns += lrq->uncounted_ns;
- ckrm_sample_usage(clsptr);
- spin_unlock(&clsptr->stat.stat_lock);
- lrq->uncounted_ns = 0;
-
- check_inactive_class(lrq,cur_cvt);
+ update_global_cvt(clsptr, this_cpu);
+ class_queue = get_ckrm_local_runqueue(clsptr, this_cpu);
+ clsptr->stat.total_ns += class_queue->uncounted_ns;
+ class_queue->uncounted_ns = 0;
}
}
-
-/*******************************************************/
-/* PID load balancing stuff */
-/*******************************************************/
-#define PID_SAMPLE_T 32
-#define PID_KP 20
-#define PID_KI 60
-#define PID_KD 20
-
-/**
- * sample pid load periodically
- */
-void ckrm_load_sample(ckrm_load_t* pid,int cpu)
-{
- long load;
- long err;
-
- if (jiffies % PID_SAMPLE_T)
- return;
-
- adjust_local_weight();
-
- load = ckrm_cpu_load(cpu);
- err = load - pid->load_p;
- pid->load_d = err;
- pid->load_p = load;
- pid->load_i *= 9;
- pid->load_i += load;
- pid->load_i /= 10;
-}
-
-long pid_get_pressure(ckrm_load_t* ckrm_load, int local_group)
-{
- long pressure;
- pressure = ckrm_load->load_p * PID_KP;
- pressure += ckrm_load->load_i * PID_KI;
- pressure += ckrm_load->load_d * PID_KD;
- pressure /= 100;
- return pressure;
-}
err = sys_clock_getres(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
- if (!err && tp && put_compat_timespec(&ts, tp))
+ if (!err && put_compat_timespec(&ts, tp))
return -EFAULT;
return err;
}
#include <linux/ckrm.h>
#include <linux/ckrm_tsk.h>
#include <linux/vs_limit.h>
-#include <linux/ckrm_mem.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
/* tasklist_lock is held, is this sufficient? */
if (p->vx_info) {
atomic_dec(&p->vx_info->cacct.nr_threads);
- atomic_dec(&p->vx_info->limit.rcur[RLIMIT_NPROC]);
+ vx_nproc_dec(p->vx_info);
+ // atomic_dec(&p->vx_info->limit.res[RLIMIT_NPROC]);
}
detach_pid(p, PIDTYPE_PID);
detach_pid(p, PIDTYPE_TGID);
filp_close(file, files);
cond_resched();
}
- // vx_openfd_dec(fd);
+ vx_openfd_dec(fd);
}
i++;
set >>= 1;
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
-#ifdef CONFIG_CKRM_RES_MEM
- spin_lock(&mm->peertask_lock);
- list_del_init(&tsk->mm_peers);
- ckrm_mem_evaluate_mm(mm);
- spin_unlock(&mm->peertask_lock);
-#endif
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mmput(mm);
}
acct_process(code);
- if (current->tux_info) {
-#ifdef CONFIG_TUX_DEBUG
- printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
- code, __builtin_return_address(0));
-#endif
- current->tux_exit();
- }
__exit_mm(tsk);
exit_sem(tsk);
module_put(tsk->binfmt->module);
tsk->exit_code = code;
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
+ numtasks_put_ref(tsk->taskclass);
+#endif
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
if (p->real_parent != p->parent) {
__ptrace_unlink(p);
p->state = TASK_ZOMBIE;
- /*
- * If this is not a detached task, notify the parent. If it's
- * still not detached after that, don't release it now.
- */
- if (p->exit_signal != -1) {
+ /* If this is a detached thread, this is where it goes away. */
+ if (p->exit_signal == -1) {
+ /* release_task takes the lock itself. */
+ write_unlock_irq(&tasklist_lock);
+ release_task (p);
+ }
+ else {
do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1)
- p = NULL;
+ write_unlock_irq(&tasklist_lock);
}
+ p = NULL;
}
- write_unlock_irq(&tasklist_lock);
+ else
+ write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
--- /dev/null
+/*
+ * linux/kernel/exit.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/namespace.h>
+#include <linux/security.h>
+#include <linux/acct.h>
+#include <linux/file.h>
+#include <linux/binfmts.h>
+#include <linux/ptrace.h>
+#include <linux/profile.h>
+#include <linux/mount.h>
+#include <linux/proc_fs.h>
+#include <linux/mempolicy.h>
+#include <linux/ckrm.h>
+#include <linux/ckrm_tsk.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+extern void sem_exit (void);
+extern struct task_struct *child_reaper;
+
+int getrusage(struct task_struct *, int, struct rusage __user *);
+
+/*
+ * Drop a task from every pid hash and from the task list.
+ * Caller must hold tasklist_lock for writing.
+ */
+static void __unhash_process(struct task_struct *p)
+{
+	nr_threads--;
+	detach_pid(p, PIDTYPE_PID);
+	detach_pid(p, PIDTYPE_TGID);
+	if (thread_group_leader(p)) {
+		detach_pid(p, PIDTYPE_PGID);
+		detach_pid(p, PIDTYPE_SID);
+		if (p->pid)
+			/* pid 0 (idle) is never counted in process_counts */
+			__get_cpu_var(process_counts)--;
+	}
+
+	REMOVE_LINKS(p);
+}
+
+/*
+ * Final reaping of a dead task: unhash it, detach any ptrace links,
+ * fold its times/fault counts into the parent, and free the thread.
+ * May loop once more to also reap a self-reaping zombie group leader.
+ */
+void release_task(struct task_struct * p)
+{
+	int zap_leader;
+	task_t *leader;
+	struct dentry *proc_dentry;
+
+repeat:
+	BUG_ON(p->state < TASK_ZOMBIE);
+
+	atomic_dec(&p->user->processes);
+	spin_lock(&p->proc_lock);
+	proc_dentry = proc_pid_unhash(p);
+	write_lock_irq(&tasklist_lock);
+	if (unlikely(p->ptrace))
+		__ptrace_unlink(p);
+	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
+	__exit_signal(p);
+	__exit_sighand(p);
+	__unhash_process(p);
+
+	/*
+	 * If we are the last non-leader member of the thread
+	 * group, and the leader is zombie, then notify the
+	 * group leader's parent process. (if it wants notification.)
+	 */
+	zap_leader = 0;
+	leader = p->group_leader;
+	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
+		BUG_ON(leader->exit_signal == -1);
+		do_notify_parent(leader, leader->exit_signal);
+		/*
+		 * If we were the last child thread and the leader has
+		 * exited already, and the leader's parent ignores SIGCHLD,
+		 * then we are the one who should release the leader.
+		 *
+		 * do_notify_parent() will have marked it self-reaping in
+		 * that case.
+		 */
+		zap_leader = (leader->exit_signal == -1);
+	}
+
+	/* roll the dead child's accounting into the parent's c* fields */
+	p->parent->cutime += p->utime + p->cutime;
+	p->parent->cstime += p->stime + p->cstime;
+	p->parent->cmin_flt += p->min_flt + p->cmin_flt;
+	p->parent->cmaj_flt += p->maj_flt + p->cmaj_flt;
+	p->parent->cnvcsw += p->nvcsw + p->cnvcsw;
+	p->parent->cnivcsw += p->nivcsw + p->cnivcsw;
+	sched_exit(p);
+	write_unlock_irq(&tasklist_lock);
+	spin_unlock(&p->proc_lock);
+	proc_pid_flush(proc_dentry);
+	release_thread(p);
+	put_task_struct(p);
+
+	p = leader;
+	if (unlikely(zap_leader))
+		goto repeat;
+}
+
+/* we are using it only for SMP init */
+
+/* Unhash a task with full locking; used only during SMP init. */
+void unhash_process(struct task_struct *p)
+{
+	struct dentry *proc_dentry;
+
+	spin_lock(&p->proc_lock);
+	proc_dentry = proc_pid_unhash(p);
+	write_lock_irq(&tasklist_lock);
+	__unhash_process(p);
+	write_unlock_irq(&tasklist_lock);
+	spin_unlock(&p->proc_lock);
+	proc_pid_flush(proc_dentry);
+}
+
+/*
+ * This checks not only the pgrp, but falls back on the pid if no
+ * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
+ * without this...
+ */
+/*
+ * Return the session id of process group `pgrp`, falling back to the
+ * session of the task with pid == pgrp; -1 if neither exists.
+ */
+int session_of_pgrp(int pgrp)
+{
+	struct task_struct *p;
+	struct list_head *l;
+	struct pid *pid;
+	int sid = -1;
+
+	read_lock(&tasklist_lock);
+	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid)
+		if (p->signal->session > 0) {
+			sid = p->signal->session;
+			goto out;
+		}
+	p = find_task_by_pid(pgrp);
+	if (p)
+		sid = p->signal->session;
+out:
+	read_unlock(&tasklist_lock);
+
+	return sid;
+}
+
+/*
+ * Determine if a process group is "orphaned", according to the POSIX
+ * definition in 2.2.2.52. Orphaned process groups are not to be affected
+ * by terminal-generated stop signals. Newly orphaned process groups are
+ * to receive a SIGHUP and a SIGCONT.
+ *
+ * "I ask you, have you ever known what it is to be an orphan?"
+ */
+/*
+ * Return 1 if pgrp would be orphaned once `ignored_task` is gone:
+ * no live member has a parent in a different pgrp of the same session.
+ * Caller holds tasklist_lock.
+ */
+static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+{
+	struct task_struct *p;
+	struct list_head *l;
+	struct pid *pid;
+	int ret = 1;
+
+	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
+		if (p == ignored_task
+				|| p->state >= TASK_ZOMBIE
+				|| p->real_parent->pid == 1)
+			continue;
+		if (process_group(p->real_parent) != pgrp
+			    && p->real_parent->signal->session == p->signal->session) {
+			ret = 0;
+			break;
+		}
+	}
+	return ret;	/* (sighing) "Often!" */
+}
+
+/* Locked wrapper: is process group `pgrp` orphaned right now? */
+int is_orphaned_pgrp(int pgrp)
+{
+	int retval;
+
+	read_lock(&tasklist_lock);
+	retval = will_become_orphaned_pgrp(pgrp, NULL);
+	read_unlock(&tasklist_lock);
+
+	return retval;
+}
+
+/*
+ * Return 1 if any member of `pgrp` is genuinely stopped (not merely
+ * stopped by a debugger on a non-stop signal).  Caller holds tasklist_lock.
+ */
+static inline int has_stopped_jobs(int pgrp)
+{
+	int retval = 0;
+	struct task_struct *p;
+	struct list_head *l;
+	struct pid *pid;
+
+	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
+		if (p->state != TASK_STOPPED)
+			continue;
+
+		/* If p is stopped by a debugger on a signal that won't
+		   stop it, then don't count p as stopped.  This isn't
+		   perfect but it's a good approximation. */
+		if (unlikely (p->ptrace)
+		    && p->exit_code != SIGSTOP
+		    && p->exit_code != SIGTSTP
+		    && p->exit_code != SIGTTOU
+		    && p->exit_code != SIGTTIN)
+			continue;
+
+		retval = 1;
+		break;
+	}
+	return retval;
+}
+
+/**
+ * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ *
+ * If a kernel thread is launched as a result of a system call, or if
+ * it ever exits, it should generally reparent itself to init so that
+ * it is correctly cleaned up on exit.
+ *
+ * The various task state such as scheduling policy and priority may have
+ * been inherited from a user process, so we reset them to sane values here.
+ *
+ * NOTE that reparent_to_init() gives the caller full capabilities.
+ */
+void reparent_to_init(void)
+{
+	write_lock_irq(&tasklist_lock);
+
+	ptrace_unlink(current);
+	/* Reparent to init */
+	REMOVE_LINKS(current);
+	current->parent = child_reaper;
+	current->real_parent = child_reaper;
+	SET_LINKS(current);
+
+	/* Set the exit signal to SIGCHLD so we signal init on exit */
+	current->exit_signal = SIGCHLD;
+
+	/* drop any inherited negative nice level back to 0 */
+	if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
+		set_user_nice(current, 0);
+	/* cpus_allowed? */
+	/* rt_priority? */
+	/* signals? */
+	security_task_reparent_to_init(current);
+	/* reset rlimits and switch to the root user (full capabilities) */
+	memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
+	atomic_inc(&(INIT_USER->__count));
+	switch_uid(INIT_USER);
+
+	write_unlock_irq(&tasklist_lock);
+}
+
+/*
+ * Move the current task to the given session and process group,
+ * rehashing its SID/PGID entries.  Caller holds tasklist_lock.
+ */
+void __set_special_pids(pid_t session, pid_t pgrp)
+{
+	struct task_struct *curr = current;
+
+	if (curr->signal->session != session) {
+		detach_pid(curr, PIDTYPE_SID);
+		curr->signal->session = session;
+		attach_pid(curr, PIDTYPE_SID, session);
+	}
+	if (process_group(curr) != pgrp) {
+		detach_pid(curr, PIDTYPE_PGID);
+		curr->signal->pgrp = pgrp;
+		attach_pid(curr, PIDTYPE_PGID, pgrp);
+	}
+}
+
+/* Locked wrapper around __set_special_pids(). */
+void set_special_pids(pid_t session, pid_t pgrp)
+{
+	write_lock_irq(&tasklist_lock);
+	__set_special_pids(session, pgrp);
+	write_unlock_irq(&tasklist_lock);
+}
+
+/*
+ * Let kernel threads use this to say that they
+ * allow a certain signal (since daemonize() will
+ * have disabled all of them by default).
+ */
+/*
+ * Re-enable delivery of `sig` for the calling kernel thread (daemonize()
+ * blocks everything).  Returns 0, or -EINVAL for an out-of-range signal.
+ * NOTE: the "&current" below was mangled to the "&curren;" HTML entity
+ * ("¤t") in the corrupted patch; restored here.
+ */
+int allow_signal(int sig)
+{
+	if (sig < 1 || sig > _NSIG)
+		return -EINVAL;
+
+	spin_lock_irq(&current->sighand->siglock);
+	sigdelset(&current->blocked, sig);
+	if (!current->mm) {
+		/* Kernel threads handle their own signals.
+		   Let the signal code know it'll be handled, so
+		   that they don't get converted to SIGKILL or
+		   just silently dropped */
+		current->sighand->action[(sig)-1].sa.sa_handler = (void *)2;
+	}
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
+}
+
+EXPORT_SYMBOL(allow_signal);
+
+/*
+ * Block delivery of `sig` for the calling task.  Returns 0, or -EINVAL
+ * for an out-of-range signal.  Restores "&current" from the corrupted
+ * "&curren;" entity ("¤t") in the original patch text.
+ */
+int disallow_signal(int sig)
+{
+	if (sig < 1 || sig > _NSIG)
+		return -EINVAL;
+
+	spin_lock_irq(&current->sighand->siglock);
+	sigaddset(&current->blocked, sig);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
+}
+
+EXPORT_SYMBOL(disallow_signal);
+
+/*
+ * Put all the gunge required to become a kernel thread without
+ * attached user resources in one place where it belongs.
+ */
+
+/*
+ * Detach a kernel thread from user-space resources: set its comm from a
+ * printf-style `name`, drop its mm, move to session/pgrp 1, block and
+ * flush all signals, share init's fs and files, and reparent to init.
+ * (The "&current" below was mangled to "¤t" via the "&curren;"
+ * HTML entity in the corrupted patch; restored.)
+ */
+void daemonize(const char *name, ...)
+{
+	va_list args;
+	struct fs_struct *fs;
+	sigset_t blocked;
+
+	va_start(args, name);
+	vsnprintf(current->comm, sizeof(current->comm), name, args);
+	va_end(args);
+
+	/*
+	 * If we were started as result of loading a module, close all of the
+	 * user space pages.  We don't need them, and if we didn't close them
+	 * they would be locked into memory.
+	 */
+	exit_mm(current);
+
+	set_special_pids(1, 1);
+	current->signal->tty = NULL;
+
+	/* Block and flush all signals */
+	sigfillset(&blocked);
+	sigprocmask(SIG_BLOCK, &blocked, NULL);
+	flush_signals(current);
+
+	/* Become as one with the init task */
+
+	exit_fs(current);	/* current->fs->count--; */
+	fs = init_task.fs;
+	current->fs = fs;
+	atomic_inc(&fs->count);
+	exit_files(current);
+	current->files = init_task.files;
+	atomic_inc(&current->files->count);
+
+	reparent_to_init();
+}
+
+EXPORT_SYMBOL(daemonize);
+
+/*
+ * Close every open fd in `files`.  Walks the open_fds bitmap one
+ * fds_bits word at a time; xchg() claims each slot so the file is
+ * closed exactly once.
+ */
+static inline void close_files(struct files_struct * files)
+{
+	int i, j;
+
+	j = 0;
+	for (;;) {
+		unsigned long set;
+		i = j * __NFDBITS;
+		if (i >= files->max_fdset || i >= files->max_fds)
+			break;
+		set = files->open_fds->fds_bits[j++];
+		while (set) {
+			if (set & 1) {
+				struct file * file = xchg(&files->fd[i], NULL);
+				if (file)
+					filp_close(file, files);
+			}
+			i++;
+			set >>= 1;
+		}
+	}
+}
+
+/* Take a counted reference on `task`'s files_struct (NULL if none). */
+struct files_struct *get_files_struct(struct task_struct *task)
+{
+	struct files_struct *files;
+
+	task_lock(task);
+	files = task->files;
+	if (files)
+		atomic_inc(&files->count);
+	task_unlock(task);
+
+	return files;
+}
+
+/* Drop a reference; on last put, close all fds and free the tables. */
+void fastcall put_files_struct(struct files_struct *files)
+{
+	if (atomic_dec_and_test(&files->count)) {
+		close_files(files);
+		/*
+		 * Free the fd and fdset arrays if we expanded them.
+		 */
+		if (files->fd != &files->fd_array[0])
+			free_fd_array(files->fd, files->max_fds);
+		if (files->max_fdset > __FD_SETSIZE) {
+			free_fdset(files->open_fds, files->max_fdset);
+			free_fdset(files->close_on_exec, files->max_fdset);
+		}
+		kmem_cache_free(files_cachep, files);
+	}
+}
+
+EXPORT_SYMBOL(put_files_struct);
+
+/* Detach and drop the task's files_struct. */
+static inline void __exit_files(struct task_struct *tsk)
+{
+	struct files_struct * files = tsk->files;
+
+	if (files) {
+		task_lock(tsk);
+		tsk->files = NULL;
+		task_unlock(tsk);
+		put_files_struct(files);
+	}
+}
+
+void exit_files(struct task_struct *tsk)
+{
+	__exit_files(tsk);
+}
+
+/* Drop a reference on `fs`; on last put, release root/pwd/altroot. */
+static inline void __put_fs_struct(struct fs_struct *fs)
+{
+	/* No need to hold fs->lock if we are killing it */
+	if (atomic_dec_and_test(&fs->count)) {
+		dput(fs->root);
+		mntput(fs->rootmnt);
+		dput(fs->pwd);
+		mntput(fs->pwdmnt);
+		if (fs->altroot) {
+			dput(fs->altroot);
+			mntput(fs->altrootmnt);
+		}
+		kmem_cache_free(fs_cachep, fs);
+	}
+}
+
+void put_fs_struct(struct fs_struct *fs)
+{
+	__put_fs_struct(fs);
+}
+
+/* Detach and drop the task's fs_struct. */
+static inline void __exit_fs(struct task_struct *tsk)
+{
+	struct fs_struct * fs = tsk->fs;
+
+	if (fs) {
+		task_lock(tsk);
+		tsk->fs = NULL;
+		task_unlock(tsk);
+		__put_fs_struct(fs);
+	}
+}
+
+void exit_fs(struct task_struct *tsk)
+{
+	__exit_fs(tsk);
+}
+
+EXPORT_SYMBOL_GPL(exit_fs);
+
+/*
+ * Turn us into a lazy TLB process if we
+ * aren't already..
+ */
+static inline void __exit_mm(struct task_struct * tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+
+	mm_release(tsk, mm);
+	if (!mm)
+		return;
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold mmap_sem around checking core_waiters
+	 * and clearing tsk->mm.  The core-inducing thread
+	 * will increment core_waiters for each thread in the
+	 * group with ->mm != NULL.
+	 */
+	down_read(&mm->mmap_sem);
+	if (mm->core_waiters) {
+		up_read(&mm->mmap_sem);
+		down_write(&mm->mmap_sem);
+		if (!--mm->core_waiters)
+			complete(mm->core_startup_done);
+		up_write(&mm->mmap_sem);
+
+		wait_for_completion(&mm->core_done);
+		down_read(&mm->mmap_sem);
+	}
+	/* keep mm alive as our active_mm after tsk->mm is cleared */
+	atomic_inc(&mm->mm_count);
+	if (mm != tsk->active_mm) BUG();
+	/* more a memory barrier than a real lock */
+	task_lock(tsk);
+	tsk->mm = NULL;
+	up_read(&mm->mmap_sem);
+	enter_lazy_tlb(mm, current);
+	task_unlock(tsk);
+	mmput(mm);
+}
+
+void exit_mm(struct task_struct *tsk)
+{
+	__exit_mm(tsk);
+}
+
+EXPORT_SYMBOL(exit_mm);
+
+/*
+ * Pick p's new real_parent: `reaper` if valid, otherwise child_reaper
+ * (init).  BUGs if parent would equal real_parent afterwards.
+ */
+static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+{
+	/*
+	 * Make sure we're not reparenting to ourselves and that
+	 * the parent is not a zombie.
+	 */
+	if (p == reaper || reaper->state >= TASK_ZOMBIE)
+		p->real_parent = child_reaper;
+	else
+		p->real_parent = reaper;
+	if (p->parent == p->real_parent)
+		BUG();
+}
+
+/*
+ * Rehome child `p` of dying `father` onto p->real_parent (already chosen
+ * by choose_new_parent).  `traced` means p sits on father's
+ * ptrace_children list.  Caller holds tasklist_lock for writing.
+ */
+static inline void reparent_thread(task_t *p, task_t *father, int traced)
+{
+	/* We don't want people slaying init. */
+	if (p->exit_signal != -1)
+		p->exit_signal = SIGCHLD;
+	p->self_exec_id++;
+
+	if (p->pdeath_signal)
+		/* We already hold the tasklist_lock here.  */
+		group_send_sig_info(p->pdeath_signal, (void *) 0, p);
+
+	/* Move the child from its dying parent to the new one.  */
+	if (unlikely(traced)) {
+		/* Preserve ptrace links if someone else is tracing this child.  */
+		list_del_init(&p->ptrace_list);
+		if (p->parent != p->real_parent)
+			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
+	} else {
+		/* If this child is being traced, then we're the one tracing it
+		 * anyway, so let go of it.
+		 */
+		p->ptrace = 0;
+		list_del_init(&p->sibling);
+		p->parent = p->real_parent;
+		list_add_tail(&p->sibling, &p->parent->children);
+
+		/* If we'd notified the old parent about this child's death,
+		 * also notify the new parent.
+		 */
+		if (p->state == TASK_ZOMBIE && p->exit_signal != -1 &&
+		    thread_group_empty(p))
+			do_notify_parent(p, p->exit_signal);
+	}
+
+	/*
+	 * process group orphan check
+	 * Case ii: Our child is in a different pgrp
+	 * than we are, and it was the only connection
+	 * outside, so the child pgrp is now orphaned.
+	 */
+	if ((process_group(p) != process_group(father)) &&
+	    (p->signal->session == father->signal->session)) {
+		int pgrp = process_group(p);
+
+		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
+			__kill_pg_info(SIGHUP, (void *)1, pgrp);
+			__kill_pg_info(SIGCONT, (void *)1, pgrp);
+		}
+	}
+}
+
+/*
+ * When we die, we re-parent all our children.
+ * Try to give them to another thread in our thread
+ * group, and if no such member exists, give it to
+ * the global child reaper process (ie "init")
+ */
+static inline void forget_original_parent(struct task_struct * father)
+{
+	struct task_struct *p, *reaper = father;
+	struct list_head *_p, *_n;
+
+	/* prefer the thread group leader; fall back to init */
+	reaper = father->group_leader;
+	if (reaper == father)
+		reaper = child_reaper;
+
+	/*
+	 * There are only two places where our children can be:
+	 *
+	 * - in our child list
+	 * - in our ptraced child list
+	 *
+	 * Search them and reparent children.
+	 */
+	list_for_each_safe(_p, _n, &father->children) {
+		p = list_entry(_p,struct task_struct,sibling);
+		if (father == p->real_parent) {
+			choose_new_parent(p, reaper, child_reaper);
+			reparent_thread(p, father, 0);
+		} else {
+			/* we are the tracer, not the real parent: drop the trace */
+			ptrace_unlink (p);
+			if (p->state == TASK_ZOMBIE && p->exit_signal != -1 &&
+			    thread_group_empty(p))
+				do_notify_parent(p, p->exit_signal);
+		}
+	}
+	list_for_each_safe(_p, _n, &father->ptrace_children) {
+		p = list_entry(_p,struct task_struct,ptrace_list);
+		choose_new_parent(p, reaper, child_reaper);
+		reparent_thread(p, father, 1);
+	}
+}
+
+/*
+ * Send signals to all our closest relatives so that they know
+ * to properly mourn us..
+ */
+/*
+ * Tell our relatives we are dying: reparent children, signal newly
+ * orphaned process groups, notify the parent (or tracer), and move to
+ * TASK_ZOMBIE or TASK_DEAD.  Self-reaps immediately in the DEAD case.
+ */
+static void exit_notify(struct task_struct *tsk)
+{
+	int state;
+	struct task_struct *t;
+
+	/* CKRM class-resource-manager exit callback (planetlab patch) */
+	ckrm_cb_exit(tsk);
+
+	if (signal_pending(tsk) && !tsk->signal->group_exit
+	    && !thread_group_empty(tsk)) {
+		/*
+		 * This occurs when there was a race between our exit
+		 * syscall and a group signal choosing us as the one to
+		 * wake up.  It could be that we are the only thread
+		 * alerted to check for pending signals, but another thread
+		 * should be woken now to take the signal since we will not.
+		 * Now we'll wake all the threads in the group just to make
+		 * sure someone gets all the pending signals.
+		 */
+		read_lock(&tasklist_lock);
+		spin_lock_irq(&tsk->sighand->siglock);
+		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
+			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
+				recalc_sigpending_tsk(t);
+				if (signal_pending(t))
+					signal_wake_up(t, 0);
+			}
+		spin_unlock_irq(&tsk->sighand->siglock);
+		read_unlock(&tasklist_lock);
+	}
+
+	write_lock_irq(&tasklist_lock);
+
+	/*
+	 * This does two things:
+	 *
+	 * A.  Make init inherit all the child processes
+	 * B.  Check to see if any process groups have become orphaned
+	 *	as a result of our exiting, and if they have any stopped
+	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
+	 */
+
+	forget_original_parent(tsk);
+	BUG_ON(!list_empty(&tsk->children));
+
+	/*
+	 * Check to see if any process groups have become orphaned
+	 * as a result of our exiting, and if they have any stopped
+	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
+	 *
+	 * Case i: Our father is in a different pgrp than we are
+	 * and we were the only connection outside, so our pgrp
+	 * is about to become orphaned.
+	 */
+
+	t = tsk->real_parent;
+
+	if ((process_group(t) != process_group(tsk)) &&
+	    (t->signal->session == tsk->signal->session) &&
+	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
+	    has_stopped_jobs(process_group(tsk))) {
+		__kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
+		__kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
+	}
+
+	/* Let father know we died
+	 *
+	 * Thread signals are configurable, but you aren't going to use
+	 * that to send signals to arbitary processes.
+	 * That stops right now.
+	 *
+	 * If the parent exec id doesn't match the exec id we saved
+	 * when we started then we know the parent has changed security
+	 * domain.
+	 *
+	 * If our self_exec id doesn't match our parent_exec_id then
+	 * we have changed execution domain as these two values started
+	 * the same after a fork.
+	 *
+	 */
+
+	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
+	    ( tsk->parent_exec_id != t->self_exec_id  ||
+	      tsk->self_exec_id != tsk->parent_exec_id)
+	    && !capable(CAP_KILL))
+		tsk->exit_signal = SIGCHLD;
+
+
+	/* If something other than our normal parent is ptracing us, then
+	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
+	 * only has special meaning to our real parent.
+	 */
+	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
+		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+		do_notify_parent(tsk, signal);
+	} else if (tsk->ptrace) {
+		do_notify_parent(tsk, SIGCHLD);
+	}
+
+	/* detached and untraced tasks go straight to TASK_DEAD */
+	state = TASK_ZOMBIE;
+	if (tsk->exit_signal == -1 && tsk->ptrace == 0)
+		state = TASK_DEAD;
+	tsk->state = state;
+	tsk->flags |= PF_DEAD;
+
+	/*
+	 * Clear these here so that update_process_times() won't try to deliver
+	 * itimer, profile or rlimit signals to this task while it is in late exit.
+	 */
+	tsk->it_virt_value = 0;
+	tsk->it_prof_value = 0;
+	tsk->rlim[RLIMIT_CPU].rlim_cur = RLIM_INFINITY;
+
+	/*
+	 * In the preemption case it must be impossible for the task
+	 * to get runnable again, so use "_raw_" unlock to keep
+	 * preempt_count elevated until we schedule().
+	 *
+	 * To avoid deadlock on SMP, interrupts must be unmasked.  If we
+	 * don't, subsequently called functions (e.g, wait_task_inactive()
+	 * via release_task()) will spin, with interrupt flags
+	 * unwittingly blocked, until the other task sleeps.  That task
+	 * may itself be waiting for smp_call_function() to answer and
+	 * complete, and with interrupts blocked that will never happen.
+	 */
+	_raw_write_unlock(&tasklist_lock);
+	local_irq_enable();
+
+	/* If the process is dead, release it - nobody will wait for it */
+	if (state == TASK_DEAD)
+		release_task(tsk);
+
+}
+
+/*
+ * Terminate the current task with exit status `code`: tear down mm,
+ * IPC, files, fs, namespace and thread state, notify relatives, then
+ * schedule away forever.  Never returns.
+ */
+asmlinkage NORET_TYPE void do_exit(long code)
+{
+	struct task_struct *tsk = current;
+
+	if (unlikely(in_interrupt()))
+		panic("Aiee, killing interrupt handler!");
+	if (unlikely(!tsk->pid))
+		panic("Attempted to kill the idle task!");
+	if (unlikely(tsk->pid == 1))
+		panic("Attempted to kill init!");
+	if (tsk->io_context)
+		exit_io_context();
+	tsk->flags |= PF_EXITING;
+	del_timer_sync(&tsk->real_timer);
+
+	if (unlikely(in_atomic()))
+		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
+				current->comm, current->pid,
+				preempt_count());
+
+	profile_exit_task(tsk);
+
+	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
+		current->ptrace_message = code;
+		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
+	}
+
+	acct_process(code);
+	__exit_mm(tsk);
+
+	exit_sem(tsk);
+	__exit_files(tsk);
+	__exit_fs(tsk);
+	exit_namespace(tsk);
+	exit_thread();
+#ifdef CONFIG_NUMA
+	mpol_free(tsk->mempolicy);
+#endif
+
+	if (tsk->signal->leader)
+		disassociate_ctty(1);
+
+	module_put(tsk->thread_info->exec_domain->module);
+	if (tsk->binfmt)
+		module_put(tsk->binfmt->module);
+
+	tsk->exit_code = code;
+#ifdef CONFIG_CKRM_TYPE_TASKCLASS
+	/* CKRM (planetlab): release the task-class numtasks reference */
+	numtasks_put_ref(tsk->taskclass);
+#endif
+	exit_notify(tsk);
+	schedule();
+	BUG();
+	/* Avoid "noreturn function does return".  */
+	for (;;) ;
+}
+
+/* Signal `comp` (if any) and then exit; used by module/thread teardown. */
+NORET_TYPE void complete_and_exit(struct completion *comp, long code)
+{
+	if (comp)
+		complete(comp);
+
+	do_exit(code);
+}
+
+EXPORT_SYMBOL(complete_and_exit);
+
+/* exit(2): low byte of error_code becomes the wait(2) status. */
+asmlinkage long sys_exit(int error_code)
+{
+	do_exit((error_code&0xff)<<8);
+}
+
+/*
+ * Return the next thread in p's thread group (circular walk of the
+ * TGID pid chain).  Caller must hold siglock or tasklist_lock (checked
+ * on SMP).
+ */
+task_t fastcall *next_thread(task_t *p)
+{
+	struct pid_link *link = p->pids + PIDTYPE_TGID;
+	struct list_head *tmp, *head = &link->pidptr->task_list;
+
+#ifdef CONFIG_SMP
+	if (!p->sighand)
+		BUG();
+	if (!spin_is_locked(&p->sighand->siglock) &&
+				!rwlock_is_locked(&tasklist_lock))
+		BUG();
+#endif
+	tmp = link->pid_chain.next;
+	if (tmp == head)
+		tmp = head->next;
+
+	return pid_task(tmp, PIDTYPE_TGID);
+}
+
+EXPORT_SYMBOL(next_thread);
+
+/*
+ * Take down every thread in the group. This is called by fatal signals
+ * as well as by sys_exit_group (below).
+ */
+NORET_TYPE void
+do_group_exit(int exit_code)
+{
+	BUG_ON(exit_code & 0x80); /* core dumps don't get here */
+
+	if (current->signal->group_exit)
+		exit_code = current->signal->group_exit_code;
+	else if (!thread_group_empty(current)) {
+		struct signal_struct *const sig = current->signal;
+		struct sighand_struct *const sighand = current->sighand;
+		read_lock(&tasklist_lock);
+		spin_lock_irq(&sighand->siglock);
+		if (sig->group_exit)
+			/* Another thread got here before we took the lock.  */
+			exit_code = sig->group_exit_code;
+		else {
+			/* first exiter: record the code and kill the siblings */
+			sig->group_exit = 1;
+			sig->group_exit_code = exit_code;
+			zap_other_threads(current);
+		}
+		spin_unlock_irq(&sighand->siglock);
+		read_unlock(&tasklist_lock);
+	}
+
+	do_exit(exit_code);
+	/* NOTREACHED */
+}
+
+/*
+ * this kills every thread in the thread group. Note that any externally
+ * wait4()-ing process will get the correct exit code - even if this
+ * thread is not the thread group leader.
+ */
+/* exit_group(2): terminate every thread in the group with this status. */
+asmlinkage void sys_exit_group(int error_code)
+{
+	do_group_exit((error_code & 0xff) << 8);
+}
+
+/*
+ * Does child `p` match the wait4 pid/options selector?  Returns 0 (no),
+ * 1 (yes), or 2 (matches but is a group leader whose group is not yet
+ * empty, so it cannot be reaped now).
+ */
+static int eligible_child(pid_t pid, int options, task_t *p)
+{
+	if (pid > 0) {
+		if (p->pid != pid)
+			return 0;
+	} else if (!pid) {
+		/* pid == 0: any child in our process group */
+		if (process_group(p) != process_group(current))
+			return 0;
+	} else if (pid != -1) {
+		/* pid < -1: any child in process group -pid */
+		if (process_group(p) != -pid)
+			return 0;
+	}
+
+	/*
+	 * Do not consider detached threads that are
+	 * not ptraced:
+	 */
+	if (p->exit_signal == -1 && !p->ptrace)
+		return 0;
+
+	/* Wait for all children (clone and not) if __WALL is set;
+	 * otherwise, wait for clone children *only* if __WCLONE is
+	 * set; otherwise, wait for non-clone children *only*.  (Note:
+	 * A "clone" child here is one that reports to its parent
+	 * using a signal other than SIGCHLD.) */
+	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+	    && !(options & __WALL))
+		return 0;
+	/*
+	 * Do not consider thread group leaders that are
+	 * in a non-empty thread group:
+	 */
+	if (current->tgid != p->tgid && delay_group_leader(p))
+		return 2;
+
+	if (security_task_wait(p))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Handle sys_wait4 work for one task in state TASK_ZOMBIE. We hold
+ * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
+ * the lock and this task is uninteresting. If we return nonzero, we have
+ * released the lock and the system call should return.
+ */
+static int wait_task_zombie(task_t *p, unsigned int __user *stat_addr, struct rusage __user *ru)
+{
+	unsigned long state;
+	int retval;
+
+	/*
+	 * Try to move the task's state to DEAD
+	 * only one thread is allowed to do this:
+	 */
+	state = xchg(&p->state, TASK_DEAD);
+	if (state != TASK_ZOMBIE) {
+		BUG_ON(state != TASK_DEAD);
+		return 0;
+	}
+	if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
+		/*
+		 * This can only happen in a race with a ptraced thread
+		 * dying on another processor.
+		 */
+		return 0;
+
+	/*
+	 * Now we are sure this task is interesting, and no other
+	 * thread can reap it because we set its state to TASK_DEAD.
+	 */
+	read_unlock(&tasklist_lock);
+
+	/* copy rusage and exit status out; may fault on user memory */
+	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
+	if (!retval && stat_addr) {
+		if (p->signal->group_exit)
+			retval = put_user(p->signal->group_exit_code, stat_addr);
+		else
+			retval = put_user(p->exit_code, stat_addr);
+	}
+	if (retval) {
+		/* copy-out failed: put the task back so it can be reaped later */
+		p->state = TASK_ZOMBIE;
+		return retval;
+	}
+	retval = p->pid;
+	if (p->real_parent != p->parent) {
+		write_lock_irq(&tasklist_lock);
+		/* Double-check with lock held.  */
+		if (p->real_parent != p->parent) {
+			__ptrace_unlink(p);
+			p->state = TASK_ZOMBIE;
+			/* If this is a detached thread, this is where it goes away.  */
+			if (p->exit_signal == -1) {
+				/* release_task takes the lock itself.  */
+				write_unlock_irq(&tasklist_lock);
+				release_task (p);
+			}
+			else {
+				do_notify_parent(p, p->exit_signal);
+				write_unlock_irq(&tasklist_lock);
+			}
+			p = NULL;
+		}
+		else
+			write_unlock_irq(&tasklist_lock);
+	}
+	if (p != NULL)
+		release_task(p);
+	BUG_ON(!retval);
+	return retval;
+}
+
+/*
+ * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
+ * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
+ * the lock and this task is uninteresting. If we return nonzero, we have
+ * released the lock and the system call should return.
+ */
+static int wait_task_stopped(task_t *p, int delayed_group_leader,
+			     unsigned int __user *stat_addr,
+			     struct rusage __user *ru)
+{
+	int retval, exit_code;
+
+	if (!p->exit_code)
+		return 0;
+	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
+	    p->signal && p->signal->group_stop_count > 0)
+		/*
+		 * A group stop is in progress and this is the group leader.
+		 * We won't report until all threads have stopped.
+		 */
+		return 0;
+
+	/*
+	 * Now we are pretty sure this task is interesting.
+	 * Make sure it doesn't get reaped out from under us while we
+	 * give up the lock and then examine it below.  We don't want to
+	 * keep holding onto the tasklist_lock while we call getrusage and
+	 * possibly take page faults for user memory.
+	 */
+	get_task_struct(p);
+	read_unlock(&tasklist_lock);
+	write_lock_irq(&tasklist_lock);
+
+	/*
+	 * This uses xchg to be atomic with the thread resuming and setting
+	 * it.  It must also be done with the write lock held to prevent a
+	 * race with the TASK_ZOMBIE case.
+	 */
+	exit_code = xchg(&p->exit_code, 0);
+	if (unlikely(p->state > TASK_STOPPED)) {
+		/*
+		 * The task resumed and then died.  Let the next iteration
+		 * catch it in TASK_ZOMBIE.  Note that exit_code might
+		 * already be zero here if it resumed and did _exit(0).
+		 * The task itself is dead and won't touch exit_code again;
+		 * other processors in this function are locked out.
+		 */
+		p->exit_code = exit_code;
+		exit_code = 0;
+	}
+	if (unlikely(exit_code == 0)) {
+		/*
+		 * Another thread in this function got to it first, or it
+		 * resumed, or it resumed and then died.
+		 */
+		write_unlock_irq(&tasklist_lock);
+		put_task_struct(p);
+		read_lock(&tasklist_lock);
+		return 0;
+	}
+
+	/* move to end of parent's list to avoid starvation */
+	remove_parent(p);
+	add_parent(p, p->parent);
+
+	write_unlock_irq(&tasklist_lock);
+
+	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
+	if (!retval && stat_addr)
+		/* 0x7f marks "stopped" in the wait(2) status encoding */
+		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
+	if (!retval)
+		retval = p->pid;
+	put_task_struct(p);
+
+	BUG_ON(!retval);
+	return retval;
+}
+
+/*
+ * wait4(2): scan our children (and each thread's children unless
+ * __WNOTHREAD) for one matching `pid`/`options`; reap stopped or zombie
+ * matches, otherwise sleep on wait_chldexit and retry.  Restores
+ * "&current" from the corrupted "&curren;" entity ("¤t") in the
+ * original patch text at the add/remove_wait_queue calls.
+ */
+asmlinkage long sys_wait4(pid_t pid,unsigned int __user *stat_addr, int options, struct rusage __user *ru)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	struct task_struct *tsk;
+	int flag, retval;
+
+	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
+		return -EINVAL;
+
+	add_wait_queue(&current->wait_chldexit,&wait);
+repeat:
+	flag = 0;
+	current->state = TASK_INTERRUPTIBLE;
+	read_lock(&tasklist_lock);
+	tsk = current;
+	do {
+		struct task_struct *p;
+		struct list_head *_p;
+		int ret;
+
+		list_for_each(_p,&tsk->children) {
+			p = list_entry(_p,struct task_struct,sibling);
+
+			ret = eligible_child(pid, options, p);
+			if (!ret)
+				continue;
+			flag = 1;
+
+			switch (p->state) {
+			case TASK_STOPPED:
+				if (!(options & WUNTRACED) &&
+				    !(p->ptrace & PT_PTRACED))
+					continue;
+				retval = wait_task_stopped(p, ret == 2,
+							   stat_addr, ru);
+				if (retval != 0) /* He released the lock.  */
+					goto end_wait4;
+				break;
+			case TASK_ZOMBIE:
+				/*
+				 * Eligible but we cannot release it yet:
+				 */
+				if (ret == 2)
+					continue;
+				retval = wait_task_zombie(p, stat_addr, ru);
+				if (retval != 0) /* He released the lock.  */
+					goto end_wait4;
+				break;
+			}
+		}
+		if (!flag) {
+			list_for_each (_p,&tsk->ptrace_children) {
+				p = list_entry(_p,struct task_struct,ptrace_list);
+				if (!eligible_child(pid, options, p))
+					continue;
+				flag = 1;
+				break;
+			}
+		}
+		if (options & __WNOTHREAD)
+			break;
+		tsk = next_thread(tsk);
+		if (tsk->signal != current->signal)
+			BUG();
+	} while (tsk != current);
+	read_unlock(&tasklist_lock);
+	if (flag) {
+		retval = 0;
+		if (options & WNOHANG)
+			goto end_wait4;
+		retval = -ERESTARTSYS;
+		if (signal_pending(current))
+			goto end_wait4;
+		schedule();
+		goto repeat;
+	}
+	retval = -ECHILD;
+end_wait4:
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&current->wait_chldexit,&wait);
+	return retval;
+}
+
+#ifdef __ARCH_WANT_SYS_WAITPID
+
+/*
+ * sys_waitpid() remains for compatibility. waitpid() should be
+ * implemented by calling sys_wait4() from libc.a.
+ */
+/* waitpid(2) compatibility stub: forwards to sys_wait4 with ru == NULL. */
+asmlinkage long sys_waitpid(pid_t pid, unsigned __user *stat_addr, int options)
+{
+	return sys_wait4(pid, stat_addr, options, NULL);
+}
+
+#endif
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/security.h>
-#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rmap.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
-#include <linux/vs_memory.h>
#include <linux/ckrm.h>
#include <linux/ckrm_tsk.h>
-#include <linux/ckrm_mem_inline.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ vxdprintk("freeing up task %p\n", tsk);
clr_vx_info(&tsk->vx_info);
clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
ckrm_cb_newtask(tsk);
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
-#ifdef CONFIG_CKRM_RES_MEM
- INIT_LIST_HEAD(&tsk->mm_peers);
-#endif
return tsk;
}
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
- mm->free_area_cache = oldmm->mmap_base;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->map_count = 0;
mm->rss = 0;
cpus_clear(mm->cpu_vm_mask);
mm->ioctx_list = NULL;
mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
mm->free_area_cache = TASK_UNMAPPED_BASE;
-#ifdef CONFIG_CKRM_RES_MEM
- INIT_LIST_HEAD(&mm->tasklist);
- mm->peertask_lock = SPIN_LOCK_UNLOCKED;
-#endif
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+#ifdef __HAVE_ARCH_MMAP_TOP
+ mm->mmap_top = mmap_top();
+#endif
set_vx_info(&mm->mm_vx_info, current->vx_info);
return mm;
}
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm);
-#ifdef CONFIG_CKRM_RES_MEM
- mm->memclass = GET_MEM_CLASS(current);
- mem_class_get(mm->memclass);
-#endif
}
return mm;
}
mm_free_pgd(mm);
destroy_context(mm);
clr_vx_info(&mm->mm_vx_info);
-#ifdef CONFIG_CKRM_RES_MEM
- /* class can be null and mm's tasklist can be empty here */
- if (mm->memclass) {
- mem_class_put(mm->memclass);
- mm->memclass = NULL;
- }
-#endif
free_mm(mm);
}
spin_unlock(&mmlist_lock);
exit_aio(mm);
exit_mmap(mm);
- put_swap_token(mm);
mmdrop(mm);
}
}
good_mm:
tsk->mm = mm;
tsk->active_mm = mm;
- ckrm_init_mm_to_task(mm, tsk);
return 0;
free_pt:
goto fork_out;
retval = -ENOMEM;
+
p = dup_task_struct(current);
if (!p)
goto fork_out;
- p->tux_info = NULL;
p->vx_info = NULL;
set_vx_info(&p->vx_info, current->vx_info);
}
if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
if (!vx_rsspages_avail(p->mm, p->mm->rss))
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
}
retval = -EAGAIN;
if (!vx_nproc_avail(1))
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
if (atomic_read(&p->user->processes) >=
p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != &root_user)
- goto bad_fork_cleanup_vm;
+ goto bad_fork_free;
}
atomic_inc(&p->user->__count);
}
#endif
+ retval = -ENOMEM;
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
} else
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
- p->ioprio = current->ioprio;
nr_threads++;
- /* p is copy of current */
- vxi = p->vx_info;
+ vxi = current->vx_info;
if (vxi) {
atomic_inc(&vxi->cacct.nr_threads);
- atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
+ // atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
}
+ vx_nproc_inc();
write_unlock_irq(&tasklist_lock);
retval = 0;
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
free_uid(p->user);
-bad_fork_cleanup_vm:
- if (p->mm && !(clone_flags & CLONE_VM))
- vx_pages_sub(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm);
bad_fork_free:
free_task(p);
goto fork_out;
struct task_struct * p = (struct task_struct *) __data;
unsigned long interval;
- if (send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p))
- printk("*warning*: failed to send SIGALRM to %u\n", p->pid);
-
+ send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
interval = p->it_real_incr;
if (interval) {
if (interval > (unsigned long) LONG_MAX)
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+#define symbol_is(literal, string) \
+ (strcmp(MODULE_SYMBOL_PREFIX literal, (string)) == 0)
+
/* Protects module list */
static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
#include <linux/kexec.h>
#endif
-int panic_timeout = 900;
-int panic_on_oops = 1;
+int panic_timeout;
+int panic_on_oops;
int tainted;
void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
return "machine";
if(pmdisk_info.cpus != num_online_cpus())
return "number of cpus";
- return NULL;
+ return 0;
}
* locking requirements, the list it's pulling from has to belong to a cpu
* which is dead and hence not processing interrupts.
*/
-static void rcu_move_batch(struct rcu_head *list)
+static void rcu_move_batch(struct list_head *list)
{
- int cpu;
+ struct list_head *entry;
+ int cpu = smp_processor_id();
local_irq_disable();
-
- cpu = smp_processor_id();
-
- while (list != NULL) {
- *RCU_nxttail(cpu) = list;
- RCU_nxttail(cpu) = &list->next;
- list = list->next;
+ while (!list_empty(list)) {
+ entry = list->next;
+ list_del(entry);
+ list_add_tail(entry, &RCU_nxtlist(cpu));
}
local_irq_enable();
}
spin_lock_bh(&rcu_state.mutex);
if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
cpu_quiet(cpu);
+unlock:
spin_unlock_bh(&rcu_state.mutex);
- rcu_move_batch(RCU_curlist(cpu));
- rcu_move_batch(RCU_nxtlist(cpu));
+ rcu_move_batch(&RCU_curlist(cpu));
+ rcu_move_batch(&RCU_nxtlist(cpu));
tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
}
#define LOW_CREDIT(p) \
((p)->interactive_credit < -CREDIT_LIMIT)
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-/*
- * if belong to different class, compare class priority
- * otherwise compare task priority
- */
-#define TASK_PREEMPTS_CURR(p, rq) \
- ( ((p)->cpu_class != (rq)->curr->cpu_class) \
- && ((rq)->curr != (rq)->idle) && ((p) != (rq)->idle )) \
- ? class_preempts_curr((p),(rq)->curr) \
- : ((p)->prio < (rq)->curr->prio)
-#else
-#define TASK_PREEMPTS_CURR(p, rq) \
- ((p)->prio < (rq)->curr->prio)
-#endif
-
/*
* BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
* to time slice values.
((MAX_TIMESLICE - MIN_TIMESLICE) * \
(MAX_PRIO-1 - (p)->static_prio) / (MAX_USER_PRIO-1)))
-unsigned int task_timeslice(task_t *p)
+static unsigned int task_timeslice(task_t *p)
{
return BASE_TIMESLICE(p);
}
#define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time)
-/*
- * These are the runqueue data structures:
- */
-
-typedef struct runqueue runqueue_t;
-#include <linux/ckrm_classqueue.h>
-#include <linux/ckrm_sched.h>
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct runqueue {
- spinlock_t lock;
-
- /*
- * nr_running and cpu_load should be in the same cacheline because
- * remote CPUs use both these fields when doing load calculation.
- */
- unsigned long nr_running;
-#if defined(CONFIG_SMP)
- unsigned long cpu_load;
-#endif
- unsigned long long nr_switches, nr_preempt;
- unsigned long expired_timestamp, nr_uninterruptible;
- unsigned long long timestamp_last_tick;
- task_t *curr, *idle;
- struct mm_struct *prev_mm;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- struct classqueue_struct classqueue;
- ckrm_load_t ckrm_load;
-#else
- prio_array_t *active, *expired, arrays[2];
-#endif
- int best_expired_prio;
- atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
- struct sched_domain *sd;
-
- /* For active balancing */
- int active_balance;
- int push_cpu;
-
- task_t *migration_thread;
- struct list_head migration_queue;
-#endif
-
-#ifdef CONFIG_VSERVER_HARDCPU
- struct list_head hold_queue;
- int idle_tokens;
-#endif
-};
-
-static DEFINE_PER_CPU(struct runqueue, runqueues);
+DEFINE_PER_CPU(struct runqueue, runqueues);
#define for_each_domain(cpu, domain) \
for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
# define task_running(rq, p) ((rq)->curr == (p))
#endif
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+#include <linux/ckrm_sched.h>
+spinlock_t cvt_lock = SPIN_LOCK_UNLOCKED;
+rwlock_t class_list_lock = RW_LOCK_UNLOCKED;
+LIST_HEAD(active_cpu_classes); // list of active cpu classes; anchor
+struct ckrm_cpu_class default_cpu_class_obj;
+
/*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
+ * the minimum CVT allowed is the base_cvt
+ * otherwise, it will starve others
*/
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+CVT_t get_min_cvt(int cpu)
{
- struct runqueue *rq;
-
-repeat_lock_task:
- local_irq_save(*flags);
- rq = task_rq(p);
- spin_lock(&rq->lock);
- if (unlikely(rq != task_rq(p))) {
- spin_unlock_irqrestore(&rq->lock, *flags);
- goto repeat_lock_task;
- }
- return rq;
-}
+ cq_node_t *node;
+ struct ckrm_local_runqueue * lrq;
+ CVT_t min_cvt;
-static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
-{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ node = classqueue_get_head(bpt_queue(cpu));
+ lrq = (node) ? class_list_entry(node) : NULL;
+
+ if (lrq)
+ min_cvt = lrq->local_cvt;
+ else
+ min_cvt = 0;
+
+ return min_cvt;
}
/*
- * rq_lock - lock a given runqueue and disable interrupts.
+ * update the classueue base for all the runqueues
+ * TODO: we can only update half of the min_base to solve the movebackward issue
*/
-static runqueue_t *this_rq_lock(void)
-{
- runqueue_t *rq;
+static inline void check_update_class_base(int this_cpu) {
+ unsigned long min_base = 0xFFFFFFFF;
+ cq_node_t *node;
+ int i;
- local_irq_disable();
- rq = this_rq();
- spin_lock(&rq->lock);
+ if (! cpu_online(this_cpu)) return;
- return rq;
+ /*
+ * find the min_base across all the processors
+ */
+ for_each_online_cpu(i) {
+ /*
+ * I should change it to directly use bpt->base
+ */
+ node = classqueue_get_head(bpt_queue(i));
+ if (node && node->prio < min_base) {
+ min_base = node->prio;
+ }
+ }
+ if (min_base != 0xFFFFFFFF)
+ classqueue_update_base(bpt_queue(this_cpu),min_base);
}
-static inline void rq_unlock(runqueue_t *rq)
+static inline void ckrm_rebalance_tick(int j,int this_cpu)
{
- spin_unlock_irq(&rq->lock);
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ read_lock(&class_list_lock);
+ if (!(j % CVT_UPDATE_TICK))
+ update_global_cvts(this_cpu);
+
+#define CKRM_BASE_UPDATE_RATE 400
+ if (! (jiffies % CKRM_BASE_UPDATE_RATE))
+ check_update_class_base(this_cpu);
+
+ read_unlock(&class_list_lock);
+#endif
}
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
-static inline ckrm_lrq_t *rq_get_next_class(struct runqueue *rq)
+static inline struct ckrm_local_runqueue *rq_get_next_class(struct runqueue *rq)
{
cq_node_t *node = classqueue_get_head(&rq->classqueue);
return ((node) ? class_list_entry(node) : NULL);
}
-/*
- * return the cvt of the current running class
- * if no current running class, return 0
- * assume cpu is valid (cpu_online(cpu) == 1)
- */
-CVT_t get_local_cur_cvt(int cpu)
-{
- ckrm_lrq_t * lrq = rq_get_next_class(cpu_rq(cpu));
-
- if (lrq)
- return lrq->local_cvt;
- else
- return 0;
-}
-
static inline struct task_struct * rq_get_next_task(struct runqueue* rq)
{
prio_array_t *array;
struct task_struct *next;
- ckrm_lrq_t *queue;
- int idx;
+ struct ckrm_local_runqueue *queue;
int cpu = smp_processor_id();
-
- // it is guaranteed be the ( rq->nr_running > 0 ) check in
- // schedule that a task will be found.
-
+
+ next = rq->idle;
retry_next_class:
- queue = rq_get_next_class(rq);
- // BUG_ON( !queue );
-
- array = queue->active;
- if (unlikely(!array->nr_active)) {
- queue->active = queue->expired;
- queue->expired = array;
- queue->expired_timestamp = 0;
+ if ((queue = rq_get_next_class(rq))) {
+ array = queue->active;
+ //check switch active/expired queue
+ if (unlikely(!queue->active->nr_active)) {
+ queue->active = queue->expired;
+ queue->expired = array;
+ queue->expired_timestamp = 0;
+
+ if (queue->active->nr_active)
+ set_top_priority(queue,
+ find_first_bit(queue->active->bitmap, MAX_PRIO));
+ else {
+ classqueue_dequeue(queue->classqueue,
+ &queue->classqueue_linkobj);
+ cpu_demand_event(get_rq_local_stat(queue,cpu),CPU_DEMAND_DEQUEUE,0);
+ }
- if (queue->active->nr_active)
- set_top_priority(queue,
- find_first_bit(queue->active->bitmap, MAX_PRIO));
- else {
- classqueue_dequeue(queue->classqueue,
- &queue->classqueue_linkobj);
- cpu_demand_event(get_rq_local_stat(queue,cpu),CPU_DEMAND_DEQUEUE,0);
+ goto retry_next_class;
}
- goto retry_next_class;
+ BUG_ON(!queue->active->nr_active);
+ next = task_list_entry(array->queue[queue->top_priority].next);
}
- // BUG_ON(!array->nr_active);
-
- idx = queue->top_priority;
- // BUG_ON (idx == MAX_PRIO);
- next = task_list_entry(array->queue[idx].next);
return next;
}
-#else /*! CONFIG_CKRM_CPU_SCHEDULE*/
+
+static inline void rq_load_inc(runqueue_t *rq, struct task_struct *p) { rq->ckrm_cpu_load += cpu_class_weight(p->cpu_class); }
+static inline void rq_load_dec(runqueue_t *rq, struct task_struct *p) { rq->ckrm_cpu_load -= cpu_class_weight(p->cpu_class); }
+
+#else /*CONFIG_CKRM_CPU_SCHEDULE*/
+
static inline struct task_struct * rq_get_next_task(struct runqueue* rq)
{
prio_array_t *array;
static inline void class_enqueue_task(struct task_struct* p, prio_array_t *array) { }
static inline void class_dequeue_task(struct task_struct* p, prio_array_t *array) { }
static inline void init_cpu_classes(void) { }
-#define rq_ckrm_load(rq) NULL
-static inline void ckrm_sched_tick(int j,int this_cpu,void* name) {}
+static inline void rq_load_inc(runqueue_t *rq, struct task_struct *p) { }
+static inline void rq_load_dec(runqueue_t *rq, struct task_struct *p) { }
#endif /* CONFIG_CKRM_CPU_SCHEDULE */
+
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
+runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+{
+ struct runqueue *rq;
+
+repeat_lock_task:
+ local_irq_save(*flags);
+ rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock_irqrestore(&rq->lock, *flags);
+ goto repeat_lock_task;
+ }
+ return rq;
+}
+
+void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&rq->lock, *flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static runqueue_t *this_rq_lock(void)
+{
+ runqueue_t *rq;
+
+ local_irq_disable();
+ rq = this_rq();
+ spin_lock(&rq->lock);
+
+ return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+ spin_unlock_irq(&rq->lock);
+}
+
/*
* Adding/removing a task to/from a priority array:
*/
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+void dequeue_task(struct task_struct *p, prio_array_t *array)
{
+ BUG_ON(! array);
array->nr_active--;
list_del(&p->run_list);
if (list_empty(array->queue + p->prio))
class_dequeue_task(p,array);
}
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+void enqueue_task(struct task_struct *p, prio_array_t *array)
{
list_add_tail(&p->run_list, array->queue + p->prio);
__set_bit(p->prio, array->bitmap);
{
enqueue_task(p, rq_active(p,rq));
rq->nr_running++;
+ rq_load_inc(rq,p);
}
/*
{
enqueue_task_head(p, rq_active(p,rq));
rq->nr_running++;
+ rq_load_inc(rq,p);
}
static void recalc_task_prio(task_t *p, unsigned long long now)
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
+ rq_load_dec(rq,p);
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible++;
dequeue_task(p, p->array);
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
spin_lock_init(&p->switch_lock);
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- cpu_demand_event(&p->demand_stat,CPU_DEMAND_INIT,0);
-#endif
-
#ifdef CONFIG_PREEMPT
/*
* During context-switch we hold precisely one spinlock, which
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
- class_enqueue_task(p,p->array);
+ rq_load_inc(rq,p);
}
task_rq_unlock(rq, &flags);
}
{
unsigned long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
return sum;
{
unsigned long long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
{
unsigned long i, sum = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
- class_enqueue_task(p,p->array);
+ rq_load_inc(rq,p);
}
} else {
/* Not the local CPU - must adjust timestamp */
{
dequeue_task(p, src_array);
src_rq->nr_running--;
+ rq_load_dec(src_rq,p);
+
set_task_cpu(p, this_cpu);
this_rq->nr_running++;
+ rq_load_inc(this_rq,p);
enqueue_task(p, this_array);
+
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
/*
}
#ifdef CONFIG_CKRM_CPU_SCHEDULE
-static inline int ckrm_preferred_task(task_t *tmp,long min, long max,
- int phase, enum idle_type idle)
+
+struct ckrm_cpu_class *find_unbalanced_class(int busiest_cpu, int this_cpu, unsigned long *cls_imbalance)
{
- long pressure = task_load(tmp);
-
- if (pressure > max)
- return 0;
+ struct ckrm_cpu_class *most_unbalanced_class = NULL;
+ struct ckrm_cpu_class *clsptr;
+ int max_unbalance = 0;
- if ((idle == NOT_IDLE) && ! phase && (pressure <= min))
- return 0;
- return 1;
+ list_for_each_entry(clsptr,&active_cpu_classes,links) {
+ struct ckrm_local_runqueue *this_lrq = get_ckrm_local_runqueue(clsptr,this_cpu);
+ struct ckrm_local_runqueue *busiest_lrq = get_ckrm_local_runqueue(clsptr,busiest_cpu);
+ int unbalance_degree;
+
+ unbalance_degree = (local_queue_nr_running(busiest_lrq) - local_queue_nr_running(this_lrq)) * cpu_class_weight(clsptr);
+ if (unbalance_degree >= *cls_imbalance)
+ continue; // already looked at this class
+
+ if (unbalance_degree > max_unbalance) {
+ max_unbalance = unbalance_degree;
+ most_unbalanced_class = clsptr;
+ }
+ }
+ *cls_imbalance = max_unbalance;
+ return most_unbalanced_class;
}
+
/*
- * move tasks for a specic local class
- * return number of tasks pulled
+ * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
*/
-static inline int ckrm_cls_move_tasks(ckrm_lrq_t* src_lrq,ckrm_lrq_t*dst_lrq,
- runqueue_t *this_rq,
- runqueue_t *busiest,
- struct sched_domain *sd,
- int this_cpu,
- enum idle_type idle,
- long* pressure_imbalance)
+static int find_busiest_cpu(runqueue_t *this_rq, int this_cpu, int idle,
+ int *imbalance)
{
- prio_array_t *array, *dst_array;
+ int cpu_load, load, max_load, i, busiest_cpu;
+ runqueue_t *busiest, *rq_src;
+
+
+ /*Hubertus ... the concept of nr_running is replace with cpu_load */
+ cpu_load = this_rq->ckrm_cpu_load;
+
+ busiest = NULL;
+ busiest_cpu = -1;
+
+ max_load = -1;
+ for_each_online_cpu(i) {
+ rq_src = cpu_rq(i);
+ load = rq_src->ckrm_cpu_load;
+
+ if ((load > max_load) && (rq_src != this_rq)) {
+ busiest = rq_src;
+ busiest_cpu = i;
+ max_load = load;
+ }
+ }
+
+ if (likely(!busiest))
+ goto out;
+
+ *imbalance = max_load - cpu_load;
+
+ /* It needs an at least ~25% imbalance to trigger balancing. */
+ if (!idle && ((*imbalance)*4 < max_load)) {
+ busiest = NULL;
+ goto out;
+ }
+
+ double_lock_balance(this_rq, busiest);
+ /*
+ * Make sure nothing changed since we checked the
+ * runqueue length.
+ */
+ if (busiest->ckrm_cpu_load <= cpu_load) {
+ spin_unlock(&busiest->lock);
+ busiest = NULL;
+ }
+out:
+ return (busiest ? busiest_cpu : -1);
+}
+
+static int load_balance(int this_cpu, runqueue_t *this_rq,
+ struct sched_domain *sd, enum idle_type idle)
+{
+ int imbalance, idx;
+ int busiest_cpu;
+ runqueue_t *busiest;
+ prio_array_t *array;
struct list_head *head, *curr;
task_t *tmp;
- int idx;
- int pulled = 0;
- int phase = -1;
- long pressure_min, pressure_max;
- /*hzheng: magic : 90% balance is enough*/
- long balance_min = *pressure_imbalance / 10;
-/*
- * we don't want to migrate tasks that will reverse the balance
- * or the tasks that make too small difference
- */
-#define CKRM_BALANCE_MAX_RATIO 100
-#define CKRM_BALANCE_MIN_RATIO 1
- start:
- phase ++;
+ struct ckrm_local_runqueue * busiest_local_queue;
+ struct ckrm_cpu_class *clsptr;
+ int weight;
+ unsigned long cls_imbalance; // so we can retry other classes
+
+ // need to update global CVT based on local accumulated CVTs
+ read_lock(&class_list_lock);
+ busiest_cpu = find_busiest_cpu(this_rq, this_cpu, idle, &imbalance);
+ if (busiest_cpu == -1)
+ goto out;
+
+ busiest = cpu_rq(busiest_cpu);
+
+ /*
+ * We only want to steal a number of tasks equal to 1/2 the imbalance,
+ * otherwise we'll just shift the imbalance to the new queue:
+ */
+ imbalance /= 2;
+
+ /* now find class on that runqueue with largest inbalance */
+ cls_imbalance = 0xFFFFFFFF;
+
+ retry_other_class:
+ clsptr = find_unbalanced_class(busiest_cpu, this_cpu, &cls_imbalance);
+ if (!clsptr)
+ goto out_unlock;
+
+ busiest_local_queue = get_ckrm_local_runqueue(clsptr,busiest_cpu);
+ weight = cpu_class_weight(clsptr);
+
/*
* We first consider expired tasks. Those will likely not be
* executed in the near future, and they are most likely to
* be cache-cold, thus switching CPUs has the least effect
* on them.
*/
- if (src_lrq->expired->nr_active) {
- array = src_lrq->expired;
- dst_array = dst_lrq->expired;
- } else {
- array = src_lrq->active;
- dst_array = dst_lrq->active;
- }
+ if (busiest_local_queue->expired->nr_active)
+ array = busiest_local_queue->expired;
+ else
+ array = busiest_local_queue->active;
new_array:
/* Start searching at priority 0: */
else
idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
if (idx >= MAX_PRIO) {
- if (array == src_lrq->expired && src_lrq->active->nr_active) {
- array = src_lrq->active;
- dst_array = dst_lrq->active;
+ if (array == busiest_local_queue->expired && busiest_local_queue->active->nr_active) {
+ array = busiest_local_queue->active;
goto new_array;
}
- if ((! phase) && (! pulled) && (idle != IDLE))
- goto start; //try again
- else
- goto out; //finished search for this lrq
+ goto retry_other_class;
}
head = array->queue + idx;
curr = curr->prev;
- if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+ if (!can_migrate_task(tmp, busiest, this_cpu, sd,idle)) {
if (curr != head)
goto skip_queue;
idx++;
goto skip_bitmap;
}
-
- pressure_min = *pressure_imbalance * CKRM_BALANCE_MIN_RATIO/100;
- pressure_max = *pressure_imbalance * CKRM_BALANCE_MAX_RATIO/100;
+ pull_task(busiest, array, tmp, this_rq, rq_active(tmp,this_rq),this_cpu);
/*
- * skip the tasks that will reverse the balance too much
+ * tmp BUG FIX: hzheng
+ * load balancing can make the busiest local queue empty
+ * thus it should be removed from bpt
*/
- if (ckrm_preferred_task(tmp,pressure_min,pressure_max,phase,idle)) {
- *pressure_imbalance -= task_load(tmp);
- pull_task(busiest, array, tmp,
- this_rq, dst_array, this_cpu);
- pulled++;
-
- if (*pressure_imbalance <= balance_min)
- goto out;
+ if (! local_queue_nr_running(busiest_local_queue)) {
+ classqueue_dequeue(busiest_local_queue->classqueue,&busiest_local_queue->classqueue_linkobj);
+ cpu_demand_event(get_rq_local_stat(busiest_local_queue,busiest_cpu),CPU_DEMAND_DEQUEUE,0);
}
-
- if (curr != head)
- goto skip_queue;
- idx++;
- goto skip_bitmap;
- out:
- return pulled;
-}
-
-static inline long ckrm_rq_imbalance(runqueue_t *this_rq,runqueue_t *dst_rq)
-{
- long imbalance;
- /*
- * make sure after balance, imbalance' > - imbalance/2
- * we don't want the imbalance be reversed too much
- */
- imbalance = pid_get_pressure(rq_ckrm_load(dst_rq),0)
- - pid_get_pressure(rq_ckrm_load(this_rq),1);
- imbalance /= 2;
- return imbalance;
-}
-
-/*
- * try to balance the two runqueues
- *
- * Called with both runqueues locked.
- * if move_tasks is called, it will try to move at least one task over
- */
-static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
- unsigned long max_nr_move, struct sched_domain *sd,
- enum idle_type idle)
-{
- struct ckrm_cpu_class *clsptr,*vip_cls = NULL;
- ckrm_lrq_t* src_lrq,*dst_lrq;
- long pressure_imbalance, pressure_imbalance_old;
- int src_cpu = task_cpu(busiest->curr);
- struct list_head *list;
- int pulled = 0;
- long imbalance;
- imbalance = ckrm_rq_imbalance(this_rq,busiest);
-
- if ((idle == NOT_IDLE && imbalance <= 0) || busiest->nr_running <= 1)
- goto out;
-
- //try to find the vip class
- list_for_each_entry(clsptr,&active_cpu_classes,links) {
- src_lrq = get_ckrm_lrq(clsptr,src_cpu);
-
- if (! lrq_nr_running(src_lrq))
- continue;
-
- if (! vip_cls || cpu_class_weight(vip_cls) < cpu_class_weight(clsptr) )
- {
- vip_cls = clsptr;
- }
+ imbalance -= weight;
+ if (!idle && (imbalance>0)) {
+ if (curr != head)
+ goto skip_queue;
+ idx++;
+ goto skip_bitmap;
}
-
- /*
- * do search from the most significant class
- * hopefully, less tasks will be migrated this way
- */
- clsptr = vip_cls;
-
- move_class:
- if (! clsptr)
- goto out;
-
-
- src_lrq = get_ckrm_lrq(clsptr,src_cpu);
- if (! lrq_nr_running(src_lrq))
- goto other_class;
-
- dst_lrq = get_ckrm_lrq(clsptr,this_cpu);
-
- //how much pressure for this class should be transferred
- pressure_imbalance = src_lrq->lrq_load * imbalance/src_lrq->local_weight;
- if (pulled && ! pressure_imbalance)
- goto other_class;
-
- pressure_imbalance_old = pressure_imbalance;
-
- //move tasks
- pulled +=
- ckrm_cls_move_tasks(src_lrq,dst_lrq,
- this_rq,
- busiest,
- sd,this_cpu,idle,
- &pressure_imbalance);
-
- /*
- * hzheng: 2 is another magic number
- * stop balancing if the imbalance is less than 25% of the orig
- */
- if (pressure_imbalance <= (pressure_imbalance_old >> 2))
- goto out;
-
- //update imbalance
- imbalance *= pressure_imbalance / pressure_imbalance_old;
- other_class:
- //who is next?
- list = clsptr->links.next;
- if (list == &active_cpu_classes)
- list = list->next;
- clsptr = list_entry(list, typeof(*clsptr), links);
- if (clsptr != vip_cls)
- goto move_class;
+ out_unlock:
+ spin_unlock(&busiest->lock);
out:
- return pulled;
-}
-
-/**
- * ckrm_check_balance - is load balancing necessary?
- * return 0 if load balancing is not necessary
- * otherwise return the average load of the system
- * also, update nr_group
- *
- * heuristics:
- * no load balancing if it's load is over average
- * no load balancing if it's load is far more than the min
- * task:
- * read the status of all the runqueues
- */
-static unsigned long ckrm_check_balance(struct sched_domain *sd, int this_cpu,
- enum idle_type idle, int* nr_group)
-{
- struct sched_group *group = sd->groups;
- unsigned long min_load, max_load, avg_load;
- unsigned long total_load, this_load, total_pwr;
-
- max_load = this_load = total_load = total_pwr = 0;
- min_load = 0xFFFFFFFF;
- *nr_group = 0;
-
- do {
- cpumask_t tmp;
- unsigned long load;
- int local_group;
- int i, nr_cpus = 0;
-
- /* Tally up the load of all CPUs in the group */
- cpus_and(tmp, group->cpumask, cpu_online_map);
- if (unlikely(cpus_empty(tmp)))
- goto nextgroup;
-
- avg_load = 0;
- local_group = cpu_isset(this_cpu, group->cpumask);
-
- for_each_cpu_mask(i, tmp) {
- load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),local_group);
- nr_cpus++;
- avg_load += load;
- }
-
- if (!nr_cpus)
- goto nextgroup;
-
- total_load += avg_load;
- total_pwr += group->cpu_power;
-
- /* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
- if (local_group) {
- this_load = avg_load;
- goto nextgroup;
- } else if (avg_load > max_load) {
- max_load = avg_load;
- }
- if (avg_load < min_load) {
- min_load = avg_load;
- }
-nextgroup:
- group = group->next;
- *nr_group = *nr_group + 1;
- } while (group != sd->groups);
-
- if (!max_load || this_load >= max_load)
- goto out_balanced;
-
- avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
-
- /* hzheng: debugging: 105 is a magic number
- * 100*max_load <= sd->imbalance_pct*this_load)
- * should use imbalance_pct instead
- */
- if (this_load > avg_load
- || 100*max_load < 105*this_load
- || 100*min_load < 70*this_load
- )
- goto out_balanced;
-
- return avg_load;
- out_balanced:
- return 0;
-}
-
-/**
- * any group that has above average load is considered busy
- * find the busiest queue from any of busy group
- */
-static runqueue_t *
-ckrm_find_busy_queue(struct sched_domain *sd, int this_cpu,
- unsigned long avg_load, enum idle_type idle,
- int nr_group)
-{
- struct sched_group *group;
- runqueue_t * busiest=NULL;
- unsigned long rand;
-
- group = sd->groups;
- rand = get_ckrm_rand(nr_group);
- nr_group = 0;
-
- do {
- unsigned long load,total_load,max_load;
- cpumask_t tmp;
- int i;
- runqueue_t * grp_busiest;
-
- cpus_and(tmp, group->cpumask, cpu_online_map);
- if (unlikely(cpus_empty(tmp)))
- goto find_nextgroup;
-
- total_load = 0;
- max_load = 0;
- grp_busiest = NULL;
- for_each_cpu_mask(i, tmp) {
- load = pid_get_pressure(rq_ckrm_load(cpu_rq(i)),0);
- total_load += load;
- if (load > max_load) {
- max_load = load;
- grp_busiest = cpu_rq(i);
- }
- }
-
- total_load = (total_load * SCHED_LOAD_SCALE) / group->cpu_power;
- if (total_load > avg_load) {
- busiest = grp_busiest;
- if (nr_group >= rand)
- break;
- }
- find_nextgroup:
- group = group->next;
- nr_group ++;
- } while (group != sd->groups);
-
- return busiest;
-}
-
-/**
- * load_balance - pressure based load balancing algorithm used by ckrm
- */
-static int ckrm_load_balance(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd, enum idle_type idle)
-{
- runqueue_t *busiest;
- unsigned long avg_load;
- int nr_moved,nr_group;
-
- avg_load = ckrm_check_balance(sd, this_cpu, idle, &nr_group);
- if (! avg_load)
- goto out_balanced;
-
- busiest = ckrm_find_busy_queue(sd,this_cpu,avg_load,idle,nr_group);
- if (! busiest)
- goto out_balanced;
- /*
- * This should be "impossible", but since load
- * balancing is inherently racy and statistical,
- * it could happen in theory.
- */
- if (unlikely(busiest == this_rq)) {
- WARN_ON(1);
- goto out_balanced;
- }
-
- nr_moved = 0;
- if (busiest->nr_running > 1) {
- /*
- * Attempt to move tasks. If find_busiest_group has found
- * an imbalance but busiest->nr_running <= 1, the group is
- * still unbalanced. nr_moved simply stays zero, so it is
- * correctly treated as an imbalance.
- */
- double_lock_balance(this_rq, busiest);
- nr_moved = move_tasks(this_rq, this_cpu, busiest,
- 0,sd, idle);
- spin_unlock(&busiest->lock);
- if (nr_moved) {
- adjust_local_weight();
- }
- }
-
- if (!nr_moved)
- sd->nr_balance_failed ++;
- else
- sd->nr_balance_failed = 0;
-
- /* We were unbalanced, so reset the balancing interval */
- sd->balance_interval = sd->min_interval;
-
- return nr_moved;
-
-out_balanced:
- /* tune up the balancing interval */
- if (sd->balance_interval < sd->max_interval)
- sd->balance_interval *= 2;
-
+ read_unlock(&class_list_lock);
return 0;
}
-/*
- * this_rq->lock is already held
- */
-static inline int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd)
-{
- int ret;
- read_lock(&class_list_lock);
- ret = ckrm_load_balance(this_cpu,this_rq,sd,NEWLY_IDLE);
- read_unlock(&class_list_lock);
- return ret;
-}
-static inline int load_balance(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd, enum idle_type idle)
+static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
{
- int ret;
-
- spin_lock(&this_rq->lock);
- read_lock(&class_list_lock);
- ret= ckrm_load_balance(this_cpu,this_rq,sd,NEWLY_IDLE);
- read_unlock(&class_list_lock);
- spin_unlock(&this_rq->lock);
- return ret;
}
-#else /*! CONFIG_CKRM_CPU_SCHEDULE */
+#else /* CONFIG_CKRM_CPU_SCHEDULE */
/*
* move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
* as part of a balancing operation within "domain". Returns the number of
out:
return nr_moved;
}
-#endif /* CONFIG_CKRM_CPU_SCHEDULE*/
-
/*
* idle_balance is called by schedule() if this_cpu is about to become
group = group->next;
} while (group != sd->groups);
}
+#endif /* CONFIG_CKRM_CPU_SCHEDULE*/
/*
* rebalance_tick will get called every timer tick, on every CPU.
unsigned long j = jiffies + CPU_OFFSET(this_cpu);
struct sched_domain *sd;
+ ckrm_rebalance_tick(j,this_cpu);
+
/* Update our load */
old_load = this_rq->cpu_load;
this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
*/
static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle)
{
+ ckrm_rebalance_tick(jiffies,cpu);
}
+
static inline void idle_balance(int cpu, runqueue_t *rq)
{
}
return 0;
}
-DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
+
EXPORT_PER_CPU_SYMBOL(kstat);
/*
#define EXPIRED_STARVING(rq) \
(STARVATION_LIMIT && ((rq)->expired_timestamp && \
(jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * (lrq_nr_running(rq)) + 1)))
+ STARVATION_LIMIT * (local_queue_nr_running(rq)) + 1)))
#endif
/*
}
if (p == rq->idle) {
-#ifdef CONFIG_VSERVER_HARDCPU
if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
set_need_resched();
-#endif
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait += sys_ticks;
cpustat->idle += sys_ticks;
if (wake_priority_sleeper(rq))
goto out;
- ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
rebalance_tick(cpu, rq, IDLE);
return;
}
}
goto out_unlock;
}
+#warning MEF PLANETLAB: "if (vx_need_resched(p)) was if (!--p->time_slice) */"
if (vx_need_resched(p)) {
#ifdef CONFIG_CKRM_CPU_SCHEDULE
/* Hubertus ... we can abstract this out */
- ckrm_lrq_t* rq = get_task_lrq(p);
+ struct ckrm_local_runqueue* rq = get_task_class_queue(p);
#endif
dequeue_task(p, rq->active);
set_tsk_need_resched(p);
out_unlock:
spin_unlock(&rq->lock);
out:
- ckrm_sched_tick(jiffies,cpu,rq_ckrm_load(rq));
rebalance_tick(cpu, rq, NOT_IDLE);
}
spin_lock_irq(&rq->lock);
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- if (prev != rq->idle) {
- unsigned long long run = now - prev->timestamp;
- ckrm_lrq_t * lrq = get_task_lrq(prev);
-
- lrq->lrq_load -= task_load(prev);
- cpu_demand_event(&prev->demand_stat,CPU_DEMAND_DESCHEDULE,run);
- lrq->lrq_load += task_load(prev);
-
- cpu_demand_event(get_task_lrq_stat(prev),CPU_DEMAND_DESCHEDULE,run);
- update_local_cvt(prev, run);
- }
-#endif
/*
* if entering off of a kernel preemption go straight
* to picking the next task.
#endif
if (unlikely(!rq->nr_running)) {
idle_balance(cpu, rq);
- if (!rq->nr_running) {
- next = rq->idle;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- rq->expired_timestamp = 0;
-#endif
- wake_sleeping_dependent(cpu, rq);
- goto switch_tasks;
- }
+ if (!rq->nr_running) {
+ next = rq->idle;
+ rq->expired_timestamp = 0;
+ wake_sleeping_dependent(cpu, rq);
+ goto switch_tasks;
+ }
}
next = rq_get_next_task(rq);
+ if (next == rq->idle)
+ goto switch_tasks;
if (dependent_sleeper(cpu, rq, next)) {
next = rq->idle;
rq->nr_preempt++;
RCU_qsctr(task_cpu(prev))++;
+#ifdef CONFIG_CKRM_CPU_SCHEDULE
+ if (prev != rq->idle) {
+ unsigned long long run = now - prev->timestamp;
+ cpu_demand_event(get_task_local_stat(prev),CPU_DEMAND_DESCHEDULE,run);
+ update_local_cvt(prev, run);
+ }
+#endif
+
prev->sleep_avg -= run_time;
if ((long)prev->sleep_avg <= 0) {
prev->sleep_avg = 0;
}
EXPORT_SYMBOL(schedule);
+
#ifdef CONFIG_PREEMPT
/*
* this is is the entry point to schedule() from in-kernel preemption
if (!cpu_isset(dest_cpu, p->cpus_allowed))
goto out;
+ set_task_cpu(p, dest_cpu);
if (p->array) {
/*
* Sync timestamp with rq_dest's before activating.
p->timestamp = p->timestamp - rq_src->timestamp_last_tick
+ rq_dest->timestamp_last_tick;
deactivate_task(p, rq_src);
- set_task_cpu(p, dest_cpu);
activate_task(p, rq_dest, 0);
if (TASK_PREEMPTS_CURR(p, rq_dest))
resched_task(rq_dest->curr);
- } else
- set_task_cpu(p, dest_cpu);
+ }
out:
double_rq_unlock(rq_src, rq_dest);
}
if (rq->active_balance) {
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
active_load_balance(rq, cpu);
+#endif
rq->active_balance = 0;
}
{
runqueue_t *rq;
int i;
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
+ int j, k;
+#endif
#ifdef CONFIG_SMP
/* Set up an initial dummy domain for early boot */
sched_domain_init.groups = &sched_group_init;
sched_domain_init.last_balance = jiffies;
sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
- sched_domain_init.busy_factor = 1;
memset(&sched_group_init, 0, sizeof(struct sched_group));
sched_group_init.cpumask = CPU_MASK_ALL;
sched_group_init.next = &sched_group_init;
sched_group_init.cpu_power = SCHED_LOAD_SCALE;
#endif
+
init_cpu_classes();
for (i = 0; i < NR_CPUS; i++) {
#ifndef CONFIG_CKRM_CPU_SCHEDULE
- int j, k;
prio_array_t *array;
-
+#endif
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
- for (j = 0; j < 2; j++) {
- array = rq->arrays + j;
- for (k = 0; k < MAX_PRIO; k++) {
- INIT_LIST_HEAD(array->queue + k);
- __clear_bit(k, array->bitmap);
- }
- // delimiter for bitsearch
- __set_bit(MAX_PRIO, array->bitmap);
- }
-
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
#else
- rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ rq->ckrm_cpu_load = 0;
#endif
-
rq->best_expired_prio = MAX_PRIO;
#ifdef CONFIG_SMP
rq->sd = &sched_domain_init;
rq->cpu_load = 0;
-#ifdef CONFIG_CKRM_CPU_SCHEDULE
- ckrm_load_init(rq_ckrm_load(rq));
-#endif
rq->active_balance = 0;
rq->push_cpu = 0;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
#endif
-#ifdef CONFIG_VSERVER_HARDCPU
INIT_LIST_HEAD(&rq->hold_queue);
-#endif
atomic_set(&rq->nr_iowait, 0);
+
+#ifndef CONFIG_CKRM_CPU_SCHEDULE
+ for (j = 0; j < 2; j++) {
+ array = rq->arrays + j;
+ for (k = 0; k < MAX_PRIO; k++) {
+ INIT_LIST_HEAD(array->queue + k);
+ __clear_bit(k, array->bitmap);
+ }
+ // delimiter for bitsearch
+ __set_bit(MAX_PRIO, array->bitmap);
+ }
+#endif
}
/*
rq->idle = current;
set_task_cpu(current, smp_processor_id());
#ifdef CONFIG_CKRM_CPU_SCHEDULE
- cpu_demand_event(&(current)->demand_stat,CPU_DEMAND_INIT,0);
- current->cpu_class = get_default_cpu_class();
+ current->cpu_class = default_cpu_class;
current->array = NULL;
#endif
wake_up_forked_process(current);
#ifdef CONFIG_CKRM_CPU_SCHEDULE
/**
* return the classqueue object of a certain processor
+ * Note: not supposed to be used in performance sensitive functions
*/
struct classqueue_struct * get_cpu_classqueue(int cpu)
{
return (& (cpu_rq(cpu)->classqueue) );
}
-
-/**
- * _ckrm_cpu_change_class - change the class of a task
- */
-void _ckrm_cpu_change_class(task_t *tsk, struct ckrm_cpu_class *newcls)
-{
- prio_array_t *array;
- struct runqueue *rq;
- unsigned long flags;
-
- rq = task_rq_lock(tsk,&flags);
- array = tsk->array;
- if (array) {
- dequeue_task(tsk,array);
- tsk->cpu_class = newcls;
- enqueue_task(tsk,rq_active(tsk,rq));
- } else
- tsk->cpu_class = newcls;
-
- task_rq_unlock(rq,&flags);
-}
#endif
INIT_LIST_HEAD(&q->list);
q->flags = 0;
q->lock = NULL;
+#warning MEF PLANETLAB: q->user = get_uid(current->user); is something new in Fedora Core.
q->user = get_uid(current->user);
atomic_inc(&q->user->sigpending);
}
}
}
-EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
* process, and wants to be notified if any signals at all were to be
struct task_struct *t)
{
int error = -EINVAL;
- int user;
-
if (sig < 0 || sig > _NSIG)
return error;
-
- user = (!info ||
- (info != SEND_SIG_PRIV &&
- info != SEND_SIG_FORCED &&
- SI_FROMUSER(info)));
-
error = -EPERM;
- if (user && (sig != SIGCONT ||
- current->signal->session != t->signal->session)
+ if ((!info || ((unsigned long)info != 1 &&
+ (unsigned long)info != 2 && SI_FROMUSER(info)))
+ && ((sig != SIGCONT) ||
+ (current->signal->session != t->signal->session))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
&& !capable(CAP_KILL))
return error;
-
- error = -ESRCH;
- if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
- return error;
-
return security_task_kill(t, info, sig);
}
if (q) {
q->flags = 0;
+#warning MEF PLANETLAB: q->user = get_uid(t->user); is something new in Fedora Core.
q->user = get_uid(t->user);
atomic_inc(&q->user->sigpending);
list_add_tail(&q->list, &signals->list);
unsigned long flags;
int ret;
+ if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
+ return -ESRCH;
+
ret = check_kill_permission(sig, info, p);
if (!ret && sig && p->sighand) {
spin_lock_irqsave(&p->sighand->siglock, flags);
#if defined(CONFIG_PPC32) && defined(CONFIG_6xx)
extern unsigned long powersave_nap;
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp);
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void __user *buffer, size_t *lenp);
static ctl_table root_table[];
static struct ctl_table_header root_table_header =
extern ctl_table pty_table[];
#endif
-int sysctl_legacy_va_layout;
-
/* /proc declarations: */
#ifdef CONFIG_PROC_FS
.procname = "tainted",
.data = &tainted,
.maxlen = sizeof(int),
- .mode = 0444,
+ .mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
- {
- .ctl_name = VM_LEGACY_VA_LAYOUT,
- .procname = "legacy_va_layout",
- .data = &sysctl_legacy_va_layout,
- .maxlen = sizeof(sysctl_legacy_va_layout),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- },
{ .ctl_name = 0 }
};
res = count;
- error = (*table->proc_handler) (table, write, file, buf, &res, ppos);
+ /*
+ * FIXME: we need to pass on ppos to the handler.
+ */
+
+ error = (*table->proc_handler) (table, write, file, buf, &res);
if (error)
return error;
return res;
* Returns 0 on success.
*/
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
size_t len;
char __user *p;
char c;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if(copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
} else {
len = strlen(table->data);
if (len > table->maxlen)
len++;
}
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
}
return 0;
}
*/
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int r;
if (!write) {
down_read(&uts_sem);
- r=proc_dostring(table,0,filp,buffer,lenp, ppos);
+ r=proc_dostring(table,0,filp,buffer,lenp);
up_read(&uts_sem);
} else {
down_write(&uts_sem);
- r=proc_dostring(table,1,filp,buffer,lenp, ppos);
+ r=proc_dostring(table,1,filp,buffer,lenp);
up_write(&uts_sem);
}
return r;
}
static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
void *data)
{
-#define TMPBUFLEN 21
+#define TMPBUFLEN 20
int *i, vleft, first=1, neg, val;
unsigned long lval;
size_t left, len;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
NULL,NULL);
}
*/
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int op;
}
op = (current->pid == 1) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_bset_conv,&op);
}
* Returns 0 on success.
*/
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
struct do_proc_dointvec_minmax_conv_param param = {
.min = (int *) table->extra1,
.max = (int *) table->extra2,
};
- return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
+ return do_proc_dointvec(table, write, filp, buffer, lenp,
do_proc_dointvec_minmax_conv, ¶m);
}
static int do_proc_doulongvec_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
unsigned long convmul,
unsigned long convdiv)
{
-#define TMPBUFLEN 21
+#define TMPBUFLEN 20
unsigned long *i, *min, *max, val;
int vleft, first=1, neg;
size_t len, left;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (*ppos && !write)) {
+ (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
+ return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, 1l, 1l);
}
/**
*/
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return do_proc_doulongvec_minmax(table, write, filp, buffer,
- lenp, ppos, HZ, 1000l);
+ lenp, HZ, 1000l);
}
* Returns 0 on success.
*/
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_jiffies_conv,NULL);
}
* Returns 0 on success.
*/
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,
do_proc_dointvec_userhz_jiffies_conv,NULL);
}
#else /* CONFIG_PROC_FS */
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
}
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
return -ENOSYS;
}
psecs = (p->utime += user);
psecs += (p->stime += system);
- if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
- if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
This enables the legacy API used in vs1.xx, which allows
to use older tools (for migration purposes).
-config VSERVER_PROC_SECURE
+config PROC_SECURE
bool "Enable Proc Security"
depends on PROC_FS
default y
choice
prompt "Persistent Inode Context Tagging"
- default INOXID_UGID24
+ default INOXID_GID24
help
This adds persistent context information to filesystems
mounted with the tagxid option. Tagging is a requirement
help
no context information is store for inodes
-config INOXID_UID16
- bool "UID16/GID32"
- help
- reduces UID to 16 bit, but leaves GID at 32 bit.
-
config INOXID_GID16
bool "UID32/GID16"
help
reduces GID to 16 bit, but leaves UID at 32 bit.
-config INOXID_UGID24
+config INOXID_GID24
bool "UID24/GID24"
help
uses the upper 8bit from UID and GID for XID tagging
which leaves 24bit for UID/GID each, which should be
more than sufficient for normal use.
-config INOXID_INTERN
+config INOXID_GID32
bool "UID32/GID32"
help
this uses otherwise reserved inode fields in the on
disk representation, which limits the use to a few
filesystems (currently ext2 and ext3)
-config INOXID_RUNTIME
+config INOXID_MAGIC
bool "Runtime"
depends on EXPERIMENTAL
help
endchoice
-config VSERVER_DEBUG
- bool "Compile Debugging Code"
- default n
- help
- Set this to yes if you want to be able to activate
- debugging output at runtime. It adds a probably small
- overhead (~ ??%) to all vserver related functions and
- increases the kernel size by about 20k.
-
endmenu
obj-y += vserver.o
vserver-y := switch.o context.o namespace.o sched.o network.o inode.o \
- limit.o cvirt.o signal.o proc.o helper.o init.o dlimit.o
+ limit.o cvirt.o signal.o proc.o sysctl.o helper.o init.o \
+ dlimit.o
-vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o
vserver-$(CONFIG_VSERVER_LEGACY) += legacy.o
{
struct vx_info *new = NULL;
- vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
+ vxdprintk("alloc_vx_info(%d)\n", xid);
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
new->vx_bcaps = CAP_INIT_EFF_SET;
new->vx_ccaps = 0;
- vxdprintk(VXD_CBIT(xid, 0),
- "alloc_vx_info(%d) = %p", xid, new);
+ vxdprintk("alloc_vx_info(%d) = %p\n", xid, new);
return new;
}
static void __dealloc_vx_info(struct vx_info *vxi)
{
- vxdprintk(VXD_CBIT(xid, 0),
- "dealloc_vx_info(%p)", vxi);
+ vxdprintk("dealloc_vx_info(%p)\n", vxi);
vxi->vx_hlist.next = LIST_POISON1;
vxi->vx_id = -1;
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(xid, 4),
- "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+ vxdprintk("__hash_vx_info: %p[#%d]\n", vxi, vxi->vx_id);
get_vx_info(vxi);
head = &vx_info_hash[__hashval(vxi->vx_id)];
hlist_add_head_rcu(&vxi->vx_hlist, head);
static inline void __unhash_vx_info(struct vx_info *vxi)
{
- vxdprintk(VXD_CBIT(xid, 4),
- "__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+ vxdprintk("__unhash_vx_info: %p[#%d]\n", vxi, vxi->vx_id);
hlist_del_rcu(&vxi->vx_hlist);
put_vx_info(vxi);
}
do {
if (++seq > MAX_S_CONTEXT)
seq = MIN_D_CONTEXT;
- if (!__lookup_vx_info(seq)) {
- vxdprintk(VXD_CBIT(xid, 4),
- "__vx_dynamic_id: [#%d]", seq);
+ if (!__lookup_vx_info(seq))
return seq;
- }
} while (barrier != seq);
return 0;
}
{
struct vx_info *new, *vxi = NULL;
- vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);
+ vxdprintk("loc_vx_info(%d)\n", id);
if (!(new = __alloc_vx_info(id))) {
*err = -ENOMEM;
else if ((vxi = __lookup_vx_info(id))) {
/* context in setup is not available */
if (vxi->vx_flags & VXF_STATE_SETUP) {
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (not available)", id, vxi);
+ vxdprintk("loc_vx_info(%d) = %p (not available)\n", id, vxi);
vxi = NULL;
*err = -EBUSY;
} else {
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (found)", id, vxi);
+ vxdprintk("loc_vx_info(%d) = %p (found)\n", id, vxi);
get_vx_info(vxi);
*err = 0;
}
}
/* new context requested */
- vxdprintk(VXD_CBIT(xid, 0),
- "loc_vx_info(%d) = %p (new)", id, new);
+ vxdprintk("loc_vx_info(%d) = %p (new)\n", id, new);
__hash_vx_info(get_vx_info(new));
vxi = new, new = NULL;
*err = 1;
struct vx_info *vxi = container_of(head, struct vx_info, vx_rcu);
int usecnt, refcnt;
- BUG_ON(!vxi || !head);
+ BUG_ON(!vxi);
usecnt = atomic_read(&vxi->vx_usecnt);
BUG_ON(usecnt < 0);
refcnt = atomic_read(&vxi->vx_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(xid, 3),
- "rcu_free_vx_info(%p): uc=%d", vxi, usecnt);
if (!usecnt)
__dealloc_vx_info(vxi);
else
static inline int vx_nofiles_task(struct task_struct *tsk)
{
struct files_struct *files = tsk->files;
- const unsigned long *obptr;
+ const unsigned long *obptr, *cbptr;
int count, total;
spin_lock(&files->file_lock);
obptr = files->open_fds->fds_bits;
+ cbptr = files->close_on_exec->fds_bits;
count = files->max_fds / (sizeof(unsigned long) * 8);
for (total = 0; count > 0; count--) {
if (*obptr)
total += hweight_long(*obptr);
obptr++;
+ /* if (*cbptr)
+ total += hweight_long(*cbptr);
+ cbptr++; */
}
spin_unlock(&files->file_lock);
return total;
}
-#if 0
-
static inline int vx_openfd_task(struct task_struct *tsk)
{
struct files_struct *files = tsk->files;
return total;
}
-#endif
-
/*
* migrate task to new context
* gets vxi, puts old_vxi on change
if (old_vxi == vxi)
goto out;
- vxdprintk(VXD_CBIT(xid, 5),
- "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
+ vxdprintk("vx_migrate_task(%p,%p[#%d.%d)\n", p, vxi,
vxi->vx_id, atomic_read(&vxi->vx_usecnt));
if (!(ret = vx_migrate_user(p, vxi))) {
- int nofiles;
+ int openfd, nofiles;
task_lock(p);
- // openfd = vx_openfd_task(p);
+ openfd = vx_openfd_task(p);
nofiles = vx_nofiles_task(p);
if (old_vxi) {
atomic_dec(&old_vxi->cacct.nr_threads);
atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
- atomic_sub(nofiles, &old_vxi->limit.rcur[RLIMIT_NOFILE]);
- // atomic_sub(openfd, &old_vxi->limit.rcur[RLIMIT_OPENFD]);
+ atomic_sub(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
+ atomic_sub(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
}
atomic_inc(&vxi->cacct.nr_threads);
atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
atomic_add(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
- // atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
-
- vxdprintk(VXD_CBIT(xid, 5),
- "moved task %p into vxi:%p[#%d]",
- p, vxi, vxi->vx_id);
-
+ atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);
/* should be handled in set_vx_info !! */
if (old_vxi)
clr_vx_info(&p->vx_info);
void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
{
struct vx_info *vxi = current->vx_info;
- struct timeval bias;
-
- jiffies_to_timeval(vxi->cvirt.bias_jiffies - INITIAL_JIFFIES, &bias);
set_normalized_timespec(uptime,
- uptime->tv_sec - bias.tv_sec,
- uptime->tv_nsec - bias.tv_usec*1000);
+ uptime->tv_sec - vxi->cvirt.bias_tp.tv_sec,
+ uptime->tv_nsec - vxi->cvirt.bias_tp.tv_nsec);
if (!idle)
return;
set_normalized_timespec(idle,
{
struct dl_info *new = NULL;
- vxdprintk(VXD_CBIT(dlim, 5),
- "alloc_dl_info(%p,%d)*", sb, xid);
+ vxdprintk("alloc_dl_info(%p,%d)\n", sb, xid);
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
/* rest of init goes here */
- vxdprintk(VXD_CBIT(dlim, 4),
- "alloc_dl_info(%p,%d) = %p", sb, xid, new);
+ vxdprintk("alloc_dl_info(%p,%d) = %p\n", sb, xid, new);
return new;
}
static void __dealloc_dl_info(struct dl_info *dli)
{
- vxdprintk(VXD_CBIT(dlim, 4),
- "dealloc_dl_info(%p)", dli);
+ vxdprintk("dealloc_dl_info(%p)\n", dli);
dli->dl_hlist.next = LIST_POISON1;
dli->dl_xid = -1;
static inline unsigned int __hashval(struct super_block *sb, xid_t xid)
{
- return ((xid ^ (unsigned long)sb) % DL_HASH_SIZE);
+ return ((xid ^ (unsigned int)sb) % DL_HASH_SIZE);
}
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(dlim, 6),
- "__hash_dl_info: %p[#%d]", dli, dli->dl_xid);
+ vxdprintk("__hash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
get_dl_info(dli);
head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_xid)];
hlist_add_head_rcu(&dli->dl_hlist, head);
static inline void __unhash_dl_info(struct dl_info *dli)
{
- vxdprintk(VXD_CBIT(dlim, 6),
- "__unhash_dl_info: %p[#%d]", dli, dli->dl_xid);
+ vxdprintk("__unhash_dl_info: %p[#%d]\n", dli, dli->dl_xid);
hlist_del_rcu(&dli->dl_hlist);
put_dl_info(dli);
}
rcu_read_lock();
dli = get_dl_info(__lookup_dl_info(sb, xid));
- vxdprintk(VXD_CBIT(dlim, 7),
- "locate_dl_info(%p,#%d) = %p", sb, xid, dli);
rcu_read_unlock();
return dli;
}
struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
int usecnt, refcnt;
- BUG_ON(!dli || !head);
+ BUG_ON(!dli);
usecnt = atomic_read(&dli->dl_usecnt);
BUG_ON(usecnt < 0);
refcnt = atomic_read(&dli->dl_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(dlim, 3),
- "rcu_free_dl_info(%p)", dli);
if (!usecnt)
__dealloc_dl_info(dli);
else
return;
}
-#include <linux/module.h>
-
-EXPORT_SYMBOL_GPL(locate_dl_info);
-EXPORT_SYMBOL_GPL(rcu_free_dl_info);
-// EXPORT_SYMBOL_GPL(dl_info_hash_lock);
-// EXPORT_SYMBOL_GPL(unhash_dl_info);
-
{
int ret = 0;
-#ifdef CONFIG_VSERVER_DEBUG
vserver_register_sysctl();
-#endif
return ret;
}
static void __exit exit_vserver(void)
{
-#ifdef CONFIG_VSERVER_DEBUG
vserver_unregister_sysctl();
-#endif
return;
}
return ret;
}
-int vc_iattr_ioctl(struct dentry *de, unsigned int cmd, unsigned long arg)
-{
- void __user *data = (void __user *)arg;
- struct vcmd_ctx_iattr_v1 vc_data;
- int ret;
-
- /*
- * I don't think we need any dget/dput pairs in here as long as
- * this function is always called from sys_ioctl i.e., de is
- * a field of a struct file that is guaranteed not to be freed.
- */
- if (cmd == FIOC_SETIATTR) {
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
- if (copy_from_user (&vc_data, data, sizeof(vc_data)))
- return -EFAULT;
- ret = __vc_set_iattr(de,
- &vc_data.xid, &vc_data.flags, &vc_data.mask);
- }
- else {
- if (!vx_check(0, VX_ADMIN))
- return -ENOSYS;
- ret = __vc_get_iattr(de->d_inode,
- &vc_data.xid, &vc_data.flags, &vc_data.mask);
- }
-
- if (!ret && copy_to_user (data, &vc_data, sizeof(vc_data)))
- ret = -EFAULT;
- return ret;
-}
-
#ifdef CONFIG_VSERVER_LEGACY
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
-const char *vlimit_name[NUM_LIMITS] = {
- [RLIMIT_CPU] = "CPU",
- [RLIMIT_RSS] = "RSS",
- [RLIMIT_NPROC] = "NPROC",
- [RLIMIT_NOFILE] = "NOFILE",
- [RLIMIT_MEMLOCK] = "VML",
- [RLIMIT_AS] = "VM",
- [RLIMIT_LOCKS] = "LOCKS",
- [RLIMIT_MSGQUEUE] = "MSGQ",
- [VLIMIT_NSOCK] = "NSOCK",
-};
-
-
static int is_valid_rlimit(int id)
{
int valid = 0;
{
struct nx_info *new = NULL;
- vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
+ nxdprintk("alloc_nx_info()\n");
/* would this benefit from a slab cache? */
new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
/* rest of init goes here */
- vxdprintk(VXD_CBIT(nid, 0),
- "alloc_nx_info() = %p", new);
+ nxdprintk("alloc_nx_info() = %p\n", new);
return new;
}
static void __dealloc_nx_info(struct nx_info *nxi)
{
- vxdprintk(VXD_CBIT(nid, 0),
- "dealloc_nx_info(%p)", nxi);
+ nxdprintk("dealloc_nx_info(%p)\n", nxi);
nxi->nx_hlist.next = LIST_POISON1;
nxi->nx_id = -1;
{
struct hlist_head *head;
- vxdprintk(VXD_CBIT(nid, 4),
- "__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+ nxdprintk("__hash_nx_info: %p[#%d]\n", nxi, nxi->nx_id);
get_nx_info(nxi);
head = &nx_info_hash[__hashval(nxi->nx_id)];
hlist_add_head_rcu(&nxi->nx_hlist, head);
static inline void __unhash_nx_info(struct nx_info *nxi)
{
- vxdprintk(VXD_CBIT(nid, 4),
- "__unhash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+ nxdprintk("__unhash_nx_info: %p[#%d]\n", nxi, nxi->nx_id);
hlist_del_rcu(&nxi->nx_hlist);
put_nx_info(nxi);
}
do {
if (++seq > MAX_N_CONTEXT)
seq = MIN_D_CONTEXT;
- if (!__lookup_nx_info(seq)) {
- vxdprintk(VXD_CBIT(nid, 4),
- "__nx_dynamic_id: [#%d]", seq);
+ if (!__lookup_nx_info(seq))
return seq;
- }
} while (barrier != seq);
return 0;
}
{
struct nx_info *new, *nxi = NULL;
- vxdprintk(VXD_CBIT(nid, 1), "loc_nx_info(%d)*", id);
+ nxdprintk("loc_nx_info(%d)\n", id);
if (!(new = __alloc_nx_info(id))) {
*err = -ENOMEM;
else if ((nxi = __lookup_nx_info(id))) {
/* context in setup is not available */
if (nxi->nx_flags & VXF_STATE_SETUP) {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (not available)", id, nxi);
+ nxdprintk("loc_nx_info(%d) = %p (not available)\n", id, nxi);
nxi = NULL;
*err = -EBUSY;
} else {
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (found)", id, nxi);
+ nxdprintk("loc_nx_info(%d) = %p (found)\n", id, nxi);
get_nx_info(nxi);
*err = 0;
}
}
/* new context requested */
- vxdprintk(VXD_CBIT(nid, 0),
- "loc_nx_info(%d) = %p (new)", id, new);
+ nxdprintk("loc_nx_info(%d) = %p (new)\n", id, new);
__hash_nx_info(get_nx_info(new));
nxi = new, new = NULL;
*err = 1;
struct nx_info *nxi = container_of(head, struct nx_info, nx_rcu);
int usecnt, refcnt;
- BUG_ON(!nxi || !head);
-
usecnt = atomic_read(&nxi->nx_usecnt);
BUG_ON(usecnt < 0);
refcnt = atomic_read(&nxi->nx_refcnt);
BUG_ON(refcnt < 0);
- vxdprintk(VXD_CBIT(nid, 3),
- "rcu_free_nx_info(%p): uc=%d", nxi, usecnt);
if (!usecnt)
__dealloc_nx_info(nxi);
else
struct nx_info *new;
int err;
- vxdprintk(VXD_CBIT(nid, 5), "create_nx_info(%s)", "void");
+ nxdprintk("create_nx_info()\n");
if (!(new = __loc_nx_info(NX_DYNAMIC_ID, &err)))
return NULL;
return new;
if (!p || !nxi)
BUG();
- vxdprintk(VXD_CBIT(nid, 5),
- "nx_migrate_task(%p,%p[#%d.%d.%d])",
+ nxdprintk("nx_migrate_task(%p,%p[#%d.%d.%d])\n",
p, nxi, nxi->nx_id,
atomic_read(&nxi->nx_usecnt),
atomic_read(&nxi->nx_refcnt));
#include <linux/vserver/dlimit.h>
+extern unsigned int vx_debug_switch;
+
+
extern asmlinkage long
sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
{
- vxdprintk(VXD_CBIT(switch, 0),
- "vc: VCMD_%02d_%d[%d], %d",
- VC_CATEGORY(cmd), VC_COMMAND(cmd),
- VC_VERSION(cmd), id);
+ if (vx_debug_switch)
+ printk( "vc: VCMD_%02d_%d[%d], %d\n",
+ VC_CATEGORY(cmd), VC_COMMAND(cmd),
+ VC_VERSION(cmd), id);
switch (cmd) {
case VCMD_get_version:
enum {
CTL_DEBUG_SWITCH = 1,
- CTL_DEBUG_XID,
- CTL_DEBUG_NID,
- CTL_DEBUG_NET,
CTL_DEBUG_LIMIT,
- CTL_DEBUG_DLIM,
- CTL_DEBUG_CVIRT,
+ CTL_DEBUG_DLIMIT,
};
unsigned int vx_debug_switch = 0;
-unsigned int vx_debug_xid = 0;
-unsigned int vx_debug_nid = 0;
-unsigned int vx_debug_net = 0;
unsigned int vx_debug_limit = 0;
-unsigned int vx_debug_dlim = 0;
-unsigned int vx_debug_cvirt = 0;
+unsigned int vx_debug_dlimit = 0;
static struct ctl_table_header *vserver_table_header;
static int proc_dodebug(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ struct file *file, void *buffer, size_t *lenp)
{
char tmpbuf[20], *p, c;
unsigned int value;
size_t left, len;
- if ((*ppos && !write) || !*lenp) {
+ if ((file->f_pos && !write) || !*lenp) {
*lenp = 0;
return 0;
}
done:
*lenp -= left;
- *ppos += *lenp;
+ file->f_pos += *lenp;
return 0;
}
.mode = 0644,
.proc_handler = &proc_dodebug
},
- {
- .ctl_name = CTL_DEBUG_XID,
- .procname = "debug_xid",
- .data = &vx_debug_xid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_NID,
- .procname = "debug_nid",
- .data = &vx_debug_nid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_NET,
- .procname = "debug_net",
- .data = &vx_debug_net,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
{
.ctl_name = CTL_DEBUG_LIMIT,
.procname = "debug_limit",
.proc_handler = &proc_dodebug
},
{
- .ctl_name = CTL_DEBUG_DLIM,
- .procname = "debug_dlim",
- .data = &vx_debug_dlim,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dodebug
- },
- {
- .ctl_name = CTL_DEBUG_CVIRT,
- .procname = "debug_cvirt",
- .data = &vx_debug_cvirt,
+ .ctl_name = CTL_DEBUG_DLIMIT,
+ .procname = "debug_dlimit",
+ .data = &vx_debug_dlimit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
{ .ctl_name = 0 }
};
-
-EXPORT_SYMBOL_GPL(vx_debug_dlim);
-EXPORT_SYMBOL_GPL(vx_debug_nid);
-EXPORT_SYMBOL_GPL(vx_debug_xid);
-
/* nothing */;
return tmp;
}
-EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
}
return dest;
}
-EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
return tmp;
}
-EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
return tmp;
}
-EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
return __res;
}
-EXPORT_SYMBOL(strcmp);
#endif
#ifndef __HAVE_ARCH_STRNCMP
return __res;
}
-EXPORT_SYMBOL(strncmp);
#endif
#ifndef __HAVE_ARCH_STRCHR
return NULL;
return (char *) s;
}
-EXPORT_SYMBOL(strchr);
#endif
#ifndef __HAVE_ARCH_STRRCHR
} while (--p >= s);
return NULL;
}
-EXPORT_SYMBOL(strrchr);
#endif
#ifndef __HAVE_ARCH_STRNCHR
return (char *) s;
return NULL;
}
-EXPORT_SYMBOL(strnchr);
#endif
#ifndef __HAVE_ARCH_STRLEN
/* nothing */;
return sc - s;
}
-EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
/* nothing */;
return sc - s;
}
-EXPORT_SYMBOL(strnlen);
#endif
#ifndef __HAVE_ARCH_STRSPN
return count;
}
-EXPORT_SYMBOL(strcspn);
#ifndef __HAVE_ARCH_STRPBRK
/**
}
return NULL;
}
-EXPORT_SYMBOL(strpbrk);
#endif
#ifndef __HAVE_ARCH_STRSEP
return s;
}
-EXPORT_SYMBOL(memset);
#endif
#ifndef __HAVE_ARCH_BCOPY
while (count--)
*dest++ = *src++;
}
-EXPORT_SYMBOL(bcopy);
#endif
#ifndef __HAVE_ARCH_MEMCPY
return dest;
}
-EXPORT_SYMBOL(memcpy);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
return dest;
}
-EXPORT_SYMBOL(memmove);
#endif
#ifndef __HAVE_ARCH_MEMCMP
break;
return res;
}
-EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
}
return (void *) p;
}
-EXPORT_SYMBOL(memscan);
#endif
#ifndef __HAVE_ARCH_STRSTR
}
return NULL;
}
-EXPORT_SYMBOL(strstr);
#endif
#ifndef __HAVE_ARCH_MEMCHR
}
return NULL;
}
-EXPORT_SYMBOL(memchr);
+
#endif
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt == 'Z' || *fmt == 'z') {
- qualifier = *fmt++;
- if (unlikely(qualifier == *fmt)) {
- if (qualifier == 'h') {
- qualifier = 'H';
- fmt++;
- } else if (qualifier == 'l') {
- qualifier = 'L';
- fmt++;
- }
- }
+ qualifier = *fmt;
+ fmt++;
}
base = 10;
is_sign = 0;
break;
switch(qualifier) {
- case 'H': /* that's 'hh' in format */
- if (is_sign) {
- signed char *s = (signed char *) va_arg(args,signed char *);
- *s = (signed char) simple_strtol(str,&next,base);
- } else {
- unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
- *s = (unsigned char) simple_strtoul(str, &next, base);
- }
- break;
case 'h':
if (is_sign) {
short *s = (short *) va_arg(args,short *);
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
shmem.o vmalloc.o
-obj-y := bootmem.o filemap.o mempool.o fadvise.o \
+obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
page_alloc.o page-writeback.o pdflush.o prio_tree.o \
readahead.o slab.o swap.o truncate.o vmscan.o \
$(mmu-y)
-obj-$(CONFIG_OOM_KILL) += oom_kill.o
-obj-$(CONFIG_OOM_PANIC) += oom_panic.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_X86_4G) += usercopy.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_PROC_MM) += proc_mm.o
{
struct page *page;
+ /*
+ * We scan the hash list read-only. Addition to and removal from
+ * the hash-list needs a held write-lock.
+ */
spin_lock_irq(&mapping->tree_lock);
page = radix_tree_lookup(&mapping->page_tree, offset);
if (page)
struct file * filp,
loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor,
- int nonblock)
+ read_actor_t actor)
{
struct inode *inode = mapping->host;
unsigned long index, end_index, offset;
find_page:
page = find_get_page(mapping, index);
if (unlikely(page == NULL)) {
- if (nonblock) {
- desc->error = -EWOULDBLOCKIO;
- break;
- }
handle_ra_miss(mapping, &ra, index);
goto no_cached_page;
}
- if (!PageUptodate(page)) {
- if (nonblock) {
- page_cache_release(page);
- desc->error = -EWOULDBLOCKIO;
- break;
- }
+ if (!PageUptodate(page))
goto page_not_up_to_date;
- }
page_ok:
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
if (desc.count == 0)
continue;
desc.error = 0;
- do_generic_file_read(filp,ppos,&desc,file_read_actor,0);
+ do_generic_file_read(filp,ppos,&desc,file_read_actor);
retval += desc.written;
if (!retval) {
retval = desc.error;
desc.arg.data = target;
desc.error = 0;
- do_generic_file_read(in_file, ppos, &desc, actor, 0);
+ do_generic_file_read(in_file, ppos, &desc, actor);
if (desc.written)
return desc.written;
return desc.error;
* effect.
*/
error = page_cache_read(file, pgoff);
- grab_swap_token();
/*
* The page we want has now been added to the page cache.
return err;
}
} else {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
+ /*
+ * If a nonlinear mapping then store the file page offset
+ * in the pte.
+ */
+ if (pgoff != linear_page_index(vma, addr)) {
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
+ }
}
len -= PAGE_SIZE;
if (err)
goto out;
- inode_update_time(inode, file->f_vfsmnt, 1);
+ inode_update_time(inode, 1);
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, struct page *page, pgprot_t prot)
{
- struct inode *inode;
- pgoff_t size;
int err = -ENOMEM;
pte_t *pte;
pgd_t *pgd;
if (!pte)
goto err_unlock;
- /*
- * This page may have been truncated. Tell the
- * caller about it.
- */
- err = -EINVAL;
- inode = vma->vm_file->f_mapping->host;
- size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (!page->mapping || page->index >= size)
- goto err_unlock;
-
zap_pte(mm, vma, addr, pte);
// mm->rss++;
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
- int i, err = 0;
+ int i;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- err = -EIO;
+ goto out_eio;
+
+ set_bit(BIO_UPTODATE, &bio_orig->bi_flags);
/*
* free up bounce indirect pages used
mempool_free(bvec->bv_page, pool);
}
- bio_endio(bio_orig, bio_orig->bi_size, err);
+out_eio:
+ bio_endio(bio_orig, bio_orig->bi_size, 0);
bio_put(bio);
}
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer,
- size_t *length, loff_t *ppos)
+ size_t *length)
{
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, file, buffer, length);
max_huge_pages = set_max_huge_pages(max_huge_pages);
return 0;
}
pte_t *pte;
if (write) /* user gate pages are read-only */
return i ? : -EFAULT;
- pgd = pgd_offset_gate(mm, pg);
+ pgd = pgd_offset(mm, pg);
if (!pgd)
return i ? : -EFAULT;
pmd = pmd_offset(pgd, pg);
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
- grab_swap_token();
}
if (!vx_rsspages_avail(mm, 1)) {
*/
/* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) {
- if (!PageReserved(new_page))
- //++mm->rss;
- vx_rsspages_inc(mm);
+ if (!PageReserved(new_page))
+ ++mm->rss;
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
up_write(¤t->mm->mmap_sem);
return ret;
}
-
-/*
- * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
- * shm segments) get accounted against the user_struct instead.
- */
-static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
-
-int user_shm_lock(size_t size, struct user_struct *user)
-{
- unsigned long lock_limit, locked;
- int allowed = 0;
-
- spin_lock(&shmlock_user_lock);
- locked = size >> PAGE_SHIFT;
- lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
- lock_limit >>= PAGE_SHIFT;
- if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
- goto out;
- get_uid(user);
- user->locked_shm += locked;
- allowed = 1;
-out:
- spin_unlock(&shmlock_user_lock);
- return allowed;
-}
-
-void user_shm_unlock(size_t size, struct user_struct *user)
-{
- spin_lock(&shmlock_user_lock);
- user->locked_shm -= (size >> PAGE_SHIFT);
- spin_unlock(&shmlock_user_lock);
- free_uid(user);
-}
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
-#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
int accountable = 1;
unsigned long charged = 0;
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
- prot |= PROT_EXEC;
-
if (file) {
if (is_file_hugepages(file))
accountable = 0;
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area(file, addr, len, pgoff, flags, prot & PROT_EXEC);
if (addr & ~PAGE_MASK)
return addr;
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
-unsigned long
+static inline unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
addr = vma->vm_end;
}
}
+#else
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
#endif
-void arch_unmap_area(struct vm_area_struct *area)
-{
- /*
- * Is this a new hole at the lowest possible address?
- */
- if (area->vm_start >= TASK_UNMAPPED_BASE &&
- area->vm_start < area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_start;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- */
unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma, *prev_vma;
- struct mm_struct *mm = current->mm;
- unsigned long base = mm->mmap_base, addr = addr0;
- int first_time = 1;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /* dont allow allocations above current base */
- if (mm->free_area_cache > base)
- mm->free_area_cache = base;
-
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
-
-try_again:
- /* make sure it can fit in the remaining address space */
- if (mm->free_area_cache < len)
- goto fail;
-
- /* either no address requested or cant fit in requested address hole */
- addr = (mm->free_area_cache - len) & PAGE_MASK;
- do {
- /*
- * Lookup failure means no vma is above this address,
- * i.e. return with success:
- */
- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
- return addr;
-
- /*
- * new region fits between prev_vma->vm_end and
- * vma->vm_start, use it:
- */
- if (addr+len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end)))
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- else
- /* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end)
- mm->free_area_cache = vma->vm_start;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
-
-fail:
- /*
- * if hint left us with no space for the requested
- * mapping then try again:
- */
- if (first_time) {
- mm->free_area_cache = base;
- first_time = 0;
- goto try_again;
- }
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
- mm->free_area_cache = base;
-
- return addr;
-}
-
-void arch_unmap_area_topdown(struct vm_area_struct *area)
-{
- /*
- * Is this a new hole at the highest possible address?
- */
- if (area->vm_end > area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_end;
-}
-
-
-unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, unsigned long exec)
{
if (flags & MAP_FIXED) {
unsigned long ret;
return file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
- if (exec && current->mm->get_unmapped_exec_area)
- return current->mm->get_unmapped_exec_area(file, addr, len, pgoff, flags);
- else
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, exec);
}
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-
-#define SHLIB_BASE 0x00111000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
- unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
- unsigned long addr = addr0, len = len0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long tmp;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!addr && !(flags & MAP_FIXED))
- addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)) {
- return addr;
- }
- }
-
- addr = SHLIB_BASE;
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Must not let a PROT_EXEC mapping get into the
- * brk area:
- */
- if (addr + len > mm->brk)
- goto failed;
-
- /*
- * Up until the brk area we randomize addresses
- * as much as possible:
- */
- if (addr >= 0x01000000) {
- tmp = randomize_range(0x01000000, mm->brk, len);
- vma = find_vma(mm, tmp);
- if (TASK_SIZE - len >= tmp &&
- (!vma || tmp + len <= vma->vm_start))
- return tmp;
- }
- /*
- * Ok, randomization didnt work out - return
- * the result of the linear search:
- */
- return addr;
- }
- addr = vma->vm_end;
- }
-
-failed:
- return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
-
+EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
{
size_t len = area->vm_end - area->vm_start;
+ unsigned long old_end = area->vm_end;
// area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
if (area->vm_flags & VM_LOCKED)
// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
- area->vm_mm->unmap_area(area);
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ area->vm_start < area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_start;
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+ if (area->vm_start > area->vm_mm->non_executable_cache)
+ area->vm_mm->non_executable_cache = area->vm_start;
remove_vm_struct(area);
+ if (unlikely(area->vm_flags & VM_EXEC))
+ arch_remove_exec_range(mm, old_end);
}
/*
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
-#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
return -EINVAL;
if (end == start)
return 0;
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
- prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
-#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
- new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
- vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+ vma->vm_pgoff, map_flags,
+ vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out;
/**
* out_of_memory - is the system out of memory?
*/
-void out_of_memory(int gfp_mask)
+void out_of_memory(void)
{
/*
* oom_lock protects out_of_memory()'s static variables.
*/
lastkill = now;
- printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
- show_free_areas();
-
/* oom_kill() sleeps */
spin_unlock(&oom_lock);
oom_kill();
+++ /dev/null
-/*
- * Just panic() instead of the default behavior of selecting processes
- * for death.
- *
- * Based on
- * Modular OOM handlers for 2.6.4 (C) 2003,2004 Tvrtko A. Ursulin
- * and
- * linux/mm/oom_kill.c (C) 1998,2000 Rik van Riel.
- *
- * Mark Huang <mlhuang@cs.princeton.edu>
- *
- * $Id$
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-
-/**
- * out_of_memory - is the system out of memory?
- */
-void out_of_memory(int gfp_mask)
-{
- /*
- * oom_lock protects out_of_memory()'s static variables.
- * It's a global lock; this is not performance-critical.
- */
- static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
- static unsigned long count;
-
- spin_lock(&oom_lock);
-
- /*
- * If we have gotten only a few failures,
- * we're not really oom.
- */
- if (++count < 10)
- goto out_unlock;
-
- /*
- * Ok, really out of memory. Panic.
- */
-
- printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
- show_free_areas();
-
- panic("Out Of Memory");
-
-out_unlock:
- spin_unlock(&oom_lock);
-}
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length);
if (dirty_writeback_centisecs) {
mod_timer(&wb_timer,
jiffies + (dirty_writeback_centisecs * HZ) / 100);
#include <linux/cpu.h>
#include <linux/vs_base.h>
#include <linux/vs_limit.h>
-#include <linux/ckrm_mem_inline.h>
#include <asm/tlbflush.h>
/* have to delete it as __free_pages_bulk list manipulates */
list_del(&page->lru);
__free_pages_bulk(page, base, zone, area, order);
- ckrm_clear_page_class(page);
ret++;
}
spin_unlock_irqrestore(&zone->lock, flags);
might_sleep_if(wait);
- if (!ckrm_class_limit_ok((GET_MEM_CLASS(current)))) {
- return NULL;
- }
-
zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
if (zones[0] == NULL) /* no zones in the zonelist */
return NULL;
return NULL;
got_pg:
kernel_map_pages(page, 1 << order, 1);
- ckrm_set_pages_class(page, 1 << order, GET_MEM_CLASS(current));
return page;
}
EXPORT_SYMBOL(nr_free_pages);
+unsigned int nr_used_zone_pages(void)
+{
+ unsigned int pages = 0;
+ struct zone *zone;
+
+ for_each_zone(zone)
+ pages += zone->nr_active + zone->nr_inactive;
+
+ return pages;
+}
+
#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -= zholes_size[i];
pgdat->node_present_pages = realtotalpages;
- printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
+ printk("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
/* The shift won't overflow because ZONE_NORMAL is below 4G. */
- if (!is_highmem_idx(zone))
+ if (!is_highmem(zone))
set_page_address(page, __va(start_pfn << PAGE_SHIFT));
#endif
start_pfn++;
pcp->batch = 1 * batch;
INIT_LIST_HEAD(&pcp->list);
}
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
+ printk(" %s zone: %lu pages, LIFO batch:%lu\n",
zone_names[j], realsize, batch);
INIT_LIST_HEAD(&zone->active_list);
INIT_LIST_HEAD(&zone->inactive_list);
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length);
setup_per_zone_pages_min();
setup_per_zone_protection();
return 0;
* whenever sysctl_lower_zone_protection changes.
*/
int lower_zone_protection_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ struct file *file, void __user *buffer, size_t *length)
{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ proc_dointvec_minmax(table, write, file, buffer, length);
setup_per_zone_protection();
return 0;
}
struct file *filp, unsigned long offset)
{
unsigned max;
+ unsigned min;
unsigned orig_next_size;
unsigned actual;
int first_access=0;
if (max == 0)
goto out; /* No readahead */
+ min = get_min_readahead(ra);
orig_next_size = ra->next_size;
if (ra->next_size == 0) {
* pages shall be accessed in the next
* current window.
*/
- average = ra->average;
- if (ra->serial_cnt > average)
- average = (ra->serial_cnt + ra->average + 1) / 2;
-
- ra->next_size = min(average , (unsigned long)max);
+ ra->next_size = min(ra->average , (unsigned long)max);
}
ra->start = offset;
ra->size = ra->next_size;
ra->size = max;
ra->ahead_start = 0;
ra->ahead_size = 0;
- ra->average = max / 2;
}
}
ra->prev_page = offset;
if (ptep_clear_flush_young(vma, address, pte))
referenced++;
- if (mm != current->mm && has_swap_token(mm))
- referenced++;
-
(*mapcount)--;
out_unmap:
* an exclusive swap page, do_wp_page will replace it by a copy
* page, and the user never get to see the data GUP was holding
* the original page for.
- *
- * This test is also useful for when swapoff (unuse_process) has
- * to drop page lock: its reference to the page stops existing
- * ptes from being unmapped, so swapoff can make progress.
*/
if (PageSwapCache(page) &&
page_count(page) != page->mapcount + 2) {
page_remove_rmap(page);
page_cache_release(page);
- // mm->rss--;
- vx_rsspages_dec(mm);
+ mm->rss--;
(*mapcount)--;
}
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
+#define TMPFS_MAGIC 0x01021994
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
return err;
}
} else if (nonblock) {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
+ /*
+ * If a nonlinear mapping then store the file page
+ * offset in the pte.
+ */
+ if (pgoff != linear_page_index(vma, addr)) {
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
+ }
}
len -= PAGE_SIZE;
}
#endif
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+/* Protects current->user->locked_shm from concurrent access */
+static spinlock_t shmem_lock_user = SPIN_LOCK_UNLOCKED;
+
+int shmem_lock(struct file *file, int lock, struct user_struct * user)
{
struct inode *inode = file->f_dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
+ unsigned long lock_limit, locked;
int retval = -ENOMEM;
spin_lock(&info->lock);
+ spin_lock(&shmem_lock_user);
if (lock && !(info->flags & VM_LOCKED)) {
- if (!user_shm_lock(inode->i_size, user))
+ locked = inode->i_size >> PAGE_SHIFT;
+ locked += user->locked_shm;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
goto out_nomem;
- info->flags |= VM_LOCKED;
+ /* for this branch user == current->user so it won't go away under us */
+ atomic_inc(&user->__count);
+ user->locked_shm = locked;
}
if (!lock && (info->flags & VM_LOCKED) && user) {
- user_shm_unlock(inode->i_size, user);
- info->flags &= ~VM_LOCKED;
+ locked = inode->i_size >> PAGE_SHIFT;
+ user->locked_shm -= locked;
+ free_uid(user);
}
+ if (lock)
+ info->flags |= VM_LOCKED;
+ else
+ info->flags &= ~VM_LOCKED;
retval = 0;
out_nomem:
+ spin_unlock(&shmem_lock_user);
spin_unlock(&info->lock);
return retval;
}
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- buf->f_type = TMPFS_SUPER_MAGIC;
+ buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
spin_lock(&sbinfo->stat_lock);
buf->f_blocks = sbinfo->max_blocks;
sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = TMPFS_SUPER_MAGIC;
+ sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
/*
* This file contains the default values for the opereation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
- * Documentation/sysctl/vm.txt.
+ * linux/Documentation/sysctl/vm.txt.
* Started 18.12.91
* Swap aging added 23.2.95, Stephen Tweedie.
* Buffermem limits added 12.3.98, Rik van Riel.
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/vs_base.h>
-#include <linux/vs_memory.h>
spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
unsigned int nr_swapfiles;
check_next_cluster:
if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
{
- unsigned long nr;
+ int nr;
for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
if (si->swap_map[nr])
{
/*
* Go through process' page directory.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
- /*
- * Our reference to the page stops try_to_unmap_one from
- * unmapping its ptes, so swapoff can make progress.
- */
- unlock_page(page);
- down_read(&mm->mmap_sem);
- lock_page(page);
- }
+ down_read(&mm->mmap_sem);
spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!is_vm_hugetlb_page(vma)) {
+++ /dev/null
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cs.wm.edu/~sjiang/token.pdf
- */
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-
-static spinlock_t swap_token_lock = SPIN_LOCK_UNLOCKED;
-static unsigned long swap_token_timeout;
-unsigned long swap_token_check;
-struct mm_struct * swap_token_mm = &init_mm;
-
-#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
-#define SWAP_TOKEN_TIMEOUT (HZ * 300)
-
-/*
- * Take the token away if the process had no page faults
- * in the last interval, or if it has held the token for
- * too long.
- */
-#define SWAP_TOKEN_ENOUGH_RSS 1
-#define SWAP_TOKEN_TIMED_OUT 2
-static int should_release_swap_token(struct mm_struct *mm)
-{
- int ret = 0;
- if (!mm->recent_pagein)
- ret = SWAP_TOKEN_ENOUGH_RSS;
- else if (time_after(jiffies, swap_token_timeout))
- ret = SWAP_TOKEN_TIMED_OUT;
- mm->recent_pagein = 0;
- return ret;
-}
-
-/*
- * Try to grab the swapout protection token. We only try to
- * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
- * SMP lock contention and to check that the process that held
- * the token before is no longer thrashing.
- */
-void grab_swap_token(void)
-{
- struct mm_struct *mm;
- int reason;
-
- /* We have the token. Let others know we still need it. */
- if (has_swap_token(current->mm)) {
- current->mm->recent_pagein = 1;
- return;
- }
-
- if (time_after(jiffies, swap_token_check)) {
-
- /* Can't get swapout protection if we exceed our RSS limit. */
- // if (current->mm->rss > current->mm->rlimit_rss)
- // return;
-
- /* ... or if we recently held the token. */
- if (time_before(jiffies, current->mm->swap_token_time))
- return;
-
- if (!spin_trylock(&swap_token_lock))
- return;
-
- swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-
- mm = swap_token_mm;
- if ((reason = should_release_swap_token(mm))) {
- unsigned long eligible = jiffies;
- if (reason == SWAP_TOKEN_TIMED_OUT) {
- eligible += SWAP_TOKEN_TIMEOUT;
- }
- mm->swap_token_time = eligible;
- swap_token_timeout = jiffies + SWAP_TOKEN_TIMEOUT;
- swap_token_mm = current->mm;
- }
- spin_unlock(&swap_token_lock);
- }
- return;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
- spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm)) {
- swap_token_mm = &init_mm;
- swap_token_check = jiffies;
- }
- spin_unlock(&swap_token_lock);
-}
#include <asm/div64.h>
#include <linux/swapops.h>
-#include <linux/ckrm_mem.h>
-
-#ifndef AT_LIMIT_SUPPORT
-#warning "ckrm_at_limit disabled due to problems with memory hog tests -- seting ckrm_shrink_list_empty to true"
-#undef ckrm_shrink_list_empty
-#define ckrm_shrink_list_empty() (1)
-#endif
/* possible outcome of pageout() */
typedef enum {
/* This context's GFP mask */
unsigned int gfp_mask;
- /* Flag used by CKRM */
- unsigned int ckrm_flags;
-
int may_writepage;
};
* slab to avoid swapping.
*
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
- *
- * `lru_pages' represents the number of on-LRU pages in all the zones which
- * are eligible for the caller's allocation attempt. It is used for balancing
- * slab reclaim versus page reclaim.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
- unsigned long lru_pages)
+static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
{
struct shrinker *shrinker;
+ long pages;
if (down_trylock(&shrinker_sem))
return 0;
+ pages = nr_used_zone_pages();
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
delta = (4 * scanned) / shrinker->seeks;
delta *= (*shrinker->shrinker)(0, gfp_mask);
- do_div(delta, lru_pages + 1);
+ do_div(delta, pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0)
shrinker->nr = LONG_MAX; /* It wrapped! */
{
LIST_HEAD(page_list);
struct pagevec pvec;
- int max_scan = sc->nr_to_scan, nr_pass;
- unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
+ int max_scan = sc->nr_to_scan;
pagevec_init(&pvec, 1);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
-redo:
- ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
- nr_pass = zone->nr_inactive;
while (max_scan > 0) {
struct page *page;
int nr_taken = 0;
int nr_scan = 0;
int nr_freed;
- while (nr_pass-- && nr_scan++ < SWAP_CLUSTER_MAX &&
+ while (nr_scan++ < SWAP_CLUSTER_MAX &&
!list_empty(&zone->inactive_list)) {
page = lru_to_page(&zone->inactive_list);
SetPageLRU(page);
list_add(&page->lru, &zone->inactive_list);
continue;
- } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
- __put_page(page);
- SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
- list_add_tail(&page->lru, &zone->inactive_list);
-#else
- list_add(&page->lru, &zone->inactive_list);
-#endif
- continue;
}
list_add(&page->lru, &page_list);
- ckrm_mem_dec_inactive(page);
nr_taken++;
}
zone->nr_inactive -= nr_taken;
zone->pages_scanned += nr_taken;
spin_unlock_irq(&zone->lru_lock);
- if ((bit_flag == 0) && (nr_taken == 0))
+ if (nr_taken == 0)
goto done;
max_scan -= nr_scan;
spin_lock_irq(&zone->lru_lock);
}
}
- if (ckrm_flags && (nr_pass <= 0)) {
- goto redo;
- }
}
spin_unlock_irq(&zone->lru_lock);
done:
long mapped_ratio;
long distress;
long swap_tendency;
- unsigned int ckrm_flags = sc->ckrm_flags, bit_flag;
- int nr_pass;
lru_add_drain();
pgmoved = 0;
spin_lock_irq(&zone->lru_lock);
-redo:
- ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag);
- nr_pass = zone->nr_active;
- while (pgscanned < nr_pages && !list_empty(&zone->active_list) &&
- nr_pass) {
+ while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
page = lru_to_page(&zone->active_list);
prefetchw_prev_lru_page(page, &zone->active_list, flags);
if (!TestClearPageLRU(page))
__put_page(page);
SetPageLRU(page);
list_add(&page->lru, &zone->active_list);
- pgscanned++;
- } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) {
- __put_page(page);
- SetPageLRU(page);
-#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE
- list_add_tail(&page->lru, &zone->active_list);
-#else
- list_add(&page->lru, &zone->active_list);
-#endif
} else {
list_add(&page->lru, &l_hold);
- ckrm_mem_dec_active(page);
pgmoved++;
- pgscanned++;
- }
- if (!--nr_pass && ckrm_flags) {
- goto redo;
}
+ pgscanned++;
}
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
if (!TestClearPageActive(page))
BUG();
list_move(&page->lru, &zone->inactive_list);
- ckrm_mem_inc_inactive(page);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_inactive += pgmoved;
BUG();
BUG_ON(!PageActive(page));
list_move(&page->lru, &zone->active_list);
- ckrm_mem_inc_active(page);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
zone->nr_active += pgmoved;
sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
while (nr_active || nr_inactive) {
- sc->ckrm_flags = ckrm_setup_reclamation();
if (nr_active) {
sc->nr_to_scan = min(nr_active,
(unsigned long)SWAP_CLUSTER_MAX);
if (sc->nr_to_reclaim <= 0)
break;
}
- ckrm_teardown_reclamation();
- }
-}
-
-#if defined(CONFIG_CKRM_RES_MEM) && defined(AT_LIMIT_SUPPORT)
-// This function needs to be given more thought.
-// Shrink the class to be at 90% of its limit
-static void
-ckrm_shrink_class(ckrm_mem_res_t *cls)
-{
- struct scan_control sc;
- struct zone *zone;
- int zindex = 0, active_credit = 0, inactive_credit = 0;
-
- if (ckrm_test_set_shrink(cls)) { // set the SHRINK bit atomically
- // if it is already set somebody is working on it. so... leave
- return;
- }
- sc.nr_mapped = read_page_state(nr_mapped);
- sc.nr_scanned = 0;
- sc.ckrm_flags = ckrm_get_reclaim_flags(cls);
- sc.nr_reclaimed = 0;
- sc.priority = 0; // always very high priority
-
- for_each_zone(zone) {
- int zone_total, zone_limit, active_limit, inactive_limit;
- int active_over, inactive_over;
- unsigned long nr_active, nr_inactive;
- u64 temp;
-
- zone->temp_priority = zone->prev_priority;
- zone->prev_priority = sc.priority;
-
- zone_total = zone->nr_active + zone->nr_inactive + zone->free_pages;
-
- temp = (u64) cls->pg_limit * zone_total;
- do_div(temp, ckrm_tot_lru_pages);
- zone_limit = (int) temp;
- active_limit = (6 * zone_limit) / 10; // 2/3rd in active list
- inactive_limit = (3 * zone_limit) / 10; // 1/3rd in inactive list
-
- active_over = cls->nr_active[zindex] - active_limit + active_credit;
- inactive_over = active_over +
- (cls->nr_inactive[zindex] - inactive_limit) + inactive_credit;
-
- if (active_over > 0) {
- zone->nr_scan_active += active_over + 1;
- nr_active = zone->nr_scan_active;
- active_credit = 0;
- } else {
- active_credit += active_over;
- nr_active = 0;
- }
-
- if (inactive_over > 0) {
- zone->nr_scan_inactive += inactive_over;
- nr_inactive = zone->nr_scan_inactive;
- inactive_credit = 0;
- } else {
- inactive_credit += inactive_over;
- nr_inactive = 0;
- }
- while (nr_active || nr_inactive) {
- if (nr_active) {
- sc.nr_to_scan = min(nr_active,
- (unsigned long)SWAP_CLUSTER_MAX);
- nr_active -= sc.nr_to_scan;
- refill_inactive_zone(zone, &sc);
- }
-
- if (nr_inactive) {
- sc.nr_to_scan = min(nr_inactive,
- (unsigned long)SWAP_CLUSTER_MAX);
- nr_inactive -= sc.nr_to_scan;
- shrink_cache(zone, &sc);
- if (sc.nr_to_reclaim <= 0)
- break;
- }
- }
- zone->prev_priority = zone->temp_priority;
- zindex++;
}
- ckrm_clear_shrink(cls);
}
-static void
-ckrm_shrink_classes(void)
-{
- ckrm_mem_res_t *cls;
-
- spin_lock(&ckrm_mem_lock);
- while (!ckrm_shrink_list_empty()) {
- cls = list_entry(ckrm_shrink_list.next, ckrm_mem_res_t,
- shrink_list);
- spin_unlock(&ckrm_mem_lock);
- ckrm_shrink_class(cls);
- spin_lock(&ckrm_mem_lock);
- list_del(&cls->shrink_list);
- cls->flags &= ~MEM_AT_LIMIT;
- }
- spin_unlock(&ckrm_mem_lock);
-}
-
-#else
-
-#if defined(CONFIG_CKRM_RES_MEM) && !defined(AT_LIMIT_SUPPORT)
-#warning "disabling ckrm_at_limit -- setting ckrm_shrink_classes to noop "
-#endif
-
-#define ckrm_shrink_classes() do { } while(0)
-#endif
-
/*
* This is the direct reclaim path, for page-allocating processes. We only
* try to reclaim pages from zones which will satisfy the caller's allocation
int total_scanned = 0, total_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc;
- unsigned long lru_pages = 0;
int i;
sc.gfp_mask = gfp_mask;
inc_page_state(allocstall);
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
-
- zone->temp_priority = DEF_PRIORITY;
- lru_pages += zone->nr_active + zone->nr_inactive;
- }
+ for (i = 0; zones[i] != 0; i++)
+ zones[i]->temp_priority = DEF_PRIORITY;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc.nr_mapped = read_page_state(nr_mapped);
sc.nr_reclaimed = 0;
sc.priority = priority;
shrink_caches(zones, &sc);
- shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
+ shrink_slab(sc.nr_scanned, gfp_mask);
if (reclaim_state) {
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
blk_congestion_wait(WRITE, HZ/10);
}
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
- out_of_memory(gfp_mask);
+ out_of_memory();
out:
for (i = 0; zones[i] != 0; i++)
zones[i]->prev_priority = zones[i]->temp_priority;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int all_zones_ok = 1;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
- unsigned long lru_pages = 0;
+
if (nr_pages == 0) {
/*
end_zone = pgdat->nr_zones - 1;
}
scan:
- for (i = 0; i <= end_zone; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- lru_pages += zone->nr_active + zone->nr_inactive;
- }
-
/*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
sc.priority = priority;
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
+ shrink_slab(sc.nr_scanned, GFP_KERNEL);
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_reclaimed += sc.nr_reclaimed;
if (zone->all_unreclaimable)
finish_wait(&pgdat->kswapd_wait, &wait);
try_to_clip_inodes();
- if (!ckrm_shrink_list_empty())
- ckrm_shrink_classes();
- else
balance_pgdat(pgdat, 0);
}
return 0;
*/
void wakeup_kswapd(struct zone *zone)
{
- if ((zone->free_pages > zone->pages_low) && ckrm_shrink_list_empty())
+ if (zone->free_pages > zone->pages_low)
return;
if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
return;
/* Global VLAN variables */
/* Our listing of VLAN group(s) */
-struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
+struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+spinlock_t vlan_group_lock = SPIN_LOCK_UNLOCKED;
#define vlan_grp_hashfn(IDX) ((((IDX) >> VLAN_GRP_HASH_SHIFT) ^ (IDX)) & VLAN_GRP_HASH_MASK)
static char vlan_fullname[] = "802.1Q VLAN Support";
.func = vlan_skb_recv, /* VLAN receive method */
};
-/* Bits of netdev state that are propogated from real device to virtual */
-#define VLAN_LINK_STATE_MASK \
- ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER))
-
/* End of global variables definitions. */
/*
* references left.
*/
for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) {
- BUG_ON(!hlist_empty(&vlan_group_hash[i]));
+ if (vlan_group_hash[i] != NULL)
+ BUG();
}
vlan_proc_cleanup();
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
-/* Must be invoked with RCU read lock (no preempt) */
+/* Must be invoked with vlan_group_lock held. */
static struct vlan_group *__vlan_find_group(int real_dev_ifindex)
{
struct vlan_group *grp;
- struct hlist_node *n;
- int hash = vlan_grp_hashfn(real_dev_ifindex);
- hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) {
+ for (grp = vlan_group_hash[vlan_grp_hashfn(real_dev_ifindex)];
+ grp != NULL;
+ grp = grp->next) {
if (grp->real_dev_ifindex == real_dev_ifindex)
- return grp;
+ break;
}
- return NULL;
+ return grp;
+}
+
+/* Must hold vlan_group_lock. */
+static void __grp_hash(struct vlan_group *grp)
+{
+ struct vlan_group **head;
+
+ head = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ grp->next = *head;
+ *head = grp;
+}
+
+/* Must hold vlan_group_lock. */
+static void __grp_unhash(struct vlan_group *grp)
+{
+ struct vlan_group *next, **pprev;
+
+ pprev = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ next = *pprev;
+ while (next != grp) {
+ pprev = &next->next;
+ next = *pprev;
+ }
+ *pprev = grp->next;
}
/* Find the protocol handler. Assumes VID < VLAN_VID_MASK.
*
- * Must be invoked with RCU read lock (no preempt)
+ * Must be invoked with vlan_group_lock held.
*/
struct net_device *__find_vlan_dev(struct net_device *real_dev,
unsigned short VID)
return NULL;
}
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
- kfree(container_of(rcu, struct vlan_group, rcu));
-}
-
-
/* This returns 0 if everything went fine.
* It will return 1 if the group was killed as a result.
* A negative return indicates failure.
if (vlan_id >= VLAN_VID_MASK)
return -EINVAL;
- ASSERT_RTNL();
+ spin_lock_bh(&vlan_group_lock);
grp = __vlan_find_group(real_dev_ifindex);
+ spin_unlock_bh(&vlan_group_lock);
ret = 0;
if (real_dev->features & NETIF_F_HW_VLAN_RX)
real_dev->vlan_rx_register(real_dev, NULL);
- hlist_del_rcu(&grp->hlist);
-
- /* Free the group, after all cpu's are done. */
- call_rcu(&grp->rcu, vlan_rcu_free);
+ spin_lock_bh(&vlan_group_lock);
+ __grp_unhash(grp);
+ spin_unlock_bh(&vlan_group_lock);
+ /* Free the group, after we have removed it
+ * from the hash.
+ */
+ kfree(grp);
grp = NULL;
+
ret = 1;
}
}
new_dev->set_mac_address = vlan_dev_set_mac_address;
new_dev->set_multicast_list = vlan_dev_set_multicast_list;
new_dev->destructor = free_netdev;
- new_dev->do_ioctl = vlan_dev_ioctl;
}
/* Attach a VLAN device to a mac address (ie Ethernet Card).
struct vlan_group *grp;
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
+ int r;
char name[IFNAMSIZ];
#ifdef VLAN_DEBUG
if (!(real_dev->flags & IFF_UP))
goto out_unlock;
- if (__find_vlan_dev(real_dev, VLAN_ID) != NULL) {
+ spin_lock_bh(&vlan_group_lock);
+ r = (__find_vlan_dev(real_dev, VLAN_ID) != NULL);
+ spin_unlock_bh(&vlan_group_lock);
+
+ if (r) {
/* was already registered. */
printk(VLAN_DBG "%s: ALREADY had VLAN registered\n", __FUNCTION__);
goto out_unlock;
new_dev->flags = real_dev->flags;
new_dev->flags &= ~IFF_UP;
- new_dev->state = real_dev->state & VLAN_LINK_STATE_MASK;
-
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
*/
/* So, got the sucker initialized, now lets place
* it into our local structure.
*/
+ spin_lock_bh(&vlan_group_lock);
grp = __vlan_find_group(real_dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
/* Note, we are running under the RTNL semaphore
* so it cannot "appear" on us.
memset(grp, 0, sizeof(struct vlan_group));
grp->real_dev_ifindex = real_dev->ifindex;
- hlist_add_head_rcu(&grp->hlist,
- &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]);
+ spin_lock_bh(&vlan_group_lock);
+ __grp_hash(grp);
+ spin_unlock_bh(&vlan_group_lock);
if (real_dev->features & NETIF_F_HW_VLAN_RX)
real_dev->vlan_rx_register(real_dev, grp);
static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
- struct vlan_group *grp = __vlan_find_group(dev->ifindex);
+ struct net_device *dev = (struct net_device *)(ptr);
+ struct vlan_group *grp = NULL;
int i, flgs;
- struct net_device *vlandev;
+ struct net_device *vlandev = NULL;
+
+ spin_lock_bh(&vlan_group_lock);
+ grp = __vlan_find_group(dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
if (!grp)
goto out;
*/
switch (event) {
- case NETDEV_CHANGE:
- /* Propogate real device state to vlan devices */
- flgs = dev->state & VLAN_LINK_STATE_MASK;
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev)
- continue;
-
- if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
- vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK)
- | flgs;
- netdev_state_change(vlandev);
- }
- }
+ case NETDEV_CHANGEADDR:
+ case NETDEV_GOING_DOWN:
+ /* Ignore for now */
break;
case NETDEV_DOWN:
ret = unregister_vlan_dev(dev,
VLAN_DEV_INFO(vlandev)->vlan_id);
+ dev_put(vlandev);
unregister_netdevice(vlandev);
/* Group was destroyed? */
#define VLAN_GRP_HASH_SHIFT 5
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
-extern struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
+extern struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+extern spinlock_t vlan_group_lock;
/* Find a VLAN device by the MAC address of its Ethernet device, and
* it's VLAN ID. The default configuration is to have VLAN's scope
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*
- * Must be invoked with rcu_read_lock (ie preempt disabled)
- * or with RTNL.
+ * Must be invoked with vlan_group_lock held and that lock MUST NOT
+ * be dropped until a reference is obtained on the returned device.
+ * You may drop the lock earlier if you are running under the RTNL
+ * semaphore, however.
*/
struct net_device *__find_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
-int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
/* We have 12 bits of vlan ID.
*
- * We must not drop allow preempt until we hold a
+ * We must not drop the vlan_group_lock until we hold a
* reference to the device (netif_rx does that) or we
* fail.
*/
- rcu_read_lock();
+ spin_lock_bh(&vlan_group_lock);
skb->dev = __find_vlan_dev(dev, vid);
if (!skb->dev) {
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
*/
if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_lock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
vlan_flush_mc_list(dev);
return 0;
}
-
-int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
- struct ifreq ifrr;
- int err = -EOPNOTSUPP;
-
- strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
- ifrr.ifr_ifru = ifr->ifr_ifru;
-
- switch(cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (real_dev->do_ioctl && netif_device_present(real_dev))
- err = real_dev->do_ioctl(dev, &ifrr, cmd);
- break;
-
- case SIOCETHTOOL:
- err = dev_ethtool(&ifrr);
- }
-
- if (!err)
- ifr->ifr_ifru = ifrr.ifr_ifru;
-
- return err;
-}
-
/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
config IPV6
tristate "The IPv6 protocol (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
- select CRYPTO if IPV6_PRIVACY
- select CRYPTO_MD5 if IPV6_PRIVACY
---help---
This is experimental support for the IP version 6 (formerly called
IPng "IP next generation"). You will still be able to do
If unsure, say N.
+config NET_FASTROUTE
+ bool "Fast switching (read help!)"
+ depends on EXPERIMENTAL
+ ---help---
+ Saying Y here enables direct NIC-to-NIC (NIC = Network Interface
+ Card) data transfers on the local network, which is fast.
+
+ IMPORTANT NOTE: This option is NOT COMPATIBLE with "Network packet
+ filtering" (CONFIG_NETFILTER). Say N here if you say Y there.
+
+ However, it will work with all options in the "Advanced router"
+ section (except for "Use TOS value as routing key" and
+ "Use FWMARK value as routing key").
+
+ At the moment, few devices support fast switching (tulip is one of
+ them, a modified 8390 driver can be found at
+ <ftp://ftp.tux.org/pub/net/ip-routing/fastroute/fastroute-8390.tar.gz>).
+
+ If unsure, say N.
+
config NET_HW_FLOWCONTROL
bool "Forwarding between high speed interfaces"
depends on EXPERIMENTAL
source "net/bluetooth/Kconfig"
-source "net/tux/Kconfig"
-
source "drivers/net/Kconfig"
endmenu
ifneq ($(CONFIG_IPV6),)
obj-y += ipv6/
endif
-obj-$(CONFIG_TUX) += tux/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_NET_SCHED) += sched/
case SIOCADDRT: {
struct net_device *dev = NULL;
+ /*
+ * FIXME: the name of the device is still in user
+ * space, isn't it?
+ */
if (rt.rt_dev) {
- char name[IFNAMSIZ];
- if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
- return -EFAULT;
- name[IFNAMSIZ-1] = '\0';
- dev = __dev_get_by_name(name);
+ dev = __dev_get_by_name(rt.rt_dev);
if (!dev)
return -ENODEV;
}
BRPRIV(skb->dev)->stats.rx_packets--;
br2684_push(atmvcc, skb);
}
- __module_get(THIS_MODULE);
+ (void) try_module_get(THIS_MODULE);
return 0;
error:
write_unlock_irq(&devs_lock);
/*
* lec.c: Lan Emulation driver
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
static int lec_close(struct net_device *dev);
static struct net_device_stats *lec_get_stats(struct net_device *dev);
static void lec_init(struct net_device *dev);
-static struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
+static inline struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr);
-static int lec_arp_remove(struct lec_priv *priv,
+static inline int lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove);
/* LANE2 functions */
static void lane2_associate_ind (struct net_device *dev, u8 *mac_address,
/*
* Remove entry from lec_arp_table
*/
-static int
+static inline int
lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove)
{
/*
* Find entry by mac_address
*/
-static struct lec_arp_table*
+static inline struct lec_arp_table*
lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr)
{
*
* Lan Emulation client header file
*
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
/*
* Lec arp cache
- * Marko Kiiskila mkiiskila@yahoo.com
+ * Marko Kiiskila carnil@cs.tut.fi
*
*/
#ifndef _LEC_ARP_H
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
-#include <linux/seq_file.h>
/* We are an ethernet device */
#include <linux/if_ether.h>
return 0;
}
-/* this is buggered - we need locking for qos_head */
-void atm_mpoa_disp_qos(struct seq_file *m)
+void atm_mpoa_disp_qos(char *page, ssize_t *len)
{
+
unsigned char *ip;
char ipaddr[16];
struct atm_mpoa_qos *qos;
qos = qos_head;
- seq_printf(m, "QoS entries for shortcuts:\n");
- seq_printf(m, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
+ *len += sprintf(page + *len, "QoS entries for shortcuts:\n");
+ *len += sprintf(page + *len, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
ipaddr[sizeof(ipaddr)-1] = '\0';
while (qos != NULL) {
ip = (unsigned char *)&qos->ipaddr;
sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(ip));
- seq_printf(m, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
+ *len += sprintf(page + *len, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
NIPQUAD(ipaddr),
qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu,
qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu);
qos = qos->next;
}
+
+ return;
}
static struct net_device *find_lec_by_itfnum(int itf)
int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
/* Display QoS entries. This is for the procfs */
-struct seq_file;
-void atm_mpoa_disp_qos(struct seq_file *m);
+void atm_mpoa_disp_qos(char *page, ssize_t *len);
#endif /* _MPC_H_ */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
-static int proc_mpc_open(struct inode *inode, struct file *file);
+static ssize_t proc_mpc_read(struct file *file, char __user *buff,
+ size_t count, loff_t *pos);
+
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
-static int parse_qos(const char *buff);
+static int parse_qos(const char *buff, int len);
/*
* Define allowed FILE OPERATIONS
*/
static struct file_operations mpc_file_operations = {
.owner = THIS_MODULE,
- .open = proc_mpc_open,
- .read = seq_read,
- .llseek = seq_lseek,
+ .read = proc_mpc_read,
.write = proc_mpc_write,
- .release = seq_release,
};
+static int print_header(char *buff,struct mpoa_client *mpc){
+ if(mpc != NULL){
+ return sprintf(buff,"\nInterface %d:\n\n",mpc->dev_num);
+
+ }
+ return 0;
+}
+
/*
* Returns the state of an ingress cache entry as a string
*/
}
}
-/*
- * FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
- */
-
-static void *mpc_start(struct seq_file *m, loff_t *pos)
-{
- loff_t l = *pos;
- struct mpoa_client *mpc;
-
- if (!l--)
- return SEQ_START_TOKEN;
- for (mpc = mpcs; mpc; mpc = mpc->next)
- if (!l--)
- return mpc;
- return NULL;
-}
-
-static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct mpoa_client *p = v;
- (*pos)++;
- return v == SEQ_START_TOKEN ? mpcs : p->next;
-}
-
-static void mpc_stop(struct seq_file *m, void *v)
-{
-}
-
/*
* READING function - called when the /proc/atm/mpoa file is read from.
*/
-static ssize_t mpc_show(struct seq_file *m, void *v)
-{
- struct mpoa_client *mpc = v;
+static ssize_t proc_mpc_read(struct file *file, char __user *buff,
+ size_t count, loff_t *pos){
+ unsigned long page = 0;
unsigned char *temp;
- int i;
+ ssize_t length = 0;
+ int i = 0;
+ struct mpoa_client *mpc = mpcs;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
struct timeval now;
unsigned char ip_string[16];
-
- if (v == SEQ_START_TOKEN) {
- atm_mpoa_disp_qos(m);
- return 0;
- }
-
- seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
- seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
- do_gettimeofday(&now);
-
- for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
- temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip;
- sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- seq_printf(m, "%-16s%s%-14lu%-12u",
- ip_string,
- ingress_state_string(in_entry->entry_state),
- in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec),
- in_entry->packets_fwded);
- if (in_entry->shortcut)
- seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
- seq_printf(m, "\n");
+ if(count == 0)
+ return 0;
+ page = get_zeroed_page(GFP_KERNEL);
+ if(!page)
+ return -ENOMEM;
+ atm_mpoa_disp_qos((char *)page, &length);
+ while(mpc != NULL){
+ length += print_header((char *)page + length, mpc);
+ length += sprintf((char *)page + length,"Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
+ in_entry = mpc->in_cache;
+ do_gettimeofday(&now);
+ while(in_entry != NULL){
+ temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ length += sprintf((char *)page + length,"%-16s%s%-14lu%-12u", ip_string, ingress_state_string(in_entry->entry_state), (in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec)), in_entry->packets_fwded);
+ if(in_entry->shortcut)
+ length += sprintf((char *)page + length," %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
+ length += sprintf((char *)page + length,"\n");
+ in_entry = in_entry->next;
+ }
+ length += sprintf((char *)page + length,"\n");
+ eg_entry = mpc->eg_cache;
+ length += sprintf((char *)page + length,"Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
+ while(eg_entry != NULL){
+ for(i=0;i<ATM_ESA_LEN;i++){
+ length += sprintf((char *)page + length,"%02x",eg_entry->ctrl_info.in_MPC_data_ATM_addr[i]);}
+ length += sprintf((char *)page + length,"\n%-16lu%s%-14lu%-15u",(unsigned long) ntohl(eg_entry->ctrl_info.cache_id), egress_state_string(eg_entry->entry_state), (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), eg_entry->packets_rcvd);
+
+ /* latest IP address */
+ temp = (unsigned char *)&eg_entry->latest_ip_addr;
+ sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ length += sprintf((char *)page + length, "%-16s", ip_string);
+
+ if(eg_entry->shortcut)
+ length += sprintf((char *)page + length," %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
+ length += sprintf((char *)page + length,"\n");
+ eg_entry = eg_entry->next;
+ }
+ length += sprintf((char *)page + length,"\n");
+ mpc = mpc->next;
}
- seq_printf(m, "\n");
- seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
- for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
- unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
- for(i = 0; i < ATM_ESA_LEN; i++)
- seq_printf(m, "%02x", p[i]);
- seq_printf(m, "\n%-16lu%s%-14lu%-15u",
- (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
- egress_state_string(eg_entry->entry_state),
- (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)),
- eg_entry->packets_rcvd);
-
- /* latest IP address */
- temp = (unsigned char *)&eg_entry->latest_ip_addr;
- sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- seq_printf(m, "%-16s", ip_string);
-
- if (eg_entry->shortcut)
- seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
- seq_printf(m, "\n");
+ if (*pos >= length) length = 0;
+ else {
+ if ((count + *pos) > length) count = length - *pos;
+ if (copy_to_user(buff, (char *)page , count)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ *pos += count;
}
- seq_printf(m, "\n");
- return 0;
-}
-
-static struct seq_operations mpc_op = {
- .start = mpc_start,
- .next = mpc_next,
- .stop = mpc_stop,
- .show = mpc_show
-};
-static int proc_mpc_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &mpc_op);
+ free_page(page);
+ return length;
}
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
- char *page, *p;
- unsigned len;
+ int incoming, error, retval;
+ char *page, c;
+ const char __user *tmp;
- if (nbytes == 0)
- return 0;
+ if (nbytes == 0) return 0;
+ if (nbytes >= PAGE_SIZE) nbytes = PAGE_SIZE-1;
- if (nbytes >= PAGE_SIZE)
- nbytes = PAGE_SIZE-1;
+ error = verify_area(VERIFY_READ, buff, nbytes);
+ if (error) return error;
page = (char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- for (p = page, len = 0; len < nbytes; p++, len++) {
- if (get_user(*p, buff++)) {
- free_page((unsigned long)page);
- return -EFAULT;
- }
- if (*p == '\0' || *p == '\n')
+ if (page == NULL) return -ENOMEM;
+
+ incoming = 0;
+ tmp = buff;
+ while(incoming < nbytes){
+ if (get_user(c, tmp++)) return -EFAULT;
+ incoming++;
+ if (c == '\0' || c == '\n')
break;
}
- *p = '\0';
+ retval = copy_from_user(page, buff, incoming);
+ if (retval != 0) {
+ printk("mpoa: proc_mpc_write: copy_from_user() failed\n");
+ return -EFAULT;
+ }
+
+ *ppos += incoming;
- if (!parse_qos(page))
+ page[incoming] = '\0';
+ retval = parse_qos(page, incoming);
+ if (retval == 0)
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
- return len;
+ return nbytes;
}
-static int parse_qos(const char *buff)
+static int parse_qos(const char *buff, int len)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
- unsigned char ip[4];
- int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
+
+ int pos, i;
uint32_t ipaddr;
+ unsigned char ip[4];
+ char cmd[4], temp[256];
+ const char *tmp, *prev;
struct atm_qos qos;
+ int value[5];
memset(&qos, 0, sizeof(struct atm_qos));
-
- if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
- ip, ip+1, ip+2, ip+3) == 4) {
- ipaddr = *(uint32_t *)ip;
- return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
- }
-
- if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
- ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
- rx_pcr = tx_pcr;
- rx_sdu = tx_sdu;
- } else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
- ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
- return 0;
-
+ strlcpy(cmd, buff, sizeof(cmd));
+ if( strncmp(cmd,"add", 3) && strncmp(cmd,"del", 3))
+ return 0; /* not add or del */
+
+ pos = 4;
+ /* next parse ip */
+ prev = buff + pos;
+ for (i = 0; i < 3; i++) {
+ tmp = strchr(prev, '.');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ ip[i] = (char)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, ' ');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ ip[i] = (char)simple_strtoul(temp, NULL, 0);
ipaddr = *(uint32_t *)ip;
+
+ if(!strncmp(cmd, "del", 3))
+ return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
+
+ /* next transmit values */
+ tmp = strstr(buff, "tx=");
+ if(tmp == NULL) return 0;
+ tmp += 3;
+ prev = tmp;
+ for( i = 0; i < 1; i++){
+ tmp = strchr(prev, ',');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, ' ');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
qos.txtp.traffic_class = ATM_CBR;
- qos.txtp.max_pcr = tx_pcr;
- qos.txtp.max_sdu = tx_sdu;
- qos.rxtp.traffic_class = ATM_CBR;
- qos.rxtp.max_pcr = rx_pcr;
- qos.rxtp.max_sdu = rx_sdu;
+ qos.txtp.max_pcr = value[0];
+ qos.txtp.max_sdu = value[1];
+
+ /* next receive values */
+ tmp = strstr(buff, "rx=");
+ if(tmp == NULL) return 0;
+ if (strstr(buff, "rx=tx")) { /* rx == tx */
+ qos.rxtp.traffic_class = qos.txtp.traffic_class;
+ qos.rxtp.max_pcr = qos.txtp.max_pcr;
+ qos.rxtp.max_cdv = qos.txtp.max_cdv;
+ qos.rxtp.max_sdu = qos.txtp.max_sdu;
+ } else {
+ tmp += 3;
+ prev = tmp;
+ for( i = 0; i < 1; i++){
+ tmp = strchr(prev, ',');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ tmp ++;
+ prev = tmp;
+ }
+ tmp = strchr(prev, '\0');
+ if (tmp == NULL) return 0;
+ memset(temp, '\0', 256);
+ memcpy(temp, prev, tmp-prev);
+ value[i] = (int)simple_strtoul(temp, NULL, 0);
+ qos.rxtp.traffic_class = ATM_CBR;
+ qos.rxtp.max_pcr = value[0];
+ qos.rxtp.max_sdu = value[1];
+ }
qos.aal = ATM_AAL5;
dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr,
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
- __module_get(THIS_MODULE);
+ (void) try_module_get(THIS_MODULE);
return 0;
}
? -EFAULT : 0;
goto done;
case ATM_SETLOOP:
- if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
- __ATM_LM_XTLOC((int) (unsigned long) buf) >
- __ATM_LM_XTRMT((int) (unsigned long) buf)) {
+ if (__ATM_LM_XTRMT((int) (long) buf) &&
+ __ATM_LM_XTLOC((int) (long) buf) >
+ __ATM_LM_XTRMT((int) (long) buf)) {
error = -EINVAL;
goto done;
}
RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
- HIDP Module (Human Interface Device Protocol)
Say Y here to compile Bluetooth support into the kernel or say M to
compile it as module (bluetooth).
struct bnep_connlist_req {
__u32 cnum;
- struct bnep_conninfo __user *ci;
+ struct bnep_conninfo *ci;
};
struct bnep_proto_filter {
#define BT_DBG(D...)
#endif
-#define VERSION "1.2"
+#define VERSION "1.0"
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
static int bnep_send(struct bnep_session *s, void *data, size_t len)
{
struct socket *sock = s->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
- return kernel_sendmsg(sock, &s->msg, &iv, 1, len);
+ s->msg.msg_iov = &iv;
+ s->msg.msg_iovlen = 1;
+ return sock_sendmsg(sock, &s->msg, len);
}
static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp)
return bnep_send(s, &rsp, sizeof(rsp));
}
-#ifdef CONFIG_BT_BNEP_PROTO_FILTER
-static inline void bnep_set_default_proto_filter(struct bnep_session *s)
-{
- /* (IPv4, ARP) */
- s->proto_filter[0].start = htons(0x0800);
- s->proto_filter[0].end = htons(0x0806);
- /* (RARP, AppleTalk) */
- s->proto_filter[1].start = htons(0x8035);
- s->proto_filter[1].end = htons(0x80F3);
- /* (IPX, IPv6) */
- s->proto_filter[2].start = htons(0x8137);
- s->proto_filter[2].end = htons(0x86DD);
-}
-#endif
-
static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
{
int n;
BT_DBG("proto filter start %d end %d",
f[i].start, f[i].end);
}
-
if (i < BNEP_MAX_PROTO_FILTERS)
memset(f + i, 0, sizeof(*f));
- if (n == 0)
- bnep_set_default_proto_filter(s);
-
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS);
} else {
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED);
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
- struct kvec iv[3];
+ struct iovec iv[3];
int len = 0, il = 0;
u8 type = 0;
goto send;
}
- iv[il++] = (struct kvec) { &type, 1 };
+ iv[il++] = (struct iovec) { &type, 1 };
len++;
if (!memcmp(eh->h_dest, s->eh.h_source, ETH_ALEN))
type = __bnep_tx_types[type];
switch (type) {
case BNEP_COMPRESSED_SRC_ONLY:
- iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN };
+ iv[il++] = (struct iovec) { eh->h_source, ETH_ALEN };
len += ETH_ALEN;
break;
case BNEP_COMPRESSED_DST_ONLY:
- iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN };
+ iv[il++] = (struct iovec) { eh->h_dest, ETH_ALEN };
len += ETH_ALEN;
break;
}
send:
- iv[il++] = (struct kvec) { skb->data, skb->len };
+ iv[il++] = (struct iovec) { skb->data, skb->len };
len += skb->len;
/* FIXME: linearize skb */
{
- len = kernel_sendmsg(sock, &s->msg, iv, il, len);
+ s->msg.msg_iov = iv;
+ s->msg.msg_iovlen = il;
+ len = sock_sendmsg(sock, &s->msg, len);
}
kfree_skb(skb);
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&s->killed)) {
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
-
+
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
- bnep_set_default_proto_filter(s);
-#endif
+ /* (IPv4, ARP) */
+ s->proto_filter[0].start = htons(0x0800);
+ s->proto_filter[0].end = htons(0x0806);
+ /* (RARP, AppleTalk) */
+ s->proto_filter[1].start = htons(0x8035);
+ s->proto_filter[1].end = htons(0x80F3);
+ /* (IPX, IPv6) */
+ s->proto_filter[2].start = htons(0x8137);
+ s->proto_filter[2].end = htons(0x86DD);
+#endif
+
err = register_netdev(dev);
if (err) {
goto failed;
static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len)
{
struct socket *sock = session->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p data %p len %d", session, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int cmtp_process_transmit(struct cmtp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&session->terminate)) {
static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
{
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("sock %p data %p len %d", sock, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int hidp_process_transmit(struct hidp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)
{
+ mm_segment_t fs;
int r;
rfcomm_lock();
+ fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_open(d, src, dst, channel);
+ set_fs(fs);
rfcomm_unlock();
return r;
int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
+ mm_segment_t fs;
int r;
rfcomm_lock();
+ fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_close(d, err);
+ set_fs(fs);
rfcomm_unlock();
return r;
{
struct rfcomm_session *s = NULL;
struct sockaddr_l2 addr;
+ struct l2cap_options opts;
struct socket *sock;
- struct sock *sk;
+ int size;
BT_DBG("%s %s", batostr(src), batostr(dst));
goto failed;
/* Set L2CAP options */
- sk = sock->sk;
- lock_sock(sk);
- l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
- release_sock(sk);
+ size = sizeof(opts);
+ sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
+
+ opts.imtu = RFCOMM_MAX_L2CAP_MTU;
+ sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
s = rfcomm_session_add(sock, BT_BOUND);
if (!s) {
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
struct socket *sock = s->sock;
- struct kvec iv = { data, len };
+ struct iovec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p len %d", s, len);
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iv;
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return sock_sendmsg(sock, &msg, len);
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
{
struct socket *sock = s->sock;
- struct kvec iv[3];
+ struct iovec iv[3];
struct msghdr msg;
unsigned char hdr[5], crc[1];
iv[2].iov_len = 1;
memset(&msg, 0, sizeof(msg));
+ msg.msg_iovlen = 3;
+ msg.msg_iov = iv;
- return kernel_sendmsg(sock, &msg, iv, 3, 6 + len);
+ return sock_sendmsg(sock, &msg, 6 + len);
}
static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
static int rfcomm_add_listener(bdaddr_t *ba)
{
struct sockaddr_l2 addr;
+ struct l2cap_options opts;
struct socket *sock;
- struct sock *sk;
struct rfcomm_session *s;
- int err = 0;
+ int size, err = 0;
/* Create socket */
err = rfcomm_l2sock_create(&sock);
}
/* Set L2CAP options */
- sk = sock->sk;
- lock_sock(sk);
- l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
- release_sock(sk);
+ size = sizeof(opts);
+ sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
+
+ opts.imtu = RFCOMM_MAX_L2CAP_MTU;
+ sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
/* Start listening on the socket */
err = sock->ops->listen(sock, 10);
set_user_nice(current, -10);
current->flags |= PF_NOFREEZE;
+ set_fs(KERNEL_DS);
+
BT_DBG("");
rfcomm_add_listener(BDADDR_ANY);
return &br->statistics;
}
-int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_bridge *br = netdev_priv(dev);
- const unsigned char *dest = skb->data;
+ struct net_bridge *br;
+ unsigned char *dest;
struct net_bridge_fdb_entry *dst;
+ br = dev->priv;
br->statistics.tx_packets++;
br->statistics.tx_bytes += skb->len;
- skb->mac.raw = skb->data;
+ dest = skb->mac.raw = skb->data;
skb_pull(skb, ETH_HLEN);
- rcu_read_lock();
- if (dest[0] & 1)
+ if (dest[0] & 1) {
br_flood_deliver(br, skb, 0);
- else if ((dst = __br_fdb_get(br, dest)) != NULL)
+ return 0;
+ }
+
+ if ((dst = br_fdb_get(br, dest)) != NULL) {
br_deliver(dst->dst, skb);
- else
- br_flood_deliver(br, skb, 0);
+ br_fdb_put(dst);
+ return 0;
+ }
- rcu_read_unlock();
+ br_flood_deliver(br, skb, 0);
return 0;
}
+int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = __br_dev_xmit(skb, dev);
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int br_dev_open(struct net_device *dev)
{
netif_start_queue(dev);
static __inline__ void fdb_delete(struct net_bridge_fdb_entry *f)
{
- hlist_del_rcu(&f->hlist);
+ hlist_del(&f->hlist);
if (!f->is_static)
- list_del(&f->u.age_list);
+ list_del(&f->age_list);
br_fdb_put(f);
}
struct net_bridge *br = p->br;
int i;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
fdb_insert(br, p, newaddr, 1);
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
struct list_head *l, *n;
unsigned long delay;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
delay = hold_time(br);
list_for_each_safe(l, n, &br->age_list) {
- struct net_bridge_fdb_entry *f;
- unsigned long expires;
-
- f = list_entry(l, struct net_bridge_fdb_entry, u.age_list);
- expires = f->ageing_timer + delay;
+ struct net_bridge_fdb_entry *f
+ = list_entry(l, struct net_bridge_fdb_entry, age_list);
+ unsigned long expires = f->ageing_timer + delay;
if (time_before_eq(expires, jiffies)) {
WARN_ON(f->is_static);
break;
}
}
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
{
int i;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
skip_delete: ;
}
}
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
}
-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
-struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
- const unsigned char *addr)
+struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br, unsigned char *addr)
{
struct hlist_node *h;
- struct net_bridge_fdb_entry *fdb;
- hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
+ read_lock_bh(&br->hash_lock);
+
+ hlist_for_each(h, &br->hash[br_mac_hash(addr)]) {
+ struct net_bridge_fdb_entry *fdb
+ = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
+
if (!memcmp(fdb->addr.addr, addr, ETH_ALEN)) {
- if (unlikely(has_expired(br, fdb)))
- break;
+ if (has_expired(br, fdb))
+ goto ret_null;
+
+ atomic_inc(&fdb->use_count);
+ read_unlock_bh(&br->hash_lock);
return fdb;
}
}
-
+ ret_null:
+ read_unlock_bh(&br->hash_lock);
return NULL;
}
-/* Interface used by ATM hook that keeps a ref count */
-struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
- unsigned char *addr)
-{
- struct net_bridge_fdb_entry *fdb;
-
- rcu_read_lock();
- fdb = __br_fdb_get(br, addr);
- if (fdb)
- atomic_inc(&fdb->use_count);
- rcu_read_unlock();
- return fdb;
-}
-
-static void fdb_rcu_free(struct rcu_head *head)
-{
- struct net_bridge_fdb_entry *ent
- = container_of(head, struct net_bridge_fdb_entry, u.rcu);
- kmem_cache_free(br_fdb_cache, ent);
-}
-
-/* Set entry up for deletion with RCU */
void br_fdb_put(struct net_bridge_fdb_entry *ent)
{
if (atomic_dec_and_test(&ent->use_count))
- call_rcu(&ent->u.rcu, fdb_rcu_free);
+ kmem_cache_free(br_fdb_cache, ent);
}
/*
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
- rcu_read_lock();
+ read_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+ hlist_for_each_entry(f, h, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
}
out:
- rcu_read_unlock();
+ read_unlock_bh(&br->hash_lock);
return num;
}
return 0;
/* move to end of age list */
- list_del(&fdb->u.age_list);
+ list_del(&fdb->age_list);
goto update;
}
}
memcpy(fdb->addr.addr, addr, ETH_ALEN);
atomic_set(&fdb->use_count, 1);
- hlist_add_head_rcu(&fdb->hlist, &br->hash[hash]);
+ hlist_add_head(&fdb->hlist, &br->hash[hash]);
if (!timer_pending(&br->gc_timer)) {
br->gc_timer.expires = jiffies + hold_time(br);
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
if (!is_local)
- list_add_tail(&fdb->u.age_list, &br->age_list);
+ list_add_tail(&fdb->age_list, &br->age_list);
return 0;
}
{
int ret;
- spin_lock_bh(&br->hash_lock);
+ write_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, is_local);
- spin_unlock_bh(&br->hash_lock);
+ write_unlock_bh(&br->hash_lock);
return ret;
}
const struct sk_buff *skb)
{
if (skb->dev == p->dev ||
+ skb->len > p->dev->mtu ||
p->state != BR_STATE_FORWARDING)
return 0;
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
- if (skb->len > skb->dev->mtu)
- kfree_skb(skb);
- else {
#ifdef CONFIG_BRIDGE_NETFILTER
- /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
- nf_bridge_maybe_copy_header(skb);
+ /* ip_refrag calls ip_fragment, which doesn't copy the MAC header. */
+ nf_bridge_maybe_copy_header(skb);
#endif
- skb_push(skb, ETH_HLEN);
+ skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
+ dev_queue_xmit(skb);
return 0;
}
br->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&br->port_list);
- br->hash_lock = SPIN_LOCK_UNLOCKED;
+ br->hash_lock = RW_LOCK_UNLOCKED;
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
return ret;
}
-/* Mtu of the bridge pseudo-device 1500 or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
- if ((br->dev->flags & IFF_UP)
- && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ if ((br->dev->flags & IFF_UP) && (dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
- dev_set_mtu(br->dev, br_min_mtu(br));
+ br->dev->mtu = br_min_mtu(br);
}
return err;
goto out;
}
- dst = __br_fdb_get(br, dest);
+ dst = br_fdb_get(br, dest);
if (dst != NULL && dst->is_local) {
if (!passedup)
br_pass_frame_up(br, skb);
else
kfree_skb(skb);
+ br_fdb_put(dst);
goto out;
}
if (dst != NULL) {
br_forward(dst->dst, skb);
+ br_fdb_put(dst);
goto out;
}
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-struct notifier_block br_device_notifier = {
+struct notifier_block br_device_notifier =
+{
.notifier_call = br_device_event
};
-/*
- * Handle changes in state of network devices enslaved to a bridge.
- *
- * Note: don't care about up/down if bridge itself is down, because
- * port state is checked when bridge is brought up.
- */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
- struct net_bridge_port *p = dev->br_port;
+ struct net_device *dev;
+ struct net_bridge_port *p;
struct net_bridge *br;
- /* not a port of a bridge */
+ dev = ptr;
+ p = dev->br_port;
+
if (p == NULL)
return NOTIFY_DONE;
br = p->br;
- spin_lock_bh(&br->lock);
switch (event) {
- case NETDEV_CHANGEMTU:
- dev_set_mtu(br->dev, br_min_mtu(br));
- break;
-
case NETDEV_CHANGEADDR:
+ spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
- br_stp_recalculate_bridge_id(br);
+ if (br->dev->flags & IFF_UP)
+ br_stp_recalculate_bridge_id(br);
+ spin_unlock_bh(&br->lock);
break;
- case NETDEV_CHANGE: /* device is up but carrier changed */
- if (!(br->dev->flags & IFF_UP))
- break;
-
- if (netif_carrier_ok(dev)) {
- if (p->state == BR_STATE_DISABLED)
- br_stp_enable_port(p);
- } else {
- if (p->state != BR_STATE_DISABLED)
- br_stp_disable_port(p);
- }
+ case NETDEV_CHANGEMTU:
+ br->dev->mtu = br_min_mtu(br);
break;
case NETDEV_DOWN:
- if (br->dev->flags & IFF_UP)
+ if (br->dev->flags & IFF_UP) {
+ spin_lock_bh(&br->lock);
br_stp_disable_port(p);
+ spin_unlock_bh(&br->lock);
+ }
break;
case NETDEV_UP:
- if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
+ if (br->dev->flags & IFF_UP) {
+ spin_lock_bh(&br->lock);
br_stp_enable_port(p);
+ spin_unlock_bh(&br->lock);
+ }
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
- }
- spin_unlock_bh(&br->lock);
+ }
return NOTIFY_DONE;
}
{
struct hlist_node hlist;
struct net_bridge_port *dst;
- union {
- struct list_head age_list;
- struct rcu_head rcu;
- } u;
+ struct list_head age_list;
atomic_t use_count;
unsigned long ageing_timer;
mac_addr addr;
struct list_head port_list;
struct net_device *dev;
struct net_device_stats statistics;
- spinlock_t hash_lock;
+ rwlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
struct list_head age_list;
extern void br_fdb_cleanup(unsigned long arg);
extern void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *p);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
- const unsigned char *addr);
extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
- unsigned char *addr);
+ unsigned char *addr);
extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long count, unsigned long off);
int isroot = br_is_root_bridge(br);
pr_info("%s: topology change detected, %s\n", br->dev->name,
- isroot ? "propagating" : "sending tcn bpdu");
+ isroot ? "propgating" : "sending tcn bpdu");
if (isroot) {
br->topology_change = 1;
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
- if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev))
+ if (p->dev->flags & IFF_UP)
br_stp_enable_port(p);
}
#include <net/iw_handler.h>
#endif /* CONFIG_NET_RADIO */
#include <asm/current.h>
-#include <linux/vs_network.h>
/* This define, if set, will randomly drop a packet when congestion
* is more than moderate. It helps fairness in the multi-interface
*/
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
+#ifdef CONFIG_NET_FASTROUTE
+int netdev_fastroute;
+int netdev_fastroute_obstacles;
+#endif
+
#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
int hash;
spin_lock_bh(&ptype_lock);
+#ifdef CONFIG_NET_FASTROUTE
+ if (pt->af_packet_priv) {
+ netdev_fastroute_obstacles++;
+ dev_clear_fastroute(pt->dev);
+ }
+#endif
if (pt->type == htons(ETH_P_ALL)) {
netdev_nit++;
list_add_rcu(&pt->list, &ptype_all);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
+#ifdef CONFIG_NET_FASTROUTE
+ if (pt->af_packet_priv)
+ netdev_fastroute_obstacles--;
+#endif
list_del_rcu(&pt->list);
goto out;
}
return ret;
}
+#ifdef CONFIG_NET_FASTROUTE
+
+static void dev_do_clear_fastroute(struct net_device *dev)
+{
+ if (dev->accept_fastpath) {
+ int i;
+
+ for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
+ struct dst_entry *dst;
+
+ write_lock_irq(&dev->fastpath_lock);
+ dst = dev->fastpath[i];
+ dev->fastpath[i] = NULL;
+ write_unlock_irq(&dev->fastpath_lock);
+
+ dst_release(dst);
+ }
+ }
+}
+
+void dev_clear_fastroute(struct net_device *dev)
+{
+ if (dev) {
+ dev_do_clear_fastroute(dev);
+ } else {
+ read_lock(&dev_base_lock);
+ for (dev = dev_base; dev; dev = dev->next)
+ dev_do_clear_fastroute(dev);
+ read_unlock(&dev_base_lock);
+ }
+}
+#endif
+
/**
* dev_close - shutdown an interface.
* @dev: device to shutdown
*/
dev->flags &= ~IFF_UP;
+#ifdef CONFIG_NET_FASTROUTE
+ dev_clear_fastroute(dev);
+#endif
/*
* Tell people we are down
} \
}
-static inline void qdisc_run(struct net_device *dev)
-{
- while (!netif_queue_stopped(dev) &&
- qdisc_restart(dev)<0)
- /* NOTHING */;
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
__get_cpu_var(netdev_rx_stat).total++;
+#ifdef CONFIG_NET_FASTROUTE
+ if (skb->pkt_type == PACKET_FASTROUTE) {
+ __get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
+ return dev_queue_xmit(skb);
+ }
+#endif
+
skb->h.raw = skb->nh.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->mac.raw;
if ((dev->promiscuity += inc) == 0)
dev->flags &= ~IFF_PROMISC;
if (dev->flags ^ old_flags) {
+#ifdef CONFIG_NET_FASTROUTE
+ if (dev->flags & IFF_PROMISC) {
+ netdev_fastroute_obstacles++;
+ dev_clear_fastroute(dev);
+ } else
+ netdev_fastroute_obstacles--;
+#endif
dev_mc_upload(dev);
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
spin_lock_init(&dev->ingress_lock);
#endif
+#ifdef CONFIG_NET_FASTROUTE
+ dev->fastpath_lock = RW_LOCK_UNLOCKED;
+#endif
+
ret = alloc_divert_blk(dev);
if (ret)
goto out;
while (atomic_read(&dev->refcnt) != 0) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
rtnl_shlock();
+ rtnl_exlock();
/* Rebroadcast unregister notification */
notifier_call_chain(&netdev_chain,
linkwatch_run_queue();
}
+ rtnl_exunlock();
rtnl_shunlock();
rebroadcast_time = jiffies;
synchronize_net();
+#ifdef CONFIG_NET_FASTROUTE
+ dev_clear_fastroute(dev);
+#endif
+
/* Shutdown queueing discipline. */
dev_shutdown(dev);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
-EXPORT_SYMBOL(dev_change_flags);
-EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
-
+/* for 801q VLAN support */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+EXPORT_SYMBOL(dev_change_flags);
+#endif
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
EXPORT_SYMBOL(netdev_register_fc);
EXPORT_SYMBOL(netdev_unregister_fc);
#endif
+#ifdef CONFIG_NET_FASTROUTE
+EXPORT_SYMBOL(netdev_fastroute);
+EXPORT_SYMBOL(netdev_fastroute_obstacles);
+#endif
#ifdef CONFIG_NET_CLS_ACT
EXPORT_SYMBOL(ing_filter);
if (copy_to_user(useraddr, ®s, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (copy_to_user(useraddr, regbuf, regs.len))
+ if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
clear_bit(LW_RUNNING, &linkwatch_flags);
rtnl_shlock();
+ rtnl_exlock();
linkwatch_run_queue();
+ rtnl_exunlock();
rtnl_shunlock();
}
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
-module_param(count_d, int, 0);
-module_param(ipg_d, int, 0);
-module_param(cpu_speed, int, 0);
-module_param(clone_skb_d, int, 0);
+MODULE_PARM(count_d, "i");
+MODULE_PARM(ipg_d, "i");
+MODULE_PARM(cpu_speed, "i");
+MODULE_PARM(clone_skb_d, "i");
void rtnl_lock(void)
{
rtnl_shlock();
+ rtnl_exlock();
}
void rtnl_unlock(void)
{
+ rtnl_exunlock();
rtnl_shunlock();
netdev_run_todo();
struct rtnetlink_link *link_tab;
struct rtattr *rta[RTATTR_MAX];
+ int exclusive = 0;
int sz_idx, kind;
int min_len;
int family;
return -1;
}
+ if (kind != 2) {
+ if (rtnl_exlock_nowait()) {
+ *errp = 0;
+ return -1;
+ }
+ exclusive = 1;
+ }
+
memset(&rta, 0, sizeof(rta));
min_len = rtm_min[sz_idx];
goto err_inval;
err = link->doit(skb, nlh, (void *)&rta);
+ if (exclusive)
+ rtnl_exunlock();
*errp = err;
return err;
err_inval:
+ if (exclusive)
+ rtnl_exunlock();
*errp = -EINVAL;
return -1;
}
return -EFAULT;
}
-/* Keep iterating until skb_iter_next returns false. */
-void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
-{
- i->len = skb_headlen(skb);
- i->data = (unsigned char *)skb->data;
- i->nextfrag = 0;
- i->fraglist = NULL;
-}
-
-int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
-{
- /* Unmap previous, if not head fragment. */
- if (i->nextfrag)
- kunmap_skb_frag(i->data);
-
- if (i->fraglist) {
- fraglist:
- /* We're iterating through fraglist. */
- if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
- i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
- ->frags[i->nextfrag]);
- i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
- .size;
- i->nextfrag++;
- return 1;
- }
- /* Fragments with fragments? Too hard! */
- BUG_ON(skb_shinfo(i->fraglist)->frag_list);
- i->fraglist = i->fraglist->next;
- if (!i->fraglist)
- goto end;
-
- i->len = skb_headlen(i->fraglist);
- i->data = i->fraglist->data;
- i->nextfrag = 0;
- return 1;
- }
-
- if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
- i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
- i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
- i->nextfrag++;
- return 1;
- }
-
- i->fraglist = skb_shinfo(skb)->frag_list;
- if (i->fraglist)
- goto fraglist;
-
-end:
- /* Bug trap for callers */
- i->data = NULL;
- return 0;
-}
-
-void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
-{
- /* Unmap previous, if not head fragment. */
- if (i->data && i->nextfrag)
- kunmap_skb_frag(i->data);
- /* Bug trap for callers */
- i->data = NULL;
-}
-
/* Checksum skb data. */
unsigned int skb_checksum(const struct sk_buff *skb, int offset,
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
-EXPORT_SYMBOL(skb_iter_first);
-EXPORT_SYMBOL(skb_iter_next);
-EXPORT_SYMBOL(skb_iter_abort);
clear_bit(SOCK_PASS_CRED, &sock->flags);
break;
- case SO_SETXID:
- if (current->xid) {
- ret = -EPERM;
- break;
- }
- if (val < 0 || val > MAX_S_CONTEXT) {
- ret = -EINVAL;
- break;
- }
- sk->sk_xid = val;
- break;
-
case SO_TIMESTAMP:
sk->sk_rcvtstamp = valbool;
if (valbool)
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
+ /*
+ * If sendmsg cached page exists, toss it.
+ */
+ if (sk->sk_sndmsg_page) {
+ __free_page(sk->sk_sndmsg_page);
+ sk->sk_sndmsg_page = NULL;
+ }
+
security_sk_free(sk);
BUG_ON(sk->sk_vx_info);
BUG_ON(sk->sk_nx_info);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
- struct msghdr msg = {.msg_flags = flags};
- struct kvec iov;
- char *kaddr = kmap(page);
- iov.iov_base = kaddr + offset;
+ struct msghdr msg;
+ struct iovec iov;
+ mm_segment_t old_fs;
+ char *kaddr;
+
+ kaddr = kmap(page);
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = flags;
+
+ /* This cast is ok because of the "set_fs(KERNEL_DS)" */
+ iov.iov_base = (void __user *) (kaddr + offset);
iov.iov_len = size;
- res = kernel_sendmsg(sock, &msg, &iov, 1, size);
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ res = sock_sendmsg(sock, &msg, size);
+ set_fs(old_fs);
+
kunmap(page);
return res;
}
.mode = 0644,
.proc_handler = &proc_dointvec
},
+#ifdef CONFIG_NET_FASTROUTE
+ {
+ .ctl_name = NET_CORE_FASTROUTE,
+ .procname = "netdev_fastroute",
+ .data = &netdev_fastroute,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+#endif
{
.ctl_name = NET_CORE_MSG_COST,
.procname = "message_cost",
static int max_priority[] = { 127 }; /* From DECnet spec */
static int dn_forwarding_proc(ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
+ void __user *, size_t *);
static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
static int dn_forwarding_proc(ctl_table *table, int write,
struct file *filep,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
#ifdef CONFIG_DECNET_ROUTER
struct net_device *dev = table->extra1;
dn_db = dev->dn_ptr;
old = dn_db->parms.forwarding;
- err = proc_dointvec(table, write, filep, buffer, lenp, ppos);
+ err = proc_dointvec(table, write, filep, buffer, lenp);
if ((err >= 0) && write) {
if (dn_db->parms.forwarding < 0)
static int dn_node_address_handler(ctl_table *table, int write,
struct file *filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char addr[DN_ASCBUF_LEN];
size_t len;
dn_address dnaddr;
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
dn_dev_devices_on();
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
return -EFAULT;
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static int dn_def_dev_handler(ctl_table *table, int write,
struct file * filp,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
size_t len;
struct net_device *dev;
char devname[17];
- if (!*lenp || (*ppos && !write)) {
+ if (!*lenp || (filp->f_pos && !write)) {
*lenp = 0;
return 0;
}
dev_put(dev);
return -ENODEV;
}
- *ppos += *lenp;
+ filp->f_pos += *lenp;
return 0;
}
return -EFAULT;
*lenp = len;
- *ppos += len;
+ filp->f_pos += len;
return 0;
}
static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_port = htons(AUN_PORT),
- .sin_addr = {.s_addr = addr}
- };
- struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
- struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
+ struct sockaddr_in sin;
+ struct iovec iov;
+ struct aunhdr ah;
struct msghdr udpmsg;
+ int err;
+ mm_segment_t oldfs;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(AUN_PORT);
+ sin.sin_addr.s_addr = addr;
+
+ ah.code = code;
+ ah.pad = 0;
+ ah.port = 0;
+ ah.cb = cb;
+ ah.handle = seq;
+
+ iov.iov_base = (void *)&ah;
+ iov.iov_len = sizeof(ah);
+
udpmsg.msg_name = (void *)&sin;
udpmsg.msg_namelen = sizeof(sin);
+ udpmsg.msg_iov = &iov;
+ udpmsg.msg_iovlen = 1;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
- kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ err = sock_sendmsg(udpsock, &udpmsg, sizeof(ah));
+ set_fs(oldfs);
}
ip_input.o ip_fragment.o ip_forward.o ip_options.o \
ip_output.o ip_sockglue.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o tcp_minisocks.o \
- tcp_diag.o datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
+ tcp_diag.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
sysctl_net_ipv4.o fib_frontend.o fib_semantics.o fib_hash.o
obj-$(CONFIG_PROC_FS) += proc.o
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
-#include <linux/vs_limit.h>
+#include <linux/vs_base.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
DEFINE_SNMP_STAT(struct linux_mib, net_statistics);
if (inet->opt)
kfree(inet->opt);
- vx_sock_dec(sk);
clr_vx_info(&sk->sk_vx_info);
sk->sk_xid = -1;
clr_nx_info(&sk->sk_nx_info);
set_vx_info(&sk->sk_vx_info, current->vx_info);
sk->sk_xid = vx_current_xid();
- vx_sock_inc(sk);
set_nx_info(&sk->sk_nx_info, current->nx_info);
sk->sk_nid = nx_current_nid();
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
sock->sk = NULL;
- vx_sock_dec(sk);
clr_vx_info(&sk->sk_vx_info);
sk->sk_xid = -1;
clr_nx_info(&sk->sk_nx_info);
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
+ iph->daddr = top_iph->daddr;
if (top_iph->ihl != 5) {
- iph->daddr = top_iph->daddr;
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
- if (top_iph->ihl != 5) {
- top_iph->daddr = iph->daddr;
+ top_iph->daddr = iph->daddr;
+ if (top_iph->ihl != 5)
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
- }
ip_send_check(top_iph);
if (ip_route_output_key(&rt, &fl) < 0)
return 1;
if (rt->u.dst.dev != dev) {
- NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
+ NET_INC_STATS_BH(ArpFilter);
flag = 1;
}
ip_rt_put(rt);
+++ /dev/null
-/*
- * common UDP/RAW code
- * Linux INET implementation
- *
- * Authors:
- * Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <net/route.h>
-
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct inet_opt *inet = inet_sk(sk);
- struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
- struct rtable *rt;
- u32 saddr;
- int oif;
- int err;
-
-
- if (addr_len < sizeof(*usin))
- return -EINVAL;
-
- if (usin->sin_family != AF_INET)
- return -EAFNOSUPPORT;
-
- sk_dst_reset(sk);
-
- oif = sk->sk_bound_dev_if;
- saddr = inet->saddr;
- if (MULTICAST(usin->sin_addr.s_addr)) {
- if (!oif)
- oif = inet->mc_index;
- if (!saddr)
- saddr = inet->mc_addr;
- }
- err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
- RT_CONN_FLAGS(sk), oif,
- sk->sk_protocol,
- inet->sport, usin->sin_port, sk);
- if (err)
- return err;
- if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
- ip_rt_put(rt);
- return -EACCES;
- }
- if (!inet->saddr)
- inet->saddr = rt->rt_src; /* Update source address */
- if (!inet->rcv_saddr)
- inet->rcv_saddr = rt->rt_src;
- inet->daddr = rt->rt_dst;
- inet->dport = usin->sin_port;
- sk->sk_state = TCP_ESTABLISHED;
- inet->id = jiffies;
-
- sk_dst_set(sk, &rt->u.dst);
- return(0);
-}
-
-EXPORT_SYMBOL(ip4_datagram_connect);
-
static int devinet_sysctl_forward(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val) {
if (valp == &ipv4_devconf.forwarding)
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val)
rt_cache_flush(0);
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
*/
struct icmp_control {
- int output_entry; /* Field for increment on output */
- int input_entry; /* Field for increment on input */
+ int output_off; /* Field offset for increment on output */
+ int input_off; /* Field offset for increment on input */
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static void icmp_out_count(int type)
{
if (type <= NR_ICMP_TYPES) {
- ICMP_INC_STATS(icmp_pointers[type].output_entry);
- ICMP_INC_STATS(ICMP_MIB_OUTMSGS);
+ ICMP_INC_STATS_FIELD(icmp_pointers[type].output_off);
+ ICMP_INC_STATS(IcmpOutMsgs);
}
}
.saddr = saddr,
.tos = RT_TOS(tos) } },
.proto = IPPROTO_ICMP };
+
if (ip_route_output_key(&rt, &fl))
goto out_unlock;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
struct icmphdr *icmph;
struct rtable *rt = (struct rtable *)skb->dst;
- ICMP_INC_STATS_BH(ICMP_MIB_INMSGS);
+ ICMP_INC_STATS_BH(IcmpInMsgs);
switch (skb->ip_summed) {
case CHECKSUM_HW:
}
}
- ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
+ ICMP_INC_STATS_BH_FIELD(icmp_pointers[icmph->type].input_off);
icmp_pointers[icmph->type].handler(skb);
drop:
kfree_skb(skb);
return 0;
error:
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto drop;
}
*/
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
- .output_entry = ICMP_MIB_OUTECHOREPS,
- .input_entry = ICMP_MIB_INECHOREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutEchoReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInEchoReps),
.handler = icmp_discard,
},
[1] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib,IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[2] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib,IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
- .output_entry = ICMP_MIB_OUTDESTUNREACHS,
- .input_entry = ICMP_MIB_INDESTUNREACHS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutDestUnreachs),
+ .input_off = offsetof(struct icmp_mib, IcmpInDestUnreachs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
- .output_entry = ICMP_MIB_OUTSRCQUENCHS,
- .input_entry = ICMP_MIB_INSRCQUENCHS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutSrcQuenchs),
+ .input_off = offsetof(struct icmp_mib, IcmpInSrcQuenchs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
- .output_entry = ICMP_MIB_OUTREDIRECTS,
- .input_entry = ICMP_MIB_INREDIRECTS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutRedirects),
+ .input_off = offsetof(struct icmp_mib, IcmpInRedirects),
.handler = icmp_redirect,
.error = 1,
},
#ifdef CONFIG_ICMP_IPOD
[6] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_ping_of_death,
.error = 1,
},
#else
[6] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
#endif
[7] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
- .output_entry = ICMP_MIB_OUTECHOS,
- .input_entry = ICMP_MIB_INECHOS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutEchos),
+ .input_off = offsetof(struct icmp_mib, IcmpInEchos),
.handler = icmp_echo,
},
[9] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[10] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_INERRORS,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, IcmpInErrors),
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
- .output_entry = ICMP_MIB_OUTTIMEEXCDS,
- .input_entry = ICMP_MIB_INTIMEEXCDS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimeExcds),
+ .input_off = offsetof(struct icmp_mib,IcmpInTimeExcds),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
- .output_entry = ICMP_MIB_OUTPARMPROBS,
- .input_entry = ICMP_MIB_INPARMPROBS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutParmProbs),
+ .input_off = offsetof(struct icmp_mib, IcmpInParmProbs),
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
- .output_entry = ICMP_MIB_OUTTIMESTAMPS,
- .input_entry = ICMP_MIB_INTIMESTAMPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimestamps),
+ .input_off = offsetof(struct icmp_mib, IcmpInTimestamps),
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
- .output_entry = ICMP_MIB_OUTTIMESTAMPREPS,
- .input_entry = ICMP_MIB_INTIMESTAMPREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutTimestampReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInTimestampReps),
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
- .output_entry = ICMP_MIB_DUMMY,
- .input_entry = ICMP_MIB_DUMMY,
+ .output_off = offsetof(struct icmp_mib, dummy),
+ .input_off = offsetof(struct icmp_mib, dummy),
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
- .output_entry = ICMP_MIB_OUTADDRMASKS,
- .input_entry = ICMP_MIB_INADDRMASKS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutAddrMasks),
+ .input_off = offsetof(struct icmp_mib, IcmpInAddrMasks),
.handler = icmp_address,
},
[ICMP_ADDRESSREPLY] = {
- .output_entry = ICMP_MIB_OUTADDRMASKREPS,
- .input_entry = ICMP_MIB_INADDRMASKREPS,
+ .output_off = offsetof(struct icmp_mib, IcmpOutAddrMaskReps),
+ .input_off = offsetof(struct icmp_mib, IcmpInAddrMaskReps),
.handler = icmp_address_reply,
},
};
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq,
- "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
+ seq_printf(seq,
+ "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
struct ip_mc_list *im = (struct ip_mc_list *)v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP_INC_STATS_BH(OutForwDatagrams);
if (unlikely(opt->optlen))
ip_forward_options(skb);
spin_unlock(&qp->lock);
ipq_put(qp);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
}
}
ipq_kill(qp);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmTimeout);
+ IP_INC_STATS_BH(ReasmFails);
if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
struct sk_buff *head = qp->fragments;
iph = head->nh.iph;
iph->frag_off = 0;
iph->tot_len = htons(len);
- IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP_INC_STATS_BH(ReasmOKs);
qp->fragments = NULL;
return head;
"Oversized IP packet from %d.%d.%d.%d.\n",
NIPQUAD(qp->saddr));
out_fail:
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
return NULL;
}
struct ipq *qp;
struct net_device *dev;
- IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+ IP_INC_STATS_BH(ReasmReqds);
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
return ret;
}
- IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(ReasmFails);
kfree_skb(skb);
return NULL;
}
protocol = -ret;
goto resubmit;
}
- IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP_INC_STATS_BH(InDelivers);
} else {
if (!raw_sk) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+ IP_INC_STATS_BH(InUnknownProtos);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
- IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP_INC_STATS_BH(InDelivers);
kfree_skb(skb);
}
}
*/
if (skb_cow(skb, skb_headroom(skb))) {
- IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP_INC_STATS_BH(InDiscards);
goto drop;
}
iph = skb->nh.iph;
return dst_input(skb);
inhdr_error:
- IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
return NET_RX_DROP;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
+ IP_INC_STATS_BH(InReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP_INC_STATS_BH(InDiscards);
goto out;
}
ip_rcv_finish);
inhdr_error:
- IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
out:
/*
* If the indicated interface is up and running, send the packet.
*/
- IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP_INC_STATS(OutRequests);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
{
struct sk_buff *skb = *pskb;
- IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP_INC_STATS(OutRequests);
if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list) &&
!skb_shinfo(skb)->tso_size)
dst_output);
no_route:
- IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(OutNoRoutes);
kfree_skb(skb);
return -EHOSTUNREACH;
}
}
if (err == 0) {
- IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP_INC_STATS(FragOKs);
return 0;
}
kfree_skb(frag);
frag = skb;
}
- IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(FragFails);
return err;
}
* Put this fragment into the sending queue.
*/
- IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+ IP_INC_STATS(FragCreates);
iph->tot_len = htons(len + hlen);
goto fail;
}
kfree_skb(skb);
- IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP_INC_STATS(FragOKs);
return err;
fail:
kfree_skb(skb);
- IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(FragFails);
return err;
}
error:
inet->cork.length -= length;
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
error:
inet->cork.length -= size;
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
return err;
error:
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
goto out;
}
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP_INC_STATS_BH(OutForwDatagrams);
if (unlikely(opt->optlen))
ip_forward_options(skb);
to blackhole.
*/
- IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS_BH(FragFails);
ip_rt_put(rt);
goto out_free;
}
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = table->data;
int val = *valp;
int rc;
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, filp, buffer, lenp);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = table->data;
int val[2];
/* backup the value first */
memcpy(val, valp, sizeof(val));
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, filp, buffer, lenp);
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
* First port is set to the default port.
*/
static int ports[IP_VS_APP_MAX_PORTS] = {21, 0};
-static int ports_c;
-module_param_array(ports, int, ports_c, 0);
/*
* Debug level
*/
#ifdef CONFIG_IP_VS_DEBUG
static int debug=0;
-module_param(debug, int, 0);
+MODULE_PARM(debug, "i");
#endif
+MODULE_PARM(ports, "1-" __MODULE_STRING(IP_VS_APP_MAX_PORTS) "i");
/* Dummy variable */
static int ip_vs_ftp_pasv;
while (data <= data_limit - 6) {
if (strnicmp(data, "PASV\r\n", 6) == 0) {
/* Passive mode on */
- IP_VS_DBG(1-debug, "got PASV at %zd of %zd\n",
+ IP_VS_DBG(1-debug, "got PASV at %d of %d\n",
data - data_start,
data_limit - data_start);
cp->app_data = &ip_vs_ftp_pasv;
(*pskb)->len - tcphoff,
cp->protocol,
(*pskb)->csum);
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
pp->name, tcph->check,
(char*)&(tcph->check) - (char*)tcph);
}
(*pskb)->csum);
if (udph->check == 0)
udph->check = 0xFFFF;
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
pp->name, udph->check,
(char*)&(udph->check) - (char*)udph);
}
static int
ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
{
- struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
- struct kvec iov;
+ struct msghdr msg;
+ mm_segment_t oldfs;
+ struct iovec iov;
int len;
EnterFunction(7);
iov.iov_base = (void *)buffer;
iov.iov_len = length;
-
- len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
+ msg.msg_name = 0;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL;
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_sendmsg(sock, &msg, (size_t)(length));
+ set_fs(oldfs);
LeaveFunction(7);
return len;
static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
- struct msghdr msg = {NULL,};
- struct kvec iov;
+ struct msghdr msg;
+ struct iovec iov;
int len;
+ mm_segment_t oldfs;
EnterFunction(7);
/* Receive a packet */
iov.iov_base = buffer;
iov.iov_len = (size_t)buflen;
-
- len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_recvmsg(sock, &msg, buflen, 0);
+ set_fs(oldfs);
if (len < 0)
return -1;
conntrack->ct_general.destroy = destroy_conntrack;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
- conntrack->xid[IP_CT_DIR_ORIGINAL] = -1;
conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
- conntrack->xid[IP_CT_DIR_REPLY] = -1;
for (i=0; i < IP_CT_NUMBER; i++)
conntrack->infos[i].master = &conntrack->ct_general;
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
static int loose;
MODULE_PARM(loose, "i");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
MODULE_PARM(max_dcc_channels, "i");
MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per IRC session");
MODULE_PARM(dcc_timeout, "i");
MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");
+#endif
static char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " };
#define MINMATCHLEN 5
+++ /dev/null
-/*
- * ip_conntrack_pptp.c - Version 2.0
- *
- * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft. PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702. Documentation of
- * PPTP can be found in RFC 2637
- *
- * (C) 2000-2003 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * Limitations:
- * - We blindly assume that control connections are always
- * established in PNS->PAC direction. This is a violation
- * of RFFC2673
- *
- * TODO: - finish support for multiple calls within one session
- * (needs expect reservations in newnat)
- * - testing of incoming PPTP calls
- *
- * Changes:
- * 2002-02-05 - Version 1.3
- * - Call ip_conntrack_unexpect_related() from
- * pptp_timeout_related() to destroy expectations in case
- * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen
- * (Philip Craig <philipc@snapgear.com>)
- * - Add Version information at module loadtime
- * 2002-02-10 - Version 1.6
- * - move to C99 style initializers
- * - remove second expectation if first arrives
- * 2004-10-22 - Version 2.0
- * - merge Mandrake's 2.6.x port with recent 2.6.x API changes
- * - fix lots of linear skb assumptions from Mandrake's port
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_CT_PPTP_VERSION "2.0"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
-
-DECLARE_LOCK(ip_pptp_lock);
-
-#if 0
-#include "ip_conntrack_pptp_priv.h"
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#define SECS *HZ
-#define MINS * 60 SECS
-#define HOURS * 60 MINS
-#define DAYS * 24 HOURS
-
-#define PPTP_GRE_TIMEOUT (10 MINS)
-#define PPTP_GRE_STREAM_TIMEOUT (5 DAYS)
-
-static int pptp_expectfn(struct ip_conntrack *ct)
-{
- struct ip_conntrack *master;
- struct ip_conntrack_expect *exp;
-
- DEBUGP("increasing timeouts\n");
- /* increase timeout of GRE data channel conntrack entry */
- ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
- ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
-
- master = master_ct(ct);
- if (!master) {
- DEBUGP(" no master!!!\n");
- return 0;
- }
-
- exp = ct->master;
- if (!exp) {
- DEBUGP("no expectation!!\n");
- return 0;
- }
-
- DEBUGP("completing tuples with ct info\n");
- /* we can do this, since we're unconfirmed */
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(master->help.ct_pptp_info.pac_call_id)) {
- /* assume PNS->PAC */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(master->help.ct_pptp_info.pns_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(master->help.ct_pptp_info.pns_call_id);
- } else {
- /* assume PAC->PNS */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(master->help.ct_pptp_info.pac_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(master->help.ct_pptp_info.pac_call_id);
- }
-
- /* delete other expectation */
- if (exp->expected_list.next != &exp->expected_list) {
- struct ip_conntrack_expect *other_exp;
- struct list_head *cur_item, *next;
-
- for (cur_item = master->sibling_list.next;
- cur_item != &master->sibling_list; cur_item = next) {
- next = cur_item->next;
- other_exp = list_entry(cur_item,
- struct ip_conntrack_expect,
- expected_list);
- /* remove only if occurred at same sequence number */
- if (other_exp != exp && other_exp->seq == exp->seq) {
- DEBUGP("unexpecting other direction\n");
- ip_ct_gre_keymap_destroy(other_exp);
- ip_conntrack_unexpect_related(other_exp);
- }
- }
- }
-
- return 0;
-}
-
-/* timeout GRE data connections */
-static int pptp_timeout_related(struct ip_conntrack *ct)
-{
- struct list_head *cur_item, *next;
- struct ip_conntrack_expect *exp;
-
- /* FIXME: do we have to lock something ? */
- for (cur_item = ct->sibling_list.next;
- cur_item != &ct->sibling_list; cur_item = next) {
- next = cur_item->next;
- exp = list_entry(cur_item, struct ip_conntrack_expect,
- expected_list);
-
- ip_ct_gre_keymap_destroy(exp);
- if (!exp->sibling) {
- ip_conntrack_unexpect_related(exp);
- continue;
- }
-
- DEBUGP("setting timeout of conntrack %p to 0\n",
- exp->sibling);
- exp->sibling->proto.gre.timeout = 0;
- exp->sibling->proto.gre.stream_timeout = 0;
- /* refresh_acct will not modify counters if skb == NULL */
- ip_ct_refresh_acct(exp->sibling, 0, NULL, 0);
- }
-
- return 0;
-}
-
-/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
-static inline int
-exp_gre(struct ip_conntrack *master,
- u_int32_t seq,
- u_int16_t callid,
- u_int16_t peer_callid)
-{
- struct ip_conntrack_tuple inv_tuple;
- struct ip_conntrack_tuple exp_tuples[] = {
- /* tuple in original direction, PNS->PAC */
- { .src = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip,
- .u = { .gre = { .key = htonl(ntohs(peer_callid)) } }
- },
- .dst = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip,
- .u = { .gre = { .key = htonl(ntohs(callid)) } },
- .protonum = IPPROTO_GRE
- },
- },
- /* tuple in reply direction, PAC->PNS */
- { .src = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip,
- .u = { .gre = { .key = htonl(ntohs(callid)) } }
- },
- .dst = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip,
- .u = { .gre = { .key = htonl(ntohs(peer_callid)) } },
- .protonum = IPPROTO_GRE
- },
- }
- }, *exp_tuple;
-
- for (exp_tuple = exp_tuples; exp_tuple < &exp_tuples[2]; exp_tuple++) {
- struct ip_conntrack_expect *exp;
-
- exp = ip_conntrack_expect_alloc();
- if (exp == NULL)
- return 1;
-
- memcpy(&exp->tuple, exp_tuple, sizeof(exp->tuple));
-
- exp->mask.src.ip = 0xffffffff;
- exp->mask.src.u.all = 0;
- exp->mask.dst.u.all = 0;
- exp->mask.dst.u.gre.key = 0xffffffff;
- exp->mask.dst.ip = 0xffffffff;
- exp->mask.dst.protonum = 0xffff;
-
- exp->seq = seq;
- exp->expectfn = pptp_expectfn;
-
- exp->help.exp_pptp_info.pac_call_id = ntohs(callid);
- exp->help.exp_pptp_info.pns_call_id = ntohs(peer_callid);
-
- DEBUGP("calling expect_related ");
- DUMP_TUPLE_RAW(&exp->tuple);
-
- /* Add GRE keymap entries */
- if (ip_ct_gre_keymap_add(exp, &exp->tuple, 0) != 0) {
- kfree(exp);
- return 1;
- }
-
- invert_tuplepr(&inv_tuple, &exp->tuple);
- if (ip_ct_gre_keymap_add(exp, &inv_tuple, 1) != 0) {
- ip_ct_gre_keymap_destroy(exp);
- kfree(exp);
- return 1;
- }
-
- if (ip_conntrack_expect_related(exp, master) != 0) {
- ip_ct_gre_keymap_destroy(exp);
- kfree(exp);
- DEBUGP("cannot expect_related()\n");
- return 1;
- }
- }
-
- return 0;
-}
-
-static inline int
-pptp_inbound_pkt(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int ctlhoff,
- size_t datalen,
- struct ip_conntrack *ct)
-{
- struct PptpControlHeader _ctlh, *ctlh;
- unsigned int reqlen;
- union pptp_ctrl_union _pptpReq, *pptpReq;
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- u_int16_t msg, *cid, *pcid;
- u_int32_t seq;
-
- ctlh = skb_header_pointer(skb, ctlhoff, sizeof(_ctlh), &_ctlh);
- if (unlikely(!ctlh)) {
- DEBUGP("error during skb_header_pointer\n");
- return NF_ACCEPT;
- }
-
- reqlen = datalen - sizeof(struct pptp_pkt_hdr) - sizeof(_ctlh);
- pptpReq = skb_header_pointer(skb, ctlhoff+sizeof(struct pptp_pkt_hdr),
- reqlen, &_pptpReq);
- if (unlikely(!pptpReq)) {
- DEBUGP("error during skb_header_pointer\n");
- return NF_ACCEPT;
- }
-
- msg = ntohs(ctlh->messageType);
- DEBUGP("inbound control message %s\n", strMName[msg]);
-
- switch (msg) {
- case PPTP_START_SESSION_REPLY:
- if (reqlen < sizeof(_pptpReq.srep)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms new control session */
- if (info->sstate < PPTP_SESSION_REQUESTED) {
- DEBUGP("%s without START_SESS_REQUEST\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->srep.resultCode == PPTP_START_OK)
- info->sstate = PPTP_SESSION_CONFIRMED;
- else
- info->sstate = PPTP_SESSION_ERROR;
- break;
-
- case PPTP_STOP_SESSION_REPLY:
- if (reqlen < sizeof(_pptpReq.strep)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms end of control session */
- if (info->sstate > PPTP_SESSION_STOPREQ) {
- DEBUGP("%s without STOP_SESS_REQUEST\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->strep.resultCode == PPTP_STOP_OK)
- info->sstate = PPTP_SESSION_NONE;
- else
- info->sstate = PPTP_SESSION_ERROR;
- break;
-
- case PPTP_OUT_CALL_REPLY:
- if (reqlen < sizeof(_pptpReq.ocack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server accepted call, we now expect GRE frames */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- if (info->cstate != PPTP_CALL_OUT_REQ &&
- info->cstate != PPTP_CALL_OUT_CONF) {
- DEBUGP("%s without OUTCALL_REQ\n", strMName[msg]);
- break;
- }
- if (pptpReq->ocack.resultCode != PPTP_OUTCALL_CONNECT) {
- info->cstate = PPTP_CALL_NONE;
- break;
- }
-
- cid = &pptpReq->ocack.callID;
- pcid = &pptpReq->ocack.peersCallID;
-
- info->pac_call_id = ntohs(*cid);
-
- if (htons(info->pns_call_id) != *pcid) {
- DEBUGP("%s for unknown callid %u\n",
- strMName[msg], ntohs(*pcid));
- break;
- }
-
- DEBUGP("%s, CID=%X, PCID=%X\n", strMName[msg],
- ntohs(*cid), ntohs(*pcid));
-
- info->cstate = PPTP_CALL_OUT_CONF;
-
- seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
- + sizeof(struct PptpControlHeader)
- + ((void *)pcid - (void *)pptpReq);
-
- if (exp_gre(ct, seq, *cid, *pcid) != 0)
- printk("ip_conntrack_pptp: error during exp_gre\n");
- break;
-
- case PPTP_IN_CALL_REQUEST:
- if (reqlen < sizeof(_pptpReq.icack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server tells us about incoming call request */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- pcid = &pptpReq->icack.peersCallID;
- DEBUGP("%s, PCID=%X\n", strMName[msg], ntohs(*pcid));
- info->cstate = PPTP_CALL_IN_REQ;
- info->pac_call_id = ntohs(*pcid);
- break;
-
- case PPTP_IN_CALL_CONNECT:
- if (reqlen < sizeof(_pptpReq.iccon)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server tells us about incoming call established */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n", strMName[msg]);
- break;
- }
- if (info->sstate != PPTP_CALL_IN_REP
- && info->sstate != PPTP_CALL_IN_CONF) {
- DEBUGP("%s but never sent IN_CALL_REPLY\n",
- strMName[msg]);
- break;
- }
-
- pcid = &pptpReq->iccon.peersCallID;
- cid = &info->pac_call_id;
-
- if (info->pns_call_id != ntohs(*pcid)) {
- DEBUGP("%s for unknown CallID %u\n",
- strMName[msg], ntohs(*cid));
- break;
- }
-
- DEBUGP("%s, PCID=%X\n", strMName[msg], ntohs(*pcid));
- info->cstate = PPTP_CALL_IN_CONF;
-
- /* we expect a GRE connection from PAC to PNS */
- seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
- + sizeof(struct PptpControlHeader)
- + ((void *)pcid - (void *)pptpReq);
-
- if (exp_gre(ct, seq, *cid, *pcid) != 0)
- printk("ip_conntrack_pptp: error during exp_gre\n");
-
- break;
-
- case PPTP_CALL_DISCONNECT_NOTIFY:
- if (reqlen < sizeof(_pptpReq.disc)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* server confirms disconnect */
- cid = &pptpReq->disc.callID;
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*cid));
- info->cstate = PPTP_CALL_NONE;
-
- /* untrack this call id, unexpect GRE packets */
- pptp_timeout_related(ct);
- break;
-
- case PPTP_WAN_ERROR_NOTIFY:
- break;
-
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* I don't have to explain these ;) */
- break;
- default:
- DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)
- ? strMName[msg]:strMName[0], msg);
- break;
- }
-
- return NF_ACCEPT;
-
-}
-
-static inline int
-pptp_outbound_pkt(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int ctlhoff,
- size_t datalen,
- struct ip_conntrack *ct)
-{
- struct PptpControlHeader _ctlh, *ctlh;
- unsigned int reqlen;
- union pptp_ctrl_union _pptpReq, *pptpReq;
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- u_int16_t msg, *cid, *pcid;
-
- ctlh = skb_header_pointer(skb, ctlhoff, sizeof(_ctlh), &_ctlh);
- if (!ctlh)
- return NF_ACCEPT;
-
- reqlen = datalen - sizeof(struct pptp_pkt_hdr) - sizeof(_ctlh);
- pptpReq = skb_header_pointer(skb, ctlhoff+sizeof(_ctlh), reqlen,
- &_pptpReq);
- if (!pptpReq)
- return NF_ACCEPT;
-
- msg = ntohs(ctlh->messageType);
- DEBUGP("outbound control message %s\n", strMName[msg]);
-
- switch (msg) {
- case PPTP_START_SESSION_REQUEST:
- /* client requests for new control session */
- if (info->sstate != PPTP_SESSION_NONE) {
- DEBUGP("%s but we already have one",
- strMName[msg]);
- }
- info->sstate = PPTP_SESSION_REQUESTED;
- break;
- case PPTP_STOP_SESSION_REQUEST:
- /* client requests end of control session */
- info->sstate = PPTP_SESSION_STOPREQ;
- break;
-
- case PPTP_OUT_CALL_REQUEST:
- if (reqlen < sizeof(_pptpReq.ocreq)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* client initiating connection to server */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("%s but no session\n",
- strMName[msg]);
- break;
- }
- info->cstate = PPTP_CALL_OUT_REQ;
- /* track PNS call id */
- cid = &pptpReq->ocreq.callID;
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*cid));
- info->pns_call_id = ntohs(*cid);
- break;
- case PPTP_IN_CALL_REPLY:
- if (reqlen < sizeof(_pptpReq.icack)) {
- DEBUGP("%s: short packet\n", strMName[msg]);
- break;
- }
-
- /* client answers incoming call */
- if (info->cstate != PPTP_CALL_IN_REQ
- && info->cstate != PPTP_CALL_IN_REP) {
- DEBUGP("%s without incall_req\n",
- strMName[msg]);
- break;
- }
- if (pptpReq->icack.resultCode != PPTP_INCALL_ACCEPT) {
- info->cstate = PPTP_CALL_NONE;
- break;
- }
- pcid = &pptpReq->icack.peersCallID;
- if (info->pac_call_id != ntohs(*pcid)) {
- DEBUGP("%s for unknown call %u\n",
- strMName[msg], ntohs(*pcid));
- break;
- }
- DEBUGP("%s, CID=%X\n", strMName[msg], ntohs(*pcid));
- /* part two of the three-way handshake */
- info->cstate = PPTP_CALL_IN_REP;
- info->pns_call_id = ntohs(pptpReq->icack.callID);
- break;
-
- case PPTP_CALL_CLEAR_REQUEST:
- /* client requests hangup of call */
- if (info->sstate != PPTP_SESSION_CONFIRMED) {
- DEBUGP("CLEAR_CALL but no session\n");
- break;
- }
- /* FUTURE: iterate over all calls and check if
- * call ID is valid. We don't do this without newnat,
- * because we only know about last call */
- info->cstate = PPTP_CALL_CLEAR_REQ;
- break;
- case PPTP_SET_LINK_INFO:
- break;
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* I don't have to explain these ;) */
- break;
- default:
- DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)?
- strMName[msg]:strMName[0], msg);
- /* unknown: no need to create GRE masq table entry */
- break;
- }
-
- return NF_ACCEPT;
-}
-
-
-/* track caller id inside control connection, call expect_related */
-static int
-conntrack_pptp_help(struct sk_buff *skb,
- struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
-
-{
- struct pptp_pkt_hdr _pptph, *pptph;
-
- struct tcphdr _tcph, *tcph;
- u_int32_t tcplen = skb->len - skb->nh.iph->ihl * 4;
- u_int32_t datalen;
- void *datalimit;
- int dir = CTINFO2DIR(ctinfo);
- struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
- unsigned int nexthdr_off;
-
- int oldsstate, oldcstate;
- int ret;
-
- /* don't do any tracking before tcp handshake complete */
- if (ctinfo != IP_CT_ESTABLISHED
- && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
- DEBUGP("ctinfo = %u, skipping\n", ctinfo);
- return NF_ACCEPT;
- }
-
- nexthdr_off = skb->nh.iph->ihl*4;
- tcph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_tcph),
- &_tcph);
- if (!tcph)
- return NF_ACCEPT;
-
- /* not a complete TCP header? */
- if (tcplen < sizeof(struct tcphdr) || tcplen < tcph->doff * 4) {
- DEBUGP("tcplen = %u\n", tcplen);
- return NF_ACCEPT;
- }
-
-
- datalen = tcplen - tcph->doff * 4;
-
- /* checksum invalid? */
- if (tcp_v4_check(tcph, tcplen, skb->nh.iph->saddr, skb->nh.iph->daddr,
- csum_partial((char *) tcph, tcplen, 0))) {
- printk(KERN_NOTICE __FILE__ ": bad csum\n");
- /* W2K PPTP server sends TCP packets with wrong checksum :(( */
- //return NF_ACCEPT;
- }
-
- if (tcph->fin || tcph->rst) {
- DEBUGP("RST/FIN received, timeouting GRE\n");
- /* can't do this after real newnat */
- info->cstate = PPTP_CALL_NONE;
-
- /* untrack this call id, unexpect GRE packets */
- pptp_timeout_related(ct);
- }
-
- nexthdr_off += tcph->doff*4;
- pptph = skb_header_pointer(skb, skb->nh.iph->ihl*4 + tcph->doff*4,
- sizeof(_pptph), &_pptph);
- if (!pptph) {
- DEBUGP("no full PPTP header, can't track\n");
- return NF_ACCEPT;
- }
-
- datalimit = (void *) pptph + datalen;
-
- /* if it's not a control message we can't do anything with it */
- if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
- ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
- DEBUGP("not a control packet\n");
- return NF_ACCEPT;
- }
-
- oldsstate = info->sstate;
- oldcstate = info->cstate;
-
- LOCK_BH(&ip_pptp_lock);
-
- nexthdr_off += sizeof(_pptph);
- /* FIXME: We just blindly assume that the control connection is always
- * established from PNS->PAC. However, RFC makes no guarantee */
- if (dir == IP_CT_DIR_ORIGINAL)
- /* client -> server (PNS -> PAC) */
- ret = pptp_outbound_pkt(skb, tcph, nexthdr_off, datalen, ct);
- else
- /* server -> client (PAC -> PNS) */
- ret = pptp_inbound_pkt(skb, tcph, nexthdr_off, datalen, ct);
- DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
- oldsstate, info->sstate, oldcstate, info->cstate);
- UNLOCK_BH(&ip_pptp_lock);
-
- return ret;
-}
-
-/* control protocol helper */
-static struct ip_conntrack_helper pptp = {
- .list = { NULL, NULL },
- .name = "pptp",
- .flags = IP_CT_HELPER_F_REUSE_EXPECT,
- .me = THIS_MODULE,
- .max_expected = 2,
- .timeout = 0,
- .tuple = { .src = { .ip = 0,
- .u = { .tcp = { .port =
- __constant_htons(PPTP_CONTROL_PORT) } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = IPPROTO_TCP
- }
- },
- .mask = { .src = { .ip = 0,
- .u = { .tcp = { .port = 0xffff } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = 0xffff
- }
- },
- .help = conntrack_pptp_help
-};
-
-/* ip_conntrack_pptp initialization */
-static int __init init(void)
-{
- int retcode;
-
- DEBUGP(__FILE__ ": registering helper\n");
- if ((retcode = ip_conntrack_helper_register(&pptp))) {
- printk(KERN_ERR "Unable to register conntrack application "
- "helper for pptp: %d\n", retcode);
- return -EIO;
- }
-
- printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION);
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip_conntrack_helper_unregister(&pptp);
- printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION);
-}
-
-module_init(init);
-module_exit(fini);
-
-EXPORT_SYMBOL(ip_pptp_lock);
+++ /dev/null
-#ifndef _IP_CT_PPTP_PRIV_H
-#define _IP_CT_PPTP_PRIV_H
-
-/* PptpControlMessageType names */
-static const char *strMName[] = {
- "UNKNOWN_MESSAGE",
- "START_SESSION_REQUEST",
- "START_SESSION_REPLY",
- "STOP_SESSION_REQUEST",
- "STOP_SESSION_REPLY",
- "ECHO_REQUEST",
- "ECHO_REPLY",
- "OUT_CALL_REQUEST",
- "OUT_CALL_REPLY",
- "IN_CALL_REQUEST",
- "IN_CALL_REPLY",
- "IN_CALL_CONNECT",
- "CALL_CLEAR_REQUEST",
- "CALL_DISCONNECT_NOTIFY",
- "WAN_ERROR_NOTIFY",
- "SET_LINK_INFO"
-};
-
-#endif
+++ /dev/null
-/*
- * ip_conntrack_proto_gre.c - Version 2.0
- *
- * Connection tracking protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part as port numbers.
- *
- * It has an optional key field, which may help us distinguishing two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/netfilter.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/list.h>
-
-#include <linux/netfilter_ipv4/lockhelp.h>
-
-DECLARE_RWLOCK(ip_ct_gre_lock);
-#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_ct_gre_lock)
-#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_ct_gre_lock)
-
-#include <linux/netfilter_ipv4/listhelp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE");
-
-/* shamelessly stolen from ip_conntrack_proto_udp.c */
-#define GRE_TIMEOUT (30*HZ)
-#define GRE_STREAM_TIMEOUT (180*HZ)
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
-#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \
- NIPQUAD((x)->src.ip), ntohl((x)->src.u.gre.key), \
- NIPQUAD((x)->dst.ip), ntohl((x)->dst.u.gre.key))
-#else
-#define DEBUGP(x, args...)
-#define DUMP_TUPLE_GRE(x)
-#endif
-
-/* GRE KEYMAP HANDLING FUNCTIONS */
-static LIST_HEAD(gre_keymap_list);
-
-static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km,
- const struct ip_conntrack_tuple *t)
-{
- return ((km->tuple.src.ip == t->src.ip) &&
- (km->tuple.dst.ip == t->dst.ip) &&
- (km->tuple.dst.protonum == t->dst.protonum) &&
- (km->tuple.dst.u.all == t->dst.u.all));
-}
-
-/* look up the source key for a given tuple */
-static u_int32_t gre_keymap_lookup(struct ip_conntrack_tuple *t)
-{
- struct ip_ct_gre_keymap *km;
- u_int32_t key;
-
- READ_LOCK(&ip_ct_gre_lock);
- km = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
- struct ip_ct_gre_keymap *, t);
- if (!km) {
- READ_UNLOCK(&ip_ct_gre_lock);
- return 0;
- }
-
- key = km->tuple.src.u.gre.key;
- READ_UNLOCK(&ip_ct_gre_lock);
-
- return key;
-}
-
-/* add a single keymap entry, associate with specified expect */
-int ip_ct_gre_keymap_add(struct ip_conntrack_expect *exp,
- struct ip_conntrack_tuple *t, int reply)
-{
- struct ip_ct_gre_keymap *km;
-
- km = kmalloc(sizeof(*km), GFP_ATOMIC);
- if (!km)
- return -1;
-
- /* initializing list head should be sufficient */
- memset(km, 0, sizeof(*km));
-
- memcpy(&km->tuple, t, sizeof(*t));
-
- if (!reply)
- exp->proto.gre.keymap_orig = km;
- else
- exp->proto.gre.keymap_reply = km;
-
- DEBUGP("adding new entry %p: ", km);
- DUMP_TUPLE_GRE(&km->tuple);
-
- WRITE_LOCK(&ip_ct_gre_lock);
- list_append(&gre_keymap_list, km);
- WRITE_UNLOCK(&ip_ct_gre_lock);
-
- return 0;
-}
-
-/* change the tuple of a keymap entry (used by nat helper) */
-void ip_ct_gre_keymap_change(struct ip_ct_gre_keymap *km,
- struct ip_conntrack_tuple *t)
-{
- if (!km)
- {
- printk(KERN_WARNING
- "NULL GRE conntrack keymap change requested\n");
- return;
- }
-
- DEBUGP("changing entry %p to: ", km);
- DUMP_TUPLE_GRE(t);
-
- WRITE_LOCK(&ip_ct_gre_lock);
- memcpy(&km->tuple, t, sizeof(km->tuple));
- WRITE_UNLOCK(&ip_ct_gre_lock);
-}
-
-/* destroy the keymap entries associated with specified expect */
-void ip_ct_gre_keymap_destroy(struct ip_conntrack_expect *exp)
-{
- DEBUGP("entering for exp %p\n", exp);
- WRITE_LOCK(&ip_ct_gre_lock);
- if (exp->proto.gre.keymap_orig) {
- DEBUGP("removing %p from list\n", exp->proto.gre.keymap_orig);
- list_del(&exp->proto.gre.keymap_orig->list);
- kfree(exp->proto.gre.keymap_orig);
- exp->proto.gre.keymap_orig = NULL;
- }
- if (exp->proto.gre.keymap_reply) {
- DEBUGP("removing %p from list\n", exp->proto.gre.keymap_reply);
- list_del(&exp->proto.gre.keymap_reply->list);
- kfree(exp->proto.gre.keymap_reply);
- exp->proto.gre.keymap_reply = NULL;
- }
- WRITE_UNLOCK(&ip_ct_gre_lock);
-}
-
-
-/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
-
-/* invert gre part of tuple */
-static int gre_invert_tuple(struct ip_conntrack_tuple *tuple,
- const struct ip_conntrack_tuple *orig)
-{
- tuple->dst.u.gre.key = orig->src.u.gre.key;
- tuple->src.u.gre.key = orig->dst.u.gre.key;
-
- return 1;
-}
-
-/* gre hdr info to tuple */
-static int gre_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct ip_conntrack_tuple *tuple)
-{
- struct gre_hdr _grehdr, *grehdr;
- struct gre_hdr_pptp _pgrehdr, *pgrehdr;
- u_int32_t srckey;
-
- grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
- /* PPTP header is variable length, only need up to the call_id field */
- pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
-
- if (!grehdr || !pgrehdr)
- return 0;
-
- switch (grehdr->version) {
- case GRE_VERSION_1701:
- if (!grehdr->key) {
- DEBUGP("Can't track GRE without key\n");
- return 0;
- }
- tuple->dst.u.gre.key = *(gre_key(grehdr));
- break;
-
- case GRE_VERSION_PPTP:
- if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
- DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
- return 0;
- }
- tuple->dst.u.gre.key = htonl(ntohs(pgrehdr->call_id));
- break;
-
- default:
- printk(KERN_WARNING "unknown GRE version %hu\n",
- grehdr->version);
- return 0;
- }
-
- srckey = gre_keymap_lookup(tuple);
-
- tuple->src.u.gre.key = srckey;
-#if 0
- DEBUGP("found src key %x for tuple ", ntohl(srckey));
- DUMP_TUPLE_GRE(tuple);
-#endif
-
- return 1;
-}
-
-/* print gre part of tuple */
-static unsigned int gre_print_tuple(char *buffer,
- const struct ip_conntrack_tuple *tuple)
-{
- return sprintf(buffer, "srckey=0x%x dstkey=0x%x ",
- ntohl(tuple->src.u.gre.key),
- ntohl(tuple->dst.u.gre.key));
-}
-
-/* print private data for conntrack */
-static unsigned int gre_print_conntrack(char *buffer,
- const struct ip_conntrack *ct)
-{
- return sprintf(buffer, "timeout=%u, stream_timeout=%u ",
- (ct->proto.gre.timeout / HZ),
- (ct->proto.gre.stream_timeout / HZ));
-}
-
-/* Returns verdict for packet, and may modify conntrack */
-static int gre_packet(struct ip_conntrack *ct,
- const struct sk_buff *skb,
- enum ip_conntrack_info conntrackinfo)
-{
- /* If we've seen traffic both ways, this is a GRE connection.
- * Extend timeout. */
- if (ct->status & IPS_SEEN_REPLY) {
- ip_ct_refresh_acct(ct, conntrackinfo, skb,
- ct->proto.gre.stream_timeout);
- /* Also, more likely to be important, and not a probe. */
- set_bit(IPS_ASSURED_BIT, &ct->status);
- } else
- ip_ct_refresh_acct(ct, conntrackinfo, skb,
- ct->proto.gre.timeout);
-
- return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static int gre_new(struct ip_conntrack *ct,
- const struct sk_buff *skb)
-{
- DEBUGP(": ");
- DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-
- /* initialize to sane value. Ideally a conntrack helper
- * (e.g. in case of pptp) is increasing them */
- ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
- ct->proto.gre.timeout = GRE_TIMEOUT;
-
- return 1;
-}
-
-/* Called when a conntrack entry has already been removed from the hashes
- * and is about to be deleted from memory */
-static void gre_destroy(struct ip_conntrack *ct)
-{
- struct ip_conntrack_expect *master = ct->master;
-
- DEBUGP(" entering\n");
-
- if (!master) {
- DEBUGP("no master exp for ct %p\n", ct);
- return;
- }
-
- ip_ct_gre_keymap_destroy(master);
-}
-
-/* protocol helper struct */
-static struct ip_conntrack_protocol gre = {
- .proto = IPPROTO_GRE,
- .name = "gre",
- .pkt_to_tuple = gre_pkt_to_tuple,
- .invert_tuple = gre_invert_tuple,
- .print_tuple = gre_print_tuple,
- .print_conntrack = gre_print_conntrack,
- .packet = gre_packet,
- .new = gre_new,
- .destroy = gre_destroy,
- .exp_matches_pkt = NULL,
- .me = THIS_MODULE
-};
-
-/* ip_conntrack_proto_gre initialization */
-static int __init init(void)
-{
- int retcode;
-
- if ((retcode = ip_conntrack_protocol_register(&gre))) {
- printk(KERN_ERR "Unable to register conntrack protocol "
- "helper for gre: %d\n", retcode);
- return -EIO;
- }
-
- return 0;
-}
-
-static void __exit fini(void)
-{
- struct list_head *pos, *n;
-
- /* delete all keymap entries */
- WRITE_LOCK(&ip_ct_gre_lock);
- list_for_each_safe(pos, n, &gre_keymap_list) {
- DEBUGP("deleting keymap %p at module unload time\n", pos);
- list_del(pos);
- kfree(pos);
- }
- WRITE_UNLOCK(&ip_ct_gre_lock);
-
- ip_conntrack_protocol_unregister(&gre);
-}
-
-EXPORT_SYMBOL(ip_ct_gre_keymap_add);
-EXPORT_SYMBOL(ip_ct_gre_keymap_change);
-EXPORT_SYMBOL(ip_ct_gre_keymap_destroy);
-
-module_init(init);
-module_exit(fini);
len += print_tuple(buffer + len,
&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
proto);
- len += sprintf(buffer + len, "xid=%d ", conntrack->xid[IP_CT_DIR_ORIGINAL]);
if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
len += sprintf(buffer + len, "[UNREPLIED] ");
len += print_tuple(buffer + len,
&conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto);
- len += sprintf(buffer + len, "xid=%d ", conntrack->xid[IP_CT_DIR_REPLY]);
if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
len += sprintf(buffer + len, "[ASSURED] ");
len += sprintf(buffer + len, "use=%u ",
atomic_read(&conntrack->ct_general.use));
+ len += sprintf(buffer + len, "xid=%d ",
+ conntrack->xid);
len += sprintf(buffer + len, "\n");
return len;
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
+#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
static int ports[MAX_PORTS];
static int ports_c;
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
DECLARE_LOCK_EXTERN(ip_ftp_lock);
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("IRC (DCC) NAT helper");
MODULE_LICENSE("GPL");
+#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
+#endif
/* protects irc part of conntracks */
DECLARE_LOCK_EXTERN(ip_irc_lock);
+++ /dev/null
-/*
- * ip_nat_pptp.c - Version 2.0
- *
- * NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
- * It is a specification defined by Microsoft and some vendors
- * working with Microsoft. PPTP is built on top of a modified
- * version of the Internet Generic Routing Encapsulation Protocol.
- * GRE is defined in RFC 1701 and RFC 1702. Documentation of
- * PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * TODO: - Support for multiple calls within one session
- * (needs netfilter newnat code)
- * - NAT to a unique tuple, not to TCP source port
- * (needs netfilter tuple reservation)
- *
- * Changes:
- * 2002-02-10 - Version 1.3
- * - Use ip_nat_mangle_tcp_packet() because of cloned skb's
- * in local connections (Philip Craig <philipc@snapgear.com>)
- * - add checks for magicCookie and pptp version
- * - make argument list of pptp_{out,in}bound_packet() shorter
- * - move to C99 style initializers
- * - print version number at module loadtime
- * 2003-09-22 - Version 1.5
- * - use SNATed tcp sourceport as callid, since we get called before
- * TCP header is mangled (Philip Craig <philipc@snapgear.com>)
- * 2004-10-22 - Version 2.0
- * - kernel 2.6.x version
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_nat_pptp.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
-
-#define IP_NAT_PPTP_VERSION "2.0"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
-
-
-#if 0
-#include "ip_conntrack_pptp_priv.h"
-#define DEBUGP(format, args...) printk(KERN_DEBUG __FILE__ ":" __FUNCTION__ \
- ": " format, ## args)
-#else
-#define DEBUGP(format, args...)
-#endif
-
-static unsigned int
-pptp_nat_expected(struct sk_buff **pskb,
- unsigned int hooknum,
- struct ip_conntrack *ct,
- struct ip_nat_info *info)
-{
- struct ip_conntrack *master = master_ct(ct);
- struct ip_nat_multi_range mr;
- struct ip_ct_pptp_master *ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info;
- u_int32_t newip, newcid;
- int ret;
-
- IP_NF_ASSERT(info);
- IP_NF_ASSERT(master);
- IP_NF_ASSERT(!(info->initialized & (1 << HOOK2MANIP(hooknum))));
-
- DEBUGP("we have a connection!\n");
-
- LOCK_BH(&ip_pptp_lock);
- ct_pptp_info = &master->help.ct_pptp_info;
- nat_pptp_info = &master->nat.help.nat_pptp_info;
-
- /* need to alter GRE tuple because conntrack expectfn() used 'wrong'
- * (unmanipulated) values */
- if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) {
- DEBUGP("completing tuples with NAT info \n");
- /* we can do this, since we're unconfirmed */
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(ct_pptp_info->pac_call_id)) {
- /* assume PNS->PAC */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(nat_pptp_info->pns_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(nat_pptp_info->pns_call_id);
- newip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
- newcid = htonl(nat_pptp_info->pac_call_id);
- } else {
- /* assume PAC->PNS */
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.gre.key =
- htonl(nat_pptp_info->pac_call_id);
- ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.gre.key =
- htonl(nat_pptp_info->pac_call_id);
- newip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
- newcid = htonl(nat_pptp_info->pns_call_id);
- }
- } else {
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.gre.key ==
- htonl(ct_pptp_info->pac_call_id)) {
- /* assume PNS->PAC */
- newip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- newcid = htonl(ct_pptp_info->pns_call_id);
- }
- else {
- /* assume PAC->PNS */
- newip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
- newcid = htonl(ct_pptp_info->pac_call_id);
- }
- }
-
- mr.rangesize = 1;
- mr.range[0].flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED;
- mr.range[0].min_ip = mr.range[0].max_ip = newip;
- mr.range[0].min = mr.range[0].max =
- ((union ip_conntrack_manip_proto ) { newcid });
- DEBUGP("change ip to %u.%u.%u.%u\n",
- NIPQUAD(newip));
- DEBUGP("change key to 0x%x\n", ntohl(newcid));
- ret = ip_nat_setup_info(ct, &mr, hooknum);
-
- UNLOCK_BH(&ip_pptp_lock);
-
- return ret;
-
-}
-
-/* outbound packets == from PNS to PAC */
-static inline unsigned int
-pptp_outbound_pkt(struct sk_buff **pskb,
- struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- struct ip_conntrack_expect *exp)
-
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- struct pptp_pkt_hdr *pptph = (struct pptp_pkt_hdr *)
- ((void *)tcph + tcph->doff*4);
-
- struct PptpControlHeader *ctlh;
- union pptp_ctrl_union *pptpReq;
- struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
- u_int16_t msg, *cid = NULL, new_callid;
-
- /* FIXME: size checks !!! */
- ctlh = (struct PptpControlHeader *) ((void *) pptph + sizeof(*pptph));
- pptpReq = (void *) ((void *) ctlh + sizeof(*ctlh));
-
- new_callid = htons(ct_pptp_info->pns_call_id);
-
- switch (msg = ntohs(ctlh->messageType)) {
- case PPTP_OUT_CALL_REQUEST:
- cid = &pptpReq->ocreq.callID;
- /* FIXME: ideally we would want to reserve a call ID
- * here. current netfilter NAT core is not able to do
- * this :( For now we use TCP source port. This breaks
- * multiple calls within one control session */
-
- /* save original call ID in nat_info */
- nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
-
- /* don't use tcph->source since we are at a DSTmanip
- * hook (e.g. PREROUTING) and pkt is not mangled yet */
- new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
-
- /* save new call ID in ct info */
- ct_pptp_info->pns_call_id = ntohs(new_callid);
- break;
- case PPTP_IN_CALL_REPLY:
- cid = &pptpReq->icreq.callID;
- break;
- case PPTP_CALL_CLEAR_REQUEST:
- cid = &pptpReq->clrreq.callID;
- break;
- default:
- DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
- (msg <= PPTP_MSG_MAX)? strMName[msg]:strMName[0]);
- /* fall through */
-
- case PPTP_SET_LINK_INFO:
- /* only need to NAT in case PAC is behind NAT box */
- case PPTP_START_SESSION_REQUEST:
- case PPTP_START_SESSION_REPLY:
- case PPTP_STOP_SESSION_REQUEST:
- case PPTP_STOP_SESSION_REPLY:
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* no need to alter packet */
- return NF_ACCEPT;
- }
-
- IP_NF_ASSERT(cid);
-
- DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_callid));
-
- /* mangle packet */
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, (void *)cid - (void *)pptph,
- sizeof(new_callid), (char *)&new_callid,
- sizeof(new_callid));
-
- return NF_ACCEPT;
-}
-
-/* inbound packets == from PAC to PNS */
-static inline unsigned int
-pptp_inbound_pkt(struct sk_buff **pskb,
- struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- struct ip_conntrack_expect *oldexp)
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- struct pptp_pkt_hdr *pptph = (struct pptp_pkt_hdr *)
- ((void *)tcph + tcph->doff*4);
-
- struct PptpControlHeader *ctlh;
- union pptp_ctrl_union *pptpReq;
- struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
- struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
- u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
- u_int32_t old_dst_ip;
-
- struct ip_conntrack_tuple t, inv_t;
- struct ip_conntrack_tuple *orig_t, *reply_t;
-
- /* FIXME: size checks !!! */
- ctlh = (struct PptpControlHeader *) ((void *) pptph + sizeof(*pptph));
- pptpReq = (void *) ((void *) ctlh + sizeof(*ctlh));
-
- new_pcid = htons(nat_pptp_info->pns_call_id);
-
- switch (msg = ntohs(ctlh->messageType)) {
- case PPTP_OUT_CALL_REPLY:
- pcid = &pptpReq->ocack.peersCallID;
- cid = &pptpReq->ocack.callID;
- if (!oldexp) {
- DEBUGP("outcall but no expectation\n");
- break;
- }
- old_dst_ip = oldexp->tuple.dst.ip;
- t = oldexp->tuple;
- invert_tuplepr(&inv_t, &t);
-
- /* save original PAC call ID in nat_info */
- nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
-
- /* alter expectation */
- orig_t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- reply_t = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- if (t.src.ip == orig_t->src.ip && t.dst.ip == orig_t->dst.ip) {
- /* expectation for PNS->PAC direction */
- t.src.u.gre.key = htonl(nat_pptp_info->pns_call_id);
- t.dst.u.gre.key = htonl(ct_pptp_info->pac_call_id);
- inv_t.src.ip = reply_t->src.ip;
- inv_t.dst.ip = reply_t->dst.ip;
- inv_t.src.u.gre.key = htonl(nat_pptp_info->pac_call_id);
- inv_t.dst.u.gre.key = htonl(ct_pptp_info->pns_call_id);
- } else {
- /* expectation for PAC->PNS direction */
- t.src.u.gre.key = htonl(nat_pptp_info->pac_call_id);
- t.dst.u.gre.key = htonl(ct_pptp_info->pns_call_id);
- inv_t.src.ip = orig_t->src.ip;
- inv_t.dst.ip = orig_t->dst.ip;
- inv_t.src.u.gre.key = htonl(nat_pptp_info->pns_call_id);
- inv_t.dst.u.gre.key = htonl(ct_pptp_info->pac_call_id);
- }
-
- if (!ip_conntrack_change_expect(oldexp, &t)) {
- DEBUGP("successfully changed expect\n");
- } else {
- DEBUGP("can't change expect\n");
- }
- ip_ct_gre_keymap_change(oldexp->proto.gre.keymap_orig, &t);
- ip_ct_gre_keymap_change(oldexp->proto.gre.keymap_reply, &inv_t);
- break;
- case PPTP_IN_CALL_CONNECT:
- pcid = &pptpReq->iccon.peersCallID;
- if (!oldexp)
- break;
- old_dst_ip = oldexp->tuple.dst.ip;
- t = oldexp->tuple;
-
- /* alter expectation, no need for callID */
- if (t.dst.ip == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip) {
- /* expectation for PNS->PAC direction */
- t.src.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- } else {
- /* expectation for PAC->PNS direction */
- t.dst.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
- }
-
- if (!ip_conntrack_change_expect(oldexp, &t)) {
- DEBUGP("successfully changed expect\n");
- } else {
- DEBUGP("can't change expect\n");
- }
- break;
- case PPTP_IN_CALL_REQUEST:
- /* only need to nat in case PAC is behind NAT box */
- break;
- case PPTP_WAN_ERROR_NOTIFY:
- pcid = &pptpReq->wanerr.peersCallID;
- break;
- case PPTP_CALL_DISCONNECT_NOTIFY:
- pcid = &pptpReq->disc.callID;
- break;
-
- default:
- DEBUGP("unknown inbound packet %s\n",
- (msg <= PPTP_MSG_MAX)? strMName[msg]:strMName[0]);
- /* fall through */
-
- case PPTP_START_SESSION_REQUEST:
- case PPTP_START_SESSION_REPLY:
- case PPTP_STOP_SESSION_REQUEST:
- case PPTP_STOP_SESSION_REPLY:
- case PPTP_ECHO_REQUEST:
- case PPTP_ECHO_REPLY:
- /* no need to alter packet */
- return NF_ACCEPT;
- }
-
- /* mangle packet */
- IP_NF_ASSERT(pcid);
- DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
- ntohs(*pcid), ntohs(new_pcid));
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, (void *)pcid - (void *)pptph,
- sizeof(new_pcid), (char *)&new_pcid,
- sizeof(new_pcid));
-
- if (new_cid) {
- IP_NF_ASSERT(cid);
- DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_cid));
- ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
- (void *)cid - (void *)pptph,
- sizeof(new_cid), (char *)&new_cid,
- sizeof(new_cid));
- }
-
- /* great, at least we don't need to resize packets */
- return NF_ACCEPT;
-}
-
-
-static unsigned int tcp_help(struct ip_conntrack *ct,
- struct ip_conntrack_expect *exp,
- struct ip_nat_info *info,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum, struct sk_buff **pskb)
-{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph = (void *) iph + iph->ihl*4;
- unsigned int datalen = (*pskb)->len - iph->ihl*4 - tcph->doff*4;
- struct pptp_pkt_hdr *pptph;
-
- int dir;
-
- DEBUGP("entering\n");
-
- /* Only mangle things once: DST for original direction
- and SRC for reply direction. */
- dir = CTINFO2DIR(ctinfo);
- if (!((HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
- && dir == IP_CT_DIR_ORIGINAL)
- || (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST
- && dir == IP_CT_DIR_REPLY))) {
- DEBUGP("Not touching dir %s at hook %s\n",
- dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY",
- hooknum == NF_IP_POST_ROUTING ? "POSTROUTING"
- : hooknum == NF_IP_PRE_ROUTING ? "PREROUTING"
- : hooknum == NF_IP_LOCAL_OUT ? "OUTPUT"
- : hooknum == NF_IP_LOCAL_IN ? "INPUT" : "???");
- return NF_ACCEPT;
- }
-
- /* if packet is too small, just skip it */
- if (datalen < sizeof(struct pptp_pkt_hdr)+
- sizeof(struct PptpControlHeader)) {
- DEBUGP("pptp packet too short\n");
- return NF_ACCEPT;
- }
-
- pptph = (struct pptp_pkt_hdr *) ((void *)tcph + tcph->doff*4);
-
- /* if it's not a control message, we can't handle it */
- if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
- ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
- DEBUGP("not a pptp control packet\n");
- return NF_ACCEPT;
- }
-
- LOCK_BH(&ip_pptp_lock);
-
- if (dir == IP_CT_DIR_ORIGINAL) {
- /* reuqests sent by client to server (PNS->PAC) */
- pptp_outbound_pkt(pskb, ct, ctinfo, exp);
- } else {
- /* response from the server to the client (PAC->PNS) */
- pptp_inbound_pkt(pskb, ct, ctinfo, exp);
- }
-
- UNLOCK_BH(&ip_pptp_lock);
-
- return NF_ACCEPT;
-}
-
-/* nat helper struct for control connection */
-static struct ip_nat_helper pptp_tcp_helper = {
- .list = { NULL, NULL },
- .name = "pptp",
- .flags = IP_NAT_HELPER_F_ALWAYS,
- .me = THIS_MODULE,
- .tuple = { .src = { .ip = 0,
- .u = { .tcp = { .port =
- __constant_htons(PPTP_CONTROL_PORT) }
- }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = IPPROTO_TCP
- }
- },
-
- .mask = { .src = { .ip = 0,
- .u = { .tcp = { .port = 0xFFFF } }
- },
- .dst = { .ip = 0,
- .u = { .all = 0 },
- .protonum = 0xFFFF
- }
- },
- .help = tcp_help,
- .expect = pptp_nat_expected
-};
-
-
-static int __init init(void)
-{
- DEBUGP("%s: registering NAT helper\n", __FILE__);
- if (ip_nat_helper_register(&pptp_tcp_helper)) {
- printk(KERN_ERR "Unable to register NAT application helper "
- "for pptp\n");
- return -EIO;
- }
-
- printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
- return 0;
-}
-
-static void __exit fini(void)
-{
- DEBUGP("cleanup_module\n" );
- ip_nat_helper_unregister(&pptp_tcp_helper);
- printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
-}
-
-module_init(init);
-module_exit(fini);
+++ /dev/null
-/*
- * ip_nat_proto_gre.c - Version 2.0
- *
- * NAT protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part as port numbers.
- *
- * It has an optional key field, which may help us distinguishing two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2004 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/ip.h>
-#include <linux/netfilter_ipv4/ip_nat.h>
-#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
-
-#if 0
-#define DEBUGP(format, args...) printk(KERN_DEBUG __FILE__ ":" __FUNCTION__ \
- ": " format, ## args)
-#else
-#define DEBUGP(x, args...)
-#endif
-
-/* is key in given range between min and max */
-static int
-gre_in_range(const struct ip_conntrack_tuple *tuple,
- enum ip_nat_manip_type maniptype,
- const union ip_conntrack_manip_proto *min,
- const union ip_conntrack_manip_proto *max)
-{
- u_int32_t key;
-
- if (maniptype == IP_NAT_MANIP_SRC)
- key = tuple->src.u.gre.key;
- else
- key = tuple->dst.u.gre.key;
-
- return ntohl(key) >= ntohl(min->gre.key)
- && ntohl(key) <= ntohl(max->gre.key);
-}
-
-/* generate unique tuple ... */
-static int
-gre_unique_tuple(struct ip_conntrack_tuple *tuple,
- const struct ip_nat_range *range,
- enum ip_nat_manip_type maniptype,
- const struct ip_conntrack *conntrack)
-{
- u_int32_t min, i, range_size;
- u_int32_t key = 0, *keyptr;
-
- if (maniptype == IP_NAT_MANIP_SRC)
- keyptr = &tuple->src.u.gre.key;
- else
- keyptr = &tuple->dst.u.gre.key;
-
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
- DEBUGP("%p: NATing GRE PPTP\n", conntrack);
- min = 1;
- range_size = 0xffff;
- } else {
- min = ntohl(range->min.gre.key);
- range_size = ntohl(range->max.gre.key) - min + 1;
- }
-
- DEBUGP("min = %u, range_size = %u\n", min, range_size);
-
- for (i = 0; i < range_size; i++, key++) {
- *keyptr = htonl(min + key % range_size);
- if (!ip_nat_used_tuple(tuple, conntrack))
- return 1;
- }
-
- DEBUGP("%p: no NAT mapping\n", conntrack);
-
- return 0;
-}
-
-/* manipulate a GRE packet according to maniptype */
-static int
-gre_manip_pkt(struct sk_buff **pskb,
- unsigned int hdroff,
- const struct ip_conntrack_manip *manip,
- enum ip_nat_manip_type maniptype)
-{
- struct gre_hdr *greh;
- struct gre_hdr_pptp *pgreh;
-
- if (!skb_ip_make_writable(pskb, hdroff + sizeof(*pgreh)))
- return 0;
-
- greh = (void *)(*pskb)->data + hdroff;
- pgreh = (struct gre_hdr_pptp *) greh;
-
- /* we only have destination manip of a packet, since 'source key'
- * is not present in the packet itself */
- if (maniptype == IP_NAT_MANIP_DST) {
- /* key manipulation is always dest */
- switch (greh->version) {
- case 0:
- if (!greh->key) {
- DEBUGP("can't nat GRE w/o key\n");
- break;
- }
- if (greh->csum) {
- /* FIXME: Never tested this code... */
- *(gre_csum(greh)) =
- ip_nat_cheat_check(~*(gre_key(greh)),
- manip->u.gre.key,
- *(gre_csum(greh)));
- }
- *(gre_key(greh)) = manip->u.gre.key;
- break;
- case GRE_VERSION_PPTP:
- DEBUGP("call_id -> 0x%04x\n",
- ntohl(manip->u.gre.key));
- pgreh->call_id = htons(ntohl(manip->u.gre.key));
- break;
- default:
- DEBUGP("can't nat unknown GRE version\n");
- return 0;
- break;
- }
- }
- return 1;
-}
-
-/* print out a nat tuple */
-static unsigned int
-gre_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.gre.key)
- len += sprintf(buffer + len, "srckey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- if (mask->dst.u.gre.key)
- len += sprintf(buffer + len, "dstkey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- return len;
-}
-
-/* print a range of keys */
-static unsigned int
-gre_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.gre.key != 0
- || range->max.gre.key != 0xFFFF) {
- if (range->min.gre.key == range->max.gre.key)
- return sprintf(buffer, "key 0x%x ",
- ntohl(range->min.gre.key));
- else
- return sprintf(buffer, "keys 0x%u-0x%u ",
- ntohl(range->min.gre.key),
- ntohl(range->max.gre.key));
- } else
- return 0;
-}
-
-/* nat helper struct */
-static struct ip_nat_protocol gre = {
- .name = "GRE",
- .protonum = IPPROTO_GRE,
- .manip_pkt = gre_manip_pkt,
- .in_range = gre_in_range,
- .unique_tuple = gre_unique_tuple,
- .print = gre_print,
- .print_range = gre_print_range
-};
-
-static int __init init(void)
-{
- if (ip_nat_protocol_register(&gre))
- return -EIO;
-
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip_nat_protocol_unregister(&gre);
-}
-
-module_init(init);
-module_exit(fini);
return 1;
}
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(unsigned char *csum,
- const unsigned char *optr,
- const unsigned char *nptr,
- int odd)
-{
- long x, old, new;
-
- x = csum[0] * 256 + csum[1];
-
- x =~ x & 0xFFFF;
-
- if (odd) old = optr[0] * 256;
- else old = optr[0];
-
- x -= old & 0xFFFF;
- if (x <= 0) {
- x--;
- x &= 0xFFFF;
- }
-
- if (odd) new = nptr[0] * 256;
- else new = nptr[0];
-
- x += new & 0xFFFF;
- if (x & 0x10000) {
- x++;
- x &= 0xFFFF;
- }
-
- x =~ x & 0xFFFF;
- csum[0] = x / 256;
- csum[1] = x & 0xFF;
-}
-
-/*
- * Mangle IP address.
- * - begin points to the start of the snmp messgae
- * - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
- unsigned char *addr,
- const struct oct1_map *map,
- u_int16_t *check)
-{
- if (map->from == NOCT1(*addr)) {
- u_int32_t old;
-
- if (debug)
- memcpy(&old, (unsigned char *)addr, sizeof(old));
-
- *addr = map->to;
-
- /* Update UDP checksum if being used */
- if (*check) {
- unsigned char odd = !((addr - begin) % 2);
-
- fast_csum((unsigned char *)check,
- &map->from, &map->to, odd);
-
- }
-
- if (debug)
- printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
- "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
- }
-}
-
static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
struct snmp_v1_trap *trap,
const struct oct1_map *map,
printk("\n");
}
+/*
+ * Fast checksum update for possibly oddly-aligned UDP byte, from the
+ * code example in the draft.
+ */
+static void fast_csum(unsigned char *csum,
+ const unsigned char *optr,
+ const unsigned char *nptr,
+ int odd)
+{
+ long x, old, new;
+
+ x = csum[0] * 256 + csum[1];
+
+ x =~ x & 0xFFFF;
+
+ if (odd) old = optr[0] * 256;
+ else old = optr[0];
+
+ x -= old & 0xFFFF;
+ if (x <= 0) {
+ x--;
+ x &= 0xFFFF;
+ }
+
+ if (odd) new = nptr[0] * 256;
+ else new = nptr[0];
+
+ x += new & 0xFFFF;
+ if (x & 0x10000) {
+ x++;
+ x &= 0xFFFF;
+ }
+
+ x =~ x & 0xFFFF;
+ csum[0] = x / 256;
+ csum[1] = x & 0xFF;
+}
+
+/*
+ * Mangle IP address.
+ * - begin points to the start of the snmp messgae
+ * - addr points to the start of the address
+ */
+static inline void mangle_address(unsigned char *begin,
+ unsigned char *addr,
+ const struct oct1_map *map,
+ u_int16_t *check)
+{
+ if (map->from == NOCT1(*addr)) {
+ u_int32_t old;
+
+ if (debug)
+ memcpy(&old, (unsigned char *)addr, sizeof(old));
+
+ *addr = map->to;
+
+ /* Update UDP checksum if being used */
+ if (*check) {
+ unsigned char odd = !((addr - begin) % 2);
+
+ fast_csum((unsigned char *)check,
+ &map->from, &map->to, odd);
+
+ }
+
+ if (debug)
+ printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
+ "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
+ }
+}
+
/*
* Parse and mangle SNMP message according to mapping.
* (And this is the fucking 'basic' method).
static int ports[MAX_PORTS];
static int ports_c = 0;
+#ifdef MODULE_PARM
MODULE_PARM(ports,"1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
+#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
};
static unsigned long
-fold_field(void *mib[], int offt)
+__fold_field(void *mib[], int offt)
{
unsigned long res = 0;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
- res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
+ offt));
+ res +=
+ *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) +
+ offt));
}
return res;
}
-/* snmp items */
-static struct snmp_mib snmp4_ipstats_list[] = {
- SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES),
- SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
- SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
- SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
- SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
- SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
- SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
- SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS),
- SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
- SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
- SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
- SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS),
- SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS),
- SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS),
- SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS),
- SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
- SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_icmp_list[] = {
- SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
- SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
- SNMP_MIB_ITEM("InDestUnreachs", ICMP_MIB_INDESTUNREACHS),
- SNMP_MIB_ITEM("InTimeExcds", ICMP_MIB_INTIMEEXCDS),
- SNMP_MIB_ITEM("InParmProbs", ICMP_MIB_INPARMPROBS),
- SNMP_MIB_ITEM("InSrcQuenchs", ICMP_MIB_INSRCQUENCHS),
- SNMP_MIB_ITEM("InRedirects", ICMP_MIB_INREDIRECTS),
- SNMP_MIB_ITEM("InEchos", ICMP_MIB_INECHOS),
- SNMP_MIB_ITEM("InEchoReps", ICMP_MIB_INECHOREPS),
- SNMP_MIB_ITEM("InTimestamps", ICMP_MIB_INTIMESTAMPS),
- SNMP_MIB_ITEM("InTimestampReps", ICMP_MIB_INTIMESTAMPREPS),
- SNMP_MIB_ITEM("InAddrMasks", ICMP_MIB_INADDRMASKS),
- SNMP_MIB_ITEM("InAddrMaskReps", ICMP_MIB_INADDRMASKREPS),
- SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
- SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
- SNMP_MIB_ITEM("OutDestUnreachs", ICMP_MIB_OUTDESTUNREACHS),
- SNMP_MIB_ITEM("OutTimeExcds", ICMP_MIB_OUTTIMEEXCDS),
- SNMP_MIB_ITEM("OutParmProbs", ICMP_MIB_OUTPARMPROBS),
- SNMP_MIB_ITEM("OutSrcQuenchs", ICMP_MIB_OUTSRCQUENCHS),
- SNMP_MIB_ITEM("OutRedirects", ICMP_MIB_OUTREDIRECTS),
- SNMP_MIB_ITEM("OutEchos", ICMP_MIB_OUTECHOS),
- SNMP_MIB_ITEM("OutEchoReps", ICMP_MIB_OUTECHOREPS),
- SNMP_MIB_ITEM("OutTimestamps", ICMP_MIB_OUTTIMESTAMPS),
- SNMP_MIB_ITEM("OutTimestampReps", ICMP_MIB_OUTTIMESTAMPREPS),
- SNMP_MIB_ITEM("OutAddrMasks", ICMP_MIB_OUTADDRMASKS),
- SNMP_MIB_ITEM("OutAddrMaskReps", ICMP_MIB_OUTADDRMASKREPS),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_tcp_list[] = {
- SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM),
- SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN),
- SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX),
- SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN),
- SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS),
- SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS),
- SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS),
- SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS),
- SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB),
- SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS),
- SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS),
- SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS),
- SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS),
- SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS),
- SNMP_MIB_SENTINEL
-};
-
-static struct snmp_mib snmp4_udp_list[] = {
- SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS),
- SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS),
- SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS),
- SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS),
- SNMP_MIB_SENTINEL
-};
+#define fold_field(_mib, _nr) __fold_field(_mib, (sizeof(unsigned long) * (_nr)))
-static struct snmp_mib snmp4_net_list[] = {
- SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT),
- SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV),
- SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED),
- SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS),
- SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED),
- SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED),
- SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED),
- SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS),
- SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS),
- SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER),
- SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
- SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
- SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
- SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED),
- SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
- SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
- SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
- SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED),
- SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST),
- SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS),
- SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS),
- SNMP_MIB_ITEM("TCPPrequeued", LINUX_MIB_TCPPREQUEUED),
- SNMP_MIB_ITEM("TCPDirectCopyFromBacklog", LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG),
- SNMP_MIB_ITEM("TCPDirectCopyFromPrequeue", LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE),
- SNMP_MIB_ITEM("TCPPrequeueDropped", LINUX_MIB_TCPPREQUEUEDROPPED),
- SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS),
- SNMP_MIB_ITEM("TCPHPHitsToUser", LINUX_MIB_TCPHPHITSTOUSER),
- SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS),
- SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS),
- SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY),
- SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY),
- SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING),
- SNMP_MIB_ITEM("TCPFACKReorder", LINUX_MIB_TCPFACKREORDER),
- SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER),
- SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER),
- SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER),
- SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO),
- SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
- SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
- SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
- SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
- SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
- SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
- SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
- SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES),
- SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS),
- SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS),
- SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS),
- SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS),
- SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
- SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
- SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED),
- SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED),
- SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT),
- SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
- SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
- SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
- SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
- SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
- SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
- SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT),
- SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
- SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
- SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
- SNMP_MIB_SENTINEL
+/* snmp items */
+static struct snmp_item snmp4_ipstats_list[] = {
+#define __SNMP_GEN(x,y) SNMP_ITEM(struct ipstats_mib, x, y)
+#define SNMP_GEN(x) __SNMP_GEN(x, #x)
+ SNMP_GEN(InReceives),
+ SNMP_GEN(InHdrErrors),
+ SNMP_GEN(InAddrErrors),
+ __SNMP_GEN(OutForwDatagrams,"ForwDatagrams"), /* for backward compatibility */
+ SNMP_GEN(InUnknownProtos),
+ SNMP_GEN(InDiscards),
+ SNMP_GEN(InDelivers),
+ SNMP_GEN(OutRequests),
+ SNMP_GEN(OutDiscards),
+ SNMP_GEN(OutNoRoutes),
+ SNMP_GEN(ReasmTimeout),
+ SNMP_GEN(ReasmReqds),
+ SNMP_GEN(ReasmOKs),
+ SNMP_GEN(ReasmFails),
+ SNMP_GEN(FragOKs),
+ SNMP_GEN(FragFails),
+ SNMP_GEN(FragCreates),
+ SNMP_ITEM_SENTINEL
+#undef SNMP_GEN
};
/*
{
int i;
- seq_puts(seq, "Ip: Forwarding DefaultTTL");
+ seq_printf(seq, "Ip: Forwarding DefaultTTL");
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- fold_field((void **) ip_statistics,
- snmp4_ipstats_list[i].entry));
-
- seq_puts(seq, "\nIcmp:");
- for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_icmp_list[i].name);
-
- seq_puts(seq, "\nIcmp:");
- for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
+ __fold_field((void **) ip_statistics,
+ snmp4_ipstats_list[i].offset));
+
+ seq_printf(seq, "\nIcmp: InMsgs InErrors InDestUnreachs InTimeExcds "
+ "InParmProbs InSrcQuenchs InRedirects InEchos "
+ "InEchoReps InTimestamps InTimestampReps InAddrMasks "
+ "InAddrMaskReps OutMsgs OutErrors OutDestUnreachs "
+ "OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects "
+ "OutEchos OutEchoReps OutTimestamps OutTimestampReps "
+ "OutAddrMasks OutAddrMaskReps\nIcmp:");
+
+ for (i = 0;
+ i < offsetof(struct icmp_mib, dummy) / sizeof(unsigned long); i++)
seq_printf(seq, " %lu",
- fold_field((void **) icmp_statistics,
- snmp4_icmp_list[i].entry));
-
- seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_tcp_list[i].name);
-
- seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name != NULL; i++) {
- /* MaxConn field is signed, RFC 2012 */
- if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
- seq_printf(seq, " %ld",
- fold_field((void **) tcp_statistics,
- snmp4_tcp_list[i].entry));
+ fold_field((void **) icmp_statistics, i));
+
+ seq_printf(seq, "\nTcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens "
+ "PassiveOpens AttemptFails EstabResets CurrEstab "
+ "InSegs OutSegs RetransSegs InErrs OutRsts\nTcp:");
+
+ for (i = 0;
+ i < offsetof(struct tcp_mib, __pad) / sizeof(unsigned long); i++) {
+ if (i == (offsetof(struct tcp_mib, TcpMaxConn) / sizeof(unsigned long)))
+ /* MaxConn field is negative, RFC 2012 */
+ seq_printf(seq, " %ld",
+ fold_field((void **) tcp_statistics, i));
else
- seq_printf(seq, " %lu",
- fold_field((void **) tcp_statistics,
- snmp4_tcp_list[i].entry));
+ seq_printf(seq, " %lu",
+ fold_field((void **) tcp_statistics, i));
}
- seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_udp_list[i].name);
+ seq_printf(seq, "\nUdp: InDatagrams NoPorts InErrors OutDatagrams\n"
+ "Udp:");
- seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- fold_field((void **) udp_statistics,
- snmp4_udp_list[i].entry));
+ for (i = 0;
+ i < offsetof(struct udp_mib, __pad) / sizeof(unsigned long); i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) udp_statistics, i));
seq_putc(seq, '\n');
return 0;
{
int i;
- seq_puts(seq, "\nTcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
- seq_printf(seq, " %s", snmp4_net_list[i].name);
-
- seq_puts(seq, "\nTcpExt:");
- for (i = 0; snmp4_net_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- fold_field((void **) net_statistics,
- snmp4_net_list[i].entry));
-
+ seq_puts(seq, "TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed"
+ " EmbryonicRsts PruneCalled RcvPruned OfoPruned"
+ " OutOfWindowIcmps LockDroppedIcmps ArpFilter"
+ " TW TWRecycled TWKilled"
+ " PAWSPassive PAWSActive PAWSEstab"
+ " DelayedACKs DelayedACKLocked DelayedACKLost"
+ " ListenOverflows ListenDrops"
+ " TCPPrequeued TCPDirectCopyFromBacklog"
+ " TCPDirectCopyFromPrequeue TCPPrequeueDropped"
+ " TCPHPHits TCPHPHitsToUser"
+ " TCPPureAcks TCPHPAcks"
+ " TCPRenoRecovery TCPSackRecovery"
+ " TCPSACKReneging"
+ " TCPFACKReorder TCPSACKReorder TCPRenoReorder"
+ " TCPTSReorder"
+ " TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo"
+ " TCPLoss TCPLostRetransmit"
+ " TCPRenoFailures TCPSackFailures TCPLossFailures"
+ " TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans"
+ " TCPTimeouts"
+ " TCPRenoRecoveryFail TCPSackRecoveryFail"
+ " TCPSchedulerFailed TCPRcvCollapsed"
+ " TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv"
+ " TCPDSACKOfoRecv"
+ " TCPAbortOnSyn TCPAbortOnData TCPAbortOnClose"
+ " TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger"
+ " TCPAbortFailed TCPMemoryPressures\n"
+ "TcpExt:");
+ for (i = 0;
+ i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long);
+ i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) net_statistics, i));
seq_putc(seq, '\n');
return 0;
}
err = -EFAULT;
kfree_skb(skb);
error:
- IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP_INC_STATS(OutDiscards);
return err;
}
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- if (flags & MSG_TRUNC)
- copied = skb->len;
done:
skb_free_datagram(sk, skb);
-out: return err ? err : copied;
+out: return err ? : copied;
}
static int raw_init(struct sock *sk)
struct proto raw_prot = {
.name = "RAW",
.close = raw_close,
- .connect = ip4_datagram_connect,
+ .connect = udp_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
rth->rt_flags = flags;
+#ifdef CONFIG_NET_FASTROUTE
+ if (netdev_fastroute && !(flags&(RTCF_NAT|RTCF_MASQ|RTCF_DOREDIRECT))) {
+ struct net_device *odev = rth->u.dst.dev;
+ if (odev != dev &&
+ dev->accept_fastpath &&
+ odev->mtu >= dev->mtu &&
+ dev->accept_fastpath(dev, &rth->u.dst) == 0)
+ rth->rt_flags |= RTCF_FAST;
+ }
+#endif
+
intern:
err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
done:
static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
struct file *filp, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ size_t *lenp)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(ctl, write, filp, buffer, lenp);
rt_cache_flush(flush_delay);
return 0;
}
;
*mssp = msstab[mssind] + 1;
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+ NET_INC_STATS_BH(SyncookiesSent);
return secure_tcp_syn_cookie(skb->nh.iph->saddr, skb->nh.iph->daddr,
skb->h.th->source, skb->h.th->dest,
if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
(mss = cookie_check(skb, cookie)) == 0) {
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+ NET_INC_STATS_BH(SyncookiesFailed);
goto out;
}
- NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+ NET_INC_STATS_BH(SyncookiesRecv);
req = tcp_openreq_alloc();
ret = NULL;
static
int ipv4_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int val = ipv4_devconf.forwarding;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && ipv4_devconf.forwarding != val)
inet_forward_change();
void tcp_enter_memory_pressure(void)
{
if (!tcp_memory_pressure) {
- NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+ NET_INC_STATS(TCPMemoryPressures);
tcp_memory_pressure = 1;
}
}
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-void cleanup_rbuf(struct sock *sk, int copied)
+static void cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_opt *tp = tcp_sk(sk);
int time_to_ack = 0;
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
- NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+ NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
len -= chunk;
copied += chunk;
}
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+ NET_INC_STATS_USER(TCPAbortOnClose);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
- NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_USER(TCPAbortOnData);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+ NET_INC_STATS_BH(TCPAbortOnLinger);
} else {
int tmo = tcp_fin_time(tp);
"sockets\n");
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+ NET_INC_STATS_BH(TCPAbortOnMemory);
}
}
atomic_inc(&tcp_orphan_count);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL_GPL(cleanup_rbuf);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static void init_bictcp(struct tcp_opt *tp)
-{
- tp->bictcp.cnt = 0;
-
- tp->bictcp.last_max_cwnd = 0;
- tp->bictcp.last_cwnd = 0;
- tp->bictcp.last_stamp = 0;
-}
-
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
{
/* This exciting event is worth to be remembered. 8) */
if (ts)
- NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+ NET_INC_STATS_BH(TCPTSReorder);
else if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+ NET_INC_STATS_BH(TCPRenoReorder);
else if (IsFack(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+ NET_INC_STATS_BH(TCPFACKReorder);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+ NET_INC_STATS_BH(TCPSACKReorder);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->sack_ok, tp->ca_state,
if (before(start_seq, ack)) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+ NET_INC_STATS_BH(TCPDSACKRecv);
} else if (num_sacks > 1 &&
!after(end_seq, ntohl(sp[1].end_seq)) &&
!before(start_seq, ntohl(sp[1].start_seq))) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+ NET_INC_STATS_BH(TCPDSACKOfoRecv);
}
/* D-SACK for already forgotten data...
tp->lost_out++;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
flag |= FLAG_DATA_SACKED;
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+ NET_INC_STATS_BH(TCPLostRetransmit);
}
}
}
tcp_set_ca_state(tp, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
-
- init_bictcp(tp);
}
void tcp_clear_retrans(struct tcp_opt *tp)
*/
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+ NET_INC_STATS_BH(TCPSACKReneging);
tcp_enter_loss(sk, 1);
tp->retransmits++;
DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
tcp_undo_cwr(tp, 1);
if (tp->ca_state == TCP_CA_Loss)
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+ NET_INC_STATS_BH(TCPFullUndo);
tp->undo_marker = 0;
}
if (tp->snd_una == tp->high_seq && IsReno(tp)) {
DBGUNDO(sk, tp, "D-SACK");
tcp_undo_cwr(tp, 1);
tp->undo_marker = 0;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+ NET_INC_STATS_BH(TCPDSACKUndo);
}
}
DBGUNDO(sk, tp, "Hoe");
tcp_undo_cwr(tp, 0);
- NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+ NET_INC_STATS_BH(TCPPartialUndo);
/* So... Do not make Hoe's retransmit yet.
* If the first packet was delayed, the rest
tp->lost_out = 0;
tp->left_out = tp->sacked_out;
tcp_undo_cwr(tp, 1);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
tp->retransmits = 0;
tp->undo_marker = 0;
if (!IsReno(tp))
tp->ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+ NET_INC_STATS_BH(TCPLoss);
}
/* D. Synchronize left_out to current state. */
/* Otherwise enter Recovery state */
if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+ NET_INC_STATS_BH(TCPRenoRecovery);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+ NET_INC_STATS_BH(TCPSackRecovery);
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
if (!sysctl_tcp_bic)
return tp->snd_cwnd;
- if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
- (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5))
- return tp->bictcp.cnt;
-
+ if (tp->bictcp.last_cwnd == tp->snd_cwnd)
+ return tp->bictcp.cnt; /* same cwnd, no update */
+
tp->bictcp.last_cwnd = tp->snd_cwnd;
- tp->bictcp.last_stamp = tcp_time_stamp;
/* start off normal */
if (tp->snd_cwnd <= sysctl_tcp_bic_low_window)
tcp_westwood_fast_bw(sk, skb);
flag |= FLAG_WIN_UPDATE;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+ NET_INC_STATS_BH(TCPHPAcks);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
- NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+ NET_INC_STATS_BH(TCPPureAcks);
flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
{
if (tp->sack_ok && sysctl_tcp_dsack) {
if (before(seq, tp->rcv_nxt))
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+ NET_INC_STATS_BH(TCPDSACKOldSent);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+ NET_INC_STATS_BH(TCPDSACKOfoSent);
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_enter_quickack_mode(tp);
if (tp->sack_ok && sysctl_tcp_dsack) {
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
/* A retransmit, 2nd most common case. Force an immediate ack. */
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
continue;
}
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
if (skb == tail || skb->h.th->syn || skb->h.th->fin)
return;
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
- NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+ NET_INC_STATS_BH(PruneCalled);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
- NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
+ NET_ADD_STATS_BH(OfoPruned,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
* drop receive data on the floor. It will get retransmitted
* and hopefully then we'll have sufficient space.
*/
- NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+ NET_INC_STATS_BH(RcvPruned);
/* Massive buffer overcommit. */
tp->pred_flags = 0;
tcp_data_snd_check(sk);
return 0;
} else { /* Header too small */
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
}
} else {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+ NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
}
}
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+ NET_INC_STATS_BH(TCPHPHits);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ TCP_INC_STATS_BH(TcpInErrs);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
return 0;
csum_error:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
discard:
__kfree_skb(skb);
if (tp->saw_tstamp && tp->rcv_tsecr &&
!between(tp->rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+ NET_INC_STATS_BH(PAWSActiveRejected);
goto reset_and_undo;
}
return 1;
init_westwood(sk);
- init_bictcp(tp);
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
case TCP_SYN_SENT:
init_westwood(sk);
- init_bictcp(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
* Check for a SYN in window.
*/
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
return 1;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
tcp_reset(sk);
return 1;
}
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/vserver/debug.h>
+
+#include <linux/vs_base.h>
extern int sysctl_ip_dynaddr;
int sysctl_tcp_tw_reuse;
if (twp) {
*twp = tw;
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
} else if (tw) {
/* Silly. Should hash-dance instead... */
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
tcp_tw_put(tw);
}
int err;
if (skb->len < (iph->ihl << 2) + 8) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
th->source, tcp_v4_iif(skb));
if (!sk) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS(OutOfWindowIcmps);
goto out;
}
BUG_TRAP(!req->sk);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
It can f.e. if SYNs crossed.
*/
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
sk->sk_err = err;
sk->sk_error_report(sk);
ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+ TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TcpOutRsts);
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TcpOutSegs);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
.dport = req->rmt_port } } };
if (ip_route_output_flow(&rt, &fl, sk, 0)) {
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
return NULL;
}
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
ip_rt_put(rt);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
return NULL;
}
return &rt->u.dst;
if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+ NET_INC_STATS_BH(PAWSPassiveRejected);
dst_release(dst);
goto drop_and_free;
}
drop_and_free:
tcp_openreq_free(req);
drop:
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
return 0;
}
return newsk;
exit_overflow:
- NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+ NET_INC_STATS_BH(ListenOverflows);
exit:
- NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+ NET_INC_STATS_BH(ListenDrops);
dst_release(dst);
return NULL;
}
return 0;
csum_err:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
}
goto discard_it;
/* Count it even if it's bad */
- TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+ TCP_INC_STATS_BH(TcpInSegs);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
* packet.
*/
if (inet_stream_ops.bind != inet_bind &&
- (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
+ (int) sk->sk_xid >= 0 && sk->sk_xid != skb->xid)
goto discard_it;
if (sk->sk_state == TCP_TIME_WAIT)
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
} else {
tcp_v4_send_reset(skb);
}
}
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
if (tp->bind_hash)
tcp_put_port(sk);
- /*
- * If sendmsg cached page exists, toss it.
- */
- if (sk->sk_sndmsg_page) {
- __free_page(sk->sk_sndmsg_page);
- sk->sk_sndmsg_page = NULL;
- }
-
atomic_dec(&tcp_sockets_allocated);
return 0;
req = req->dl_next;
while (1) {
while (req) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,req: %p [#%d] (from %d)",
- req->sk, req->sk->sk_xid, current->xid);
if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (req->class->family == st->family) {
sk = sk_next(sk);
get_sk:
sk_for_each_from(sk, node) {
- vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family == st->family) {
read_lock(&tcp_ehash[st->bucket].lock);
sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,egf: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family != st->family)
st->state = TCP_SEQ_STATE_TIME_WAIT;
tw_for_each(tw, node,
&tcp_ehash[st->bucket + tcp_ehash_size].chain) {
- vxdprintk(VXD_CBIT(net, 6),
- "tw: %p [#%d] (from %d)",
- tw, tw->tw_xid, current->xid);
if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
continue;
if (tw->tw_family != st->family)
tw = cur;
tw = tw_next(tw);
get_tw:
- while (tw && (tw->tw_family != st->family ||
- !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
+ while (tw && tw->tw_family != st->family &&
+ !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH)) {
tw = tw_next(tw);
}
if (tw) {
sk = sk_next(sk);
sk_for_each_from(sk, node) {
- vxdprintk(VXD_CBIT(net, 6),
- "sk,egn: %p [#%d] (from %d)",
- sk, sk->sk_xid, current->xid);
if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
continue;
if (sk->sk_family == st->family)
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
-#include <linux/vs_limit.h>
#include <linux/vs_socket.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
}
if (paws_reject)
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
if(!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
}
tcp_tw_count -= killed;
- NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
+ NET_ADD_STATS_BH(TimeWaited, killed);
return ret;
}
out:
if ((tcp_tw_count -= killed) == 0)
del_timer(&tcp_tw_timer);
- NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
+ NET_ADD_STATS_BH(TimeWaitKilled, killed);
spin_unlock(&tw_death_lock);
}
if ((filter = newsk->sk_filter) != NULL)
sk_filter_charge(newsk, filter);
- if (sk->sk_create_child)
- sk->sk_create_child(sk, newsk);
-
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newtp->snd_cwnd = 2;
newtp->snd_cwnd_cnt = 0;
+ newtp->bictcp.cnt = 0;
+ newtp->bictcp.last_max_cwnd = newtp->bictcp.last_cwnd = 0;
+
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
newsk->sk_xid = sk->sk_xid;
- vx_sock_inc(newsk);
set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
newsk->sk_nid = sk->sk_nid;
#ifdef INET_REFCNT_DEBUG
newsk->sk_no_largesend = 1;
tcp_vegas_init(newtp);
- TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
+ TCP_INC_STATS_BH(TcpPassiveOpens);
}
return newsk;
}
if (!(flg & TCP_FLAG_RST))
req->class->send_ack(skb, req);
if (paws_reject)
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
return NULL;
}
}
embryonic_reset:
- NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
+ NET_INC_STATS_BH(EmbryonicRsts);
if (!(flg & TCP_FLAG_RST))
req->class->send_reset(skb);
tp->rcv_wnd = new_win;
tp->rcv_wup = tp->rcv_nxt;
- /* Make sure we do not exceed the maximum possible
- * scaled window.
- */
- if (!tp->rcv_wscale)
- new_win = min(new_win, MAX_TCP_WINDOW);
- else
- new_win = min(new_win, (65535U << tp->rcv_wscale));
-
/* RFC1323 scaling applied */
new_win >>= tp->rcv_wscale;
if (skb->len != tcp_header_size)
tcp_event_data_sent(tp, skb, sk);
- TCP_INC_STATS(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(TcpOutSegs);
err = tp->af_specific->queue_xmit(skb, 0);
if (err <= 0)
if (err == 0) {
/* Update global TCP statistics. */
- TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
+ TCP_INC_STATS(TcpRetransSegs);
#if FASTRETRANS_DEBUG > 0
if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
if (tcp_retransmit_skb(sk, skb))
return;
if (tp->ca_state != TCP_CA_Loss)
- NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
+ NET_INC_STATS_BH(TCPFastRetrans);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
+ NET_INC_STATS_BH(TCPSlowStartRetrans);
if (skb ==
skb_peek(&sk->sk_write_queue))
if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
- NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
+ NET_INC_STATS_BH(TCPForwardRetrans);
}
}
/* NOTE: No TCP options attached and we never retransmit this. */
skb = alloc_skb(MAX_TCP_HEADER, priority);
if (!skb) {
- NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+ NET_INC_STATS(TCPAbortFailed);
return;
}
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb))
- NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+ NET_INC_STATS(TCPAbortFailed);
}
/* WARNING: This routine must only be called when we have already sent
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
- TCP_INC_STATS(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(TcpOutSegs);
return skb;
}
sk_charge_skb(sk, buff);
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
- TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
+ TCP_INC_STATS(TcpActiveOpens);
/* Timer for repeating the SYN until an answer. */
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
sk->sk_error_report(sk);
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+ NET_INC_STATS_BH(TCPAbortOnTimeout);
}
/* Do not allow orphaned sockets to eat all our resources.
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+ NET_INC_STATS_BH(TCPAbortOnMemory);
return 1;
}
return 0;
if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+ NET_INC_STATS_BH(DelayedACKLocked);
sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock;
}
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
- NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
- skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_BH(TCPSchedulerFailed,
+ skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->sk_backlog_rcv(sk, skb);
tp->ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+ NET_INC_STATS_BH(DelayedACKs);
}
TCP_CHECK_TIMER(sk);
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->sack_ok) {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
+ NET_INC_STATS_BH(TCPSackRecoveryFail);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
+ NET_INC_STATS_BH(TCPSackFailures);
} else {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
+ NET_INC_STATS_BH(TCPRenoRecoveryFail);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
+ NET_INC_STATS_BH(TCPRenoFailures);
}
} else if (tp->ca_state == TCP_CA_Loss) {
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
+ NET_INC_STATS_BH(TCPLossFailures);
} else {
- NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
+ NET_INC_STATS_BH(TCPTimeouts);
}
}
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
+#include <linux/vs_base.h>
/*
* Snmp MIB for the UDP layer
sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return; /* No socket for error */
}
if (free)
kfree(ipc.opt);
if (!err) {
- UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+ UDP_INC_STATS_USER(UdpOutDatagrams);
return len;
}
return err;
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
return err;
csum_copy_err:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
/* Clear queue. */
if (flags&MSG_PEEK) {
goto try_again;
}
+int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_opt *inet = inet_sk(sk);
+ struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+ struct rtable *rt;
+ u32 saddr;
+ int oif;
+ int err;
+
+
+ if (addr_len < sizeof(*usin))
+ return -EINVAL;
+
+ if (usin->sin_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ sk_dst_reset(sk);
+
+ oif = sk->sk_bound_dev_if;
+ saddr = inet->saddr;
+ if (MULTICAST(usin->sin_addr.s_addr)) {
+ if (!oif)
+ oif = inet->mc_index;
+ if (!saddr)
+ saddr = inet->mc_addr;
+ }
+ err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
+ RT_CONN_FLAGS(sk), oif,
+ IPPROTO_UDP,
+ inet->sport, usin->sin_port, sk);
+ if (err)
+ return err;
+ if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
+ ip_rt_put(rt);
+ return -EACCES;
+ }
+ if (!inet->saddr)
+ inet->saddr = rt->rt_src; /* Update source address */
+ if (!inet->rcv_saddr)
+ inet->rcv_saddr = rt->rt_src;
+ inet->daddr = rt->rt_dst;
+ inet->dport = usin->sin_port;
+ sk->sk_state = TCP_ESTABLISHED;
+ inet->id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+ return(0);
+}
int udp_disconnect(struct sock *sk, int flags)
{
} else
/* Must be an IKE packet.. pass it through */
return 1;
- break;
+
case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Check if this is a keepalive packet. If so, eat it. */
if (len == 1 && udpdata[0] == 0xff) {
} else
/* Must be an IKE packet.. pass it through */
return 1;
- break;
}
/* At this point we are sure that this is an ESPinUDP packet,
if (ret < 0) {
/* process the ESP packet */
ret = xfrm4_rcv_encap(skb, up->encap_type);
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP_INC_STATS_BH(UdpInDatagrams);
return -ret;
}
/* FALLTHROUGH -- it's a UDP Packet */
if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return -1;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return -1;
}
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP_INC_STATS_BH(UdpInDatagrams);
return 0;
}
if (udp_checksum_complete(skb))
goto csum_error;
- UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
+ UDP_INC_STATS_BH(UdpNoPorts);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
NIPQUAD(daddr),
ntohs(uh->dest)));
no_header:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
ntohs(uh->dest),
ulen));
drop:
- UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
}
struct proto udp_prot = {
.name = "UDP",
.close = udp_close,
- .connect = ip4_datagram_connect,
+ .connect = udp_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udp_destroy_sock,
}
#endif /* CONFIG_PROC_FS */
+EXPORT_SYMBOL(udp_connect);
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
-#include <net/icmp.h>
/* Add encapsulation header.
*
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
}
-static int xfrm4_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst;
- struct iphdr *iph = skb->nh.iph;
-
- if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
- goto out;
-
- IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
-
- if (!(iph->frag_off & htons(IP_DF)))
- goto out;
-
- dst = skb->dst;
- mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
- if (skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- ret = -EMSGSIZE;
- }
-out:
- return ret;
-}
-
int xfrm4_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
proto == x->id.proto &&
saddr->a4 == x->props.saddr.a4 &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ &&
- !x->id.spi) {
+ x->km.state == XFRM_STATE_ACQ) {
+ if (!x0)
+ x0 = x;
+ if (x->id.spi)
+ continue;
x0 = x;
break;
}
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include <net/ip.h>
-#include <net/protocol.h>
+#include <net/icmp.h>
+#include <net/inet_ecn.h>
+
+int xfrm4_tunnel_check_size(struct sk_buff *skb)
+{
+ int mtu, ret = 0;
+ struct dst_entry *dst;
+ struct iphdr *iph = skb->nh.iph;
+
+ if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
+ goto out;
+
+ IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+
+ if (!(iph->frag_off & htons(IP_DF)))
+ goto out;
+
+ dst = skb->dst;
+ mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
+ if (skb->len > mtu) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ ret = -EMSGSIZE;
+ }
+out:
+ return ret;
+}
static int ipip_output(struct sk_buff **pskb)
{
config IPV6_PRIVACY
bool "IPv6: Privacy Extensions (RFC 3041) support"
depends on IPV6
+ select CRYPTO
+ select CRYPTO_MD5
---help---
Privacy Extensions for Stateless Address Autoconfiguration in IPv6
support. With this option, additional periodically-alter
ip6_flowlabel.o ipv6_syms.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
- xfrm6_tunnel.o xfrm6_output.o
+ xfrm6_tunnel.o
ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
p.iph.ihl = 5;
p.iph.protocol = IPPROTO_IPV6;
p.iph.ttl = 64;
- ifr.ifr_ifru.ifru_data = (void __user *)&p;
+ ifr.ifr_ifru.ifru_data = (void*)&p;
oldfs = get_fs(); set_fs(KERNEL_DS);
err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
static
int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val && valp != &ipv6_devconf_dflt.forwarding) {
struct inet6_dev *idev = NULL;
.flags = INET_PROTOSW_REUSE,
};
+#define INETSW6_ARRAY_LEN (sizeof(inetsw6_array) / sizeof(struct inet_protosw))
+
void
inet6_register_protosw(struct inet_protosw *p)
{
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
-#include <linux/string.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
return 0;
}
-/**
- * ipv6_rearrange_rthdr - rearrange IPv6 routing header
- * @iph: IPv6 header
- * @rthdr: routing header
- *
- * Rearrange the destination address in @iph and the addresses in @rthdr
- * so that they appear in the order they will at the final destination.
- * See Appendix A2 of RFC 2402 for details.
- */
-static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
+static int ipv6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
{
- int segments, segments_left;
- struct in6_addr *addrs;
- struct in6_addr final_addr;
-
- segments_left = rthdr->segments_left;
- if (segments_left == 0)
- return;
- rthdr->segments_left = 0;
-
- /* The value of rthdr->hdrlen has been verified either by the system
- * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
- * packets. So we can assume that it is even and that segments is
- * greater than or equal to segments_left.
- *
- * For the same reason we can assume that this option is of type 0.
- */
- segments = rthdr->hdrlen >> 1;
+ u16 offset = sizeof(struct ipv6hdr);
+ struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ unsigned int packet_len = skb->tail - skb->nh.raw;
+ u8 nexthdr = skb->nh.ipv6h->nexthdr;
+ u8 nextnexthdr = 0;
- addrs = ((struct rt0_hdr *)rthdr)->addr;
- ipv6_addr_copy(&final_addr, addrs + segments - 1);
+ *nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
- addrs += segments - segments_left;
- memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
+ while (offset + 1 <= packet_len) {
- ipv6_addr_copy(addrs, &iph->daddr);
- ipv6_addr_copy(&iph->daddr, &final_addr);
-}
-
-static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
-{
- union {
- struct ipv6hdr *iph;
- struct ipv6_opt_hdr *opth;
- struct ipv6_rt_hdr *rth;
- char *raw;
- } exthdr = { .iph = iph };
- char *end = exthdr.raw + len;
- int nexthdr = iph->nexthdr;
-
- exthdr.iph++;
-
- while (exthdr.raw < end) {
switch (nexthdr) {
+
case NEXTHDR_HOP:
- case NEXTHDR_DEST:
- if (!zero_out_mutable_opts(exthdr.opth)) {
- LIMIT_NETDEBUG(printk(
- KERN_WARNING "overrun %sopts\n",
- nexthdr == NEXTHDR_HOP ?
- "hop" : "dest"));
- return -EINVAL;
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun hopopts\n"));
+ return 0;
}
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
- ipv6_rearrange_rthdr(iph, exthdr.rth);
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ ((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ break;
+
+ case NEXTHDR_DEST:
+ *nh_offset = offset;
+ offset += ipv6_optlen(exthdr);
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun destopt\n"));
+ return 0;
+ }
+ nexthdr = exthdr->nexthdr;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
+ case NEXTHDR_AUTH:
+ if (dir == XFRM_POLICY_OUT) {
+ memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
+ (((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
+ }
+ if (exthdr->nexthdr == NEXTHDR_DEST) {
+ offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
+ exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ nextnexthdr = exthdr->nexthdr;
+ if (!zero_out_mutable_opts(exthdr)) {
+ LIMIT_NETDEBUG(
+ printk(KERN_WARNING "overrun destopt\n"));
+ return 0;
+ }
+ }
+ return nexthdr;
default :
- return 0;
+ return nexthdr;
}
-
- nexthdr = exthdr.opth->nexthdr;
- exthdr.raw += ipv6_optlen(exthdr.opth);
}
- return 0;
+ return nexthdr;
}
int ah6_output(struct sk_buff **pskb)
{
int err;
- int extlen;
+ int hdr_len = sizeof(struct ipv6hdr);
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
+ struct ipv6hdr *iph = NULL;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
+ u16 nh_offset = 0;
u8 nexthdr;
- char tmp_base[8];
- struct {
- struct in6_addr daddr;
- char hdrs[0];
- } *tmp_ext;
- top_iph = (struct ipv6hdr *)(*pskb)->data;
- top_iph->payload_len = htons((*pskb)->len - sizeof(*top_iph));
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
- nexthdr = *(*pskb)->nh.raw;
- *(*pskb)->nh.raw = IPPROTO_AH;
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
- /* When there are no extension headers, we only need to save the first
- * 8 bytes of the base IP header.
- */
- memcpy(tmp_base, top_iph, sizeof(tmp_base));
-
- tmp_ext = NULL;
- extlen = (*pskb)->h.raw - (unsigned char *)(top_iph + 1);
- if (extlen) {
- extlen += sizeof(*tmp_ext);
- tmp_ext = kmalloc(extlen, GFP_ATOMIC);
- if (!tmp_ext) {
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+
+ iph = (*pskb)->nh.ipv6h;
+ (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ (*pskb)->nh.ipv6h->version = 6;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ (*pskb)->nh.ipv6h->nexthdr = IPPROTO_AH;
+ ipv6_addr_copy(&(*pskb)->nh.ipv6h->saddr,
+ (struct in6_addr *) &x->props.saddr);
+ ipv6_addr_copy(&(*pskb)->nh.ipv6h->daddr,
+ (struct in6_addr *) &x->id.daddr);
+ ah = (struct ip_auth_hdr*)((*pskb)->nh.ipv6h+1);
+ ah->nexthdr = IPPROTO_IPV6;
+ } else {
+ hdr_len = (*pskb)->h.raw - (*pskb)->nh.raw;
+ iph = kmalloc(hdr_len, GFP_ATOMIC);
+ if (!iph) {
err = -ENOMEM;
goto error;
}
- memcpy(tmp_ext, &top_iph->daddr, extlen);
- err = ipv6_clear_mutable_options(top_iph,
- extlen - sizeof(*tmp_ext) +
- sizeof(*top_iph));
- if (err)
+ memcpy(iph, (*pskb)->data, hdr_len);
+ (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
+ nexthdr = ipv6_clear_mutable_options(*pskb, &nh_offset, XFRM_POLICY_OUT);
+ if (nexthdr == 0)
goto error_free_iph;
- }
- ah = (struct ip_auth_hdr *)(*pskb)->h.raw;
- ah->nexthdr = nexthdr;
+ (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ ah = (struct ip_auth_hdr*)((*pskb)->nh.raw+hdr_len);
+ (*pskb)->h.raw = (unsigned char*) ah;
+ ah->nexthdr = nexthdr;
+ }
- top_iph->priority = 0;
- top_iph->flow_lbl[0] = 0;
- top_iph->flow_lbl[1] = 0;
- top_iph->flow_lbl[2] = 0;
- top_iph->hop_limit = 0;
+ (*pskb)->nh.ipv6h->priority = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[0] = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[1] = 0;
+ (*pskb)->nh.ipv6h->flow_lbl[2] = 0;
+ (*pskb)->nh.ipv6h->hop_limit = 0;
ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, *pskb, ah->auth_data);
- err = 0;
-
- memcpy(top_iph, tmp_base, sizeof(tmp_base));
- if (tmp_ext) {
- memcpy(&top_iph->daddr, tmp_ext, extlen);
-error_free_iph:
- kfree(tmp_ext);
+ if (x->props.mode) {
+ (*pskb)->nh.ipv6h->hop_limit = iph->hop_limit;
+ (*pskb)->nh.ipv6h->priority = iph->priority;
+ (*pskb)->nh.ipv6h->flow_lbl[0] = iph->flow_lbl[0];
+ (*pskb)->nh.ipv6h->flow_lbl[1] = iph->flow_lbl[1];
+ (*pskb)->nh.ipv6h->flow_lbl[2] = iph->flow_lbl[2];
+ if (x->props.flags & XFRM_STATE_NOECN)
+ IP6_ECN_clear((*pskb)->nh.ipv6h);
+ } else {
+ memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
+ (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
+ (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ kfree (iph);
}
+ (*pskb)->nh.raw = (*pskb)->data;
+
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ return NET_XMIT_BYPASS;
+error_free_iph:
+ kfree(iph);
error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
return err;
}
* Before process AH
* [IPv6][Ext1][Ext2][AH][Dest][Payload]
* |<-------------->| hdr_len
+ * |<------------------------>| cleared_hlen
*
* To erase AH:
* Keeping copy of cleared headers. After AH processing,
unsigned char *tmp_hdr = NULL;
u16 hdr_len;
u16 ah_hlen;
- int nexthdr;
+ u16 cleared_hlen;
+ u16 nh_offset = 0;
+ u8 nexthdr = 0;
+ u8 *prevhdr;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
goto out;
hdr_len = skb->data - skb->nh.raw;
+ cleared_hlen = hdr_len;
ah = (struct ipv6_auth_hdr*)skb->data;
ahp = x->data;
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
+ cleared_hlen += ah_hlen;
+
+ if (nexthdr == NEXTHDR_DEST) {
+ struct ipv6_opt_hdr *dsthdr = (struct ipv6_opt_hdr*)(skb->data + ah_hlen);
+ cleared_hlen += ipv6_optlen(dsthdr);
+ }
if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
if (!pskb_may_pull(skb, ah_hlen))
goto out;
- tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
+ tmp_hdr = kmalloc(cleared_hlen, GFP_ATOMIC);
if (!tmp_hdr)
goto out;
- memcpy(tmp_hdr, skb->nh.raw, hdr_len);
- if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
- goto out;
+ memcpy(tmp_hdr, skb->nh.raw, cleared_hlen);
+ ipv6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
skb->nh.ipv6h->priority = 0;
skb->nh.ipv6h->flow_lbl[0] = 0;
skb->nh.ipv6h->flow_lbl[1] = 0;
skb->nh.raw = skb_pull(skb, ah_hlen);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
+ if (nexthdr == NEXTHDR_DEST) {
+ memcpy(skb->nh.raw + hdr_len,
+ tmp_hdr + hdr_len + ah_hlen,
+ cleared_hlen - hdr_len - ah_hlen);
+ }
+ prevhdr = (u8*)(skb->nh.raw + nh_offset);
+ *prevhdr = nexthdr;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb_pull(skb, hdr_len);
skb->h.raw = skb->data;
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
-#include <net/ip6_route.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
- struct inet_opt *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr;
- struct dst_entry *dst;
- struct flowi fl;
- struct ip6_flowlabel *flowlabel = NULL;
- int addr_type;
- int err;
-
- if (usin->sin6_family == AF_INET) {
- if (__ipv6_only_sock(sk))
- return -EAFNOSUPPORT;
- err = ip4_datagram_connect(sk, uaddr, addr_len);
- goto ipv4_connected;
- }
-
- if (addr_len < SIN6_LEN_RFC2133)
- return -EINVAL;
-
- if (usin->sin6_family != AF_INET6)
- return -EAFNOSUPPORT;
-
- memset(&fl, 0, sizeof(fl));
- if (np->sndflow) {
- fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
- if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
- flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
- if (flowlabel == NULL)
- return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
- }
- }
-
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
- /*
- * connect to self
- */
- usin->sin6_addr.s6_addr[15] = 0x01;
- }
-
- daddr = &usin->sin6_addr;
-
- if (addr_type == IPV6_ADDR_MAPPED) {
- struct sockaddr_in sin;
-
- if (__ipv6_only_sock(sk)) {
- err = -ENETUNREACH;
- goto out;
- }
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = daddr->s6_addr32[3];
- sin.sin_port = usin->sin6_port;
-
- err = ip4_datagram_connect(sk,
- (struct sockaddr*) &sin,
- sizeof(sin));
-
-ipv4_connected:
- if (err)
- goto out;
-
- ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
-
- if (ipv6_addr_any(&np->saddr)) {
- ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
- inet->saddr);
- }
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
- inet->rcv_saddr);
- }
- goto out;
- }
-
- if (addr_type&IPV6_ADDR_LINKLOCAL) {
- if (addr_len >= sizeof(struct sockaddr_in6) &&
- usin->sin6_scope_id) {
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != usin->sin6_scope_id) {
- err = -EINVAL;
- goto out;
- }
- sk->sk_bound_dev_if = usin->sin6_scope_id;
- if (!sk->sk_bound_dev_if &&
- (addr_type & IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
- }
-
- /* Connect to link-local address requires an interface */
- if (!sk->sk_bound_dev_if) {
- err = -EINVAL;
- goto out;
- }
- }
-
- ipv6_addr_copy(&np->daddr, daddr);
- np->flow_label = fl.fl6_flowlabel;
-
- inet->dport = usin->sin6_port;
-
- /*
- * Check for a route to destination an obtain the
- * destination cache for it.
- */
-
- fl.proto = sk->sk_protocol;
- ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
- ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
- fl.fl_ip_dport = inet->dport;
- fl.fl_ip_sport = inet->sport;
-
- if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
-
- if (flowlabel) {
- if (flowlabel->opt && flowlabel->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
- } else if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
-
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
- goto out;
-
- /* source address lookup done in ip6_dst_lookup */
-
- if (ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &fl.fl6_src);
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
- inet->rcv_saddr = LOOPBACK4_IPV6;
- }
-
- ip6_dst_store(sk, dst,
- !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
- &np->daddr : NULL);
-
- sk->sk_state = TCP_ESTABLISHED;
-out:
- fl6_sock_release(flowlabel);
- return err;
-}
-
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
u16 port, u32 info, u8 *payload)
{
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
int esp6_output(struct sk_buff **pskb)
{
int err;
- int hdr_len;
+ int hdr_len = 0;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
+ struct ipv6hdr *iph = NULL, *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
int clen;
int alen;
int nfrags;
+ u8 *prevhdr;
+ u8 nexthdr = 0;
- esp = x->data;
- hdr_len = (*pskb)->h.raw - (*pskb)->data +
- sizeof(*esph) + esp->conf.ivlen;
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
- /* Strip IP+ESP header. */
- __skb_pull(*pskb, hdr_len);
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
+
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+ } else {
+ /* Strip IP header in transport mode. Save it. */
+ hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
+ nexthdr = *prevhdr;
+ *prevhdr = IPPROTO_ESP;
+ iph = kmalloc(hdr_len, GFP_ATOMIC);
+ if (!iph) {
+ err = -ENOMEM;
+ goto error;
+ }
+ memcpy(iph, (*pskb)->nh.raw, hdr_len);
+ __skb_pull(*pskb, hdr_len);
+ }
/* Now skb is pure payload to encrypt */
err = -ENOMEM;
/* Round to block size */
clen = (*pskb)->len;
+ esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
if ((nfrags = skb_cow_data(*pskb, clen-(*pskb)->len+alen, &trailer)) < 0) {
+ if (!x->props.mode && iph) kfree(iph);
goto error;
}
*(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
pskb_put(*pskb, trailer, clen - (*pskb)->len);
- top_iph = (struct ipv6hdr *)__skb_push(*pskb, hdr_len);
- esph = (struct ipv6_esp_hdr *)(*pskb)->h.raw;
- top_iph->payload_len = htons((*pskb)->len + alen - sizeof(*top_iph));
- *(u8*)(trailer->tail - 1) = *(*pskb)->nh.raw;
- *(*pskb)->nh.raw = IPPROTO_ESP;
+ if (x->props.mode) {
+ iph = (*pskb)->nh.ipv6h;
+ top_iph = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
+ esph = (struct ipv6_esp_hdr*)(top_iph+1);
+ *(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ if (x->props.flags & XFRM_STATE_NOECN)
+ IP6_ECN_clear(top_iph);
+ top_iph->nexthdr = IPPROTO_ESP;
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ ipv6_addr_copy(&top_iph->saddr,
+ (struct in6_addr *)&x->props.saddr);
+ ipv6_addr_copy(&top_iph->daddr,
+ (struct in6_addr *)&x->id.daddr);
+ } else {
+ esph = (struct ipv6_esp_hdr*)skb_push(*pskb, x->props.header_len);
+ (*pskb)->h.raw = (unsigned char*)esph;
+ top_iph = (struct ipv6hdr*)skb_push(*pskb, hdr_len);
+ memcpy(top_iph, iph, hdr_len);
+ kfree(iph);
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
+ *(u8*)(trailer->tail - 1) = nexthdr;
+ }
esph->spi = x->id.spi;
esph->seq_no = htonl(++x->replay.oseq);
pskb_put(*pskb, trailer, alen);
}
- err = 0;
+ (*pskb)->nh.raw = (*pskb)->data;
+
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ return NET_XMIT_BYPASS;
error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
return err;
}
u8 nexthdr[2];
struct scatterlist *sg = &esp->sgbuf[0];
u8 padlen;
+ u8 *prevhdr;
if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
- ret = nexthdr[1];
+ ip6_find_1stfragopt(skb, &prevhdr);
+ ret = *prevhdr = nexthdr[1];
}
out:
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
kfree_skb(skb);
return -1;
}
return 1;
}
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
kfree_skb(skb);
return -1;
}
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
skb->pkt_type != PACKET_HOST) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
kfree_skb(skb);
return -1;
}
}
if (hdr->type != IPV6_SRCRT_TYPE_0) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
return -1;
}
if (hdr->hdrlen & 0x01) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
return -1;
}
n = hdr->hdrlen >> 1;
if (hdr->segments_left > n) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
return -1;
}
kfree_skb(skb);
/* the copy is a forwarded packet */
if (skb2 == NULL) {
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS_BH(OutDiscards);
return -1;
}
*skbp = skb = skb2;
addr += i - 1;
if (ipv6_addr_is_multicast(addr)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
kfree_skb(skb);
return -1;
}
}
if (skb->dst->dev->flags&IFF_LOOPBACK) {
if (skb->nh.ipv6h->hop_limit <= 1) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
0, skb->dev);
kfree_skb(skb);
if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", skb->nh.raw[optoff+1]));
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
if (pkt_len <= IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return 0;
}
if (skb->nh.ipv6h->payload_len) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return 0;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+ IP6_INC_STATS_BH(InTruncatedPkts);
goto drop;
}
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
*/
dst = ip6_route_output(sk, fl);
if (dst->error) {
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
res = 1;
} else {
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6OutDestUnreachs, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
out_put:
if (likely(idev != NULL))
}
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutEchoReplies);
+ ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
out_put:
if (likely(idev != NULL))
struct icmp6hdr *hdr;
int type;
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InMsgs);
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
type = hdr->icmp6_type;
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InDestUnreachs, type - ICMPV6_DEST_UNREACH);
else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
- ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
+ ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InEchos, type - ICMPV6_ECHO_REQUEST);
switch (type) {
case ICMPV6_ECHO_REQUEST:
break;
case ICMPV6_MGM_REDUCTION:
- case ICMPV6_NI_QUERY:
- case ICMPV6_NI_REPLY:
case ICMPV6_MLD2_REPORT:
- case ICMPV6_DHAAD_REQUEST:
- case ICMPV6_DHAAD_REPLY:
- case ICMPV6_MOBILE_PREFIX_SOL:
- case ICMPV6_MOBILE_PREFIX_ADV:
break;
default:
return 0;
discard_it:
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
kfree_skb(skb);
return 0;
}
static struct timer_list ip6_fib_timer = TIMER_INITIALIZER(fib6_run_gc, 0, 0);
-struct fib6_walker_t fib6_walker_list = {
+static struct fib6_walker_t fib6_walker_list = {
.prev = &fib6_walker_list,
.next = &fib6_walker_list,
};
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Label S Owner Users Linger Expires "
- "Dst Opt\n");
+ seq_printf(seq, "Label S Owner Users Linger Expires "
+ "Dst Opt\n");
else
ip6fl_fl_seq_show(seq, v);
return 0;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
+ IP6_INC_STATS_BH(InReceives);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(InDiscards);
goto out;
}
goto err;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
goto truncated;
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr))){
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
goto drop;
}
hdr = skb->nh.ipv6h;
if (hdr->nexthdr == NEXTHDR_HOP) {
skb->h.raw = (u8*)(hdr+1);
if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
return 0;
}
hdr = skb->nh.ipv6h;
return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
truncated:
- IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+ IP6_INC_STATS_BH(InTruncatedPkts);
err:
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
drop:
kfree_skb(skb);
out:
if (ret > 0)
goto resubmit;
else if (ret == 0)
- IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP6_INC_STATS_BH(InDelivers);
} else {
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+ IP6_INC_STATS_BH(InUnknownProtos);
icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
}
} else {
- IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+ IP6_INC_STATS_BH(InDelivers);
kfree_skb(skb);
}
}
return 0;
discard:
- IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS_BH(InDiscards);
rcu_read_unlock();
kfree_skb(skb);
return 0;
struct ipv6hdr *hdr;
int deliver;
- IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
+ IP6_INC_STATS_BH(InMcastPkts);
hdr = skb->nh.ipv6h;
deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
} else if (dst->neighbour)
return dst->neighbour->output(skb);
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS_BH(OutNoRoutes);
kfree_skb(skb);
return -EINVAL;
ip6_dev_loopback_xmit);
if (skb->nh.ipv6h->hop_limit == 0) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
return 0;
}
}
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ IP6_INC_STATS(OutMcastPkts);
}
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
dst = ip6_route_output(skb->sk, &fl);
if (dst->error) {
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n"));
dst_release(dst);
kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return -ENOBUFS;
}
if (sk)
mtu = dst_pmtu(dst);
if ((skb->len <= mtu) || ipfragok) {
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
}
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
kfree_skb(skb);
return -EMSGSIZE;
}
goto error;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
- IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS(InDiscards);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
- IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+ IP6_INC_STATS(InDiscards);
goto drop;
}
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev);
- IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
- IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS_BH(InTooBigErrors);
+ IP6_INC_STATS_BH(FragFails);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
goto drop;
}
hdr->hop_limit--;
- IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_INC_STATS_BH(OutForwDatagrams);
return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);
error:
- IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+ IP6_INC_STATS_BH(InAddrErrors);
drop:
kfree_skb(skb);
return -EINVAL;
tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
if (!tmp_hdr) {
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return -ENOMEM;
}
kfree(tmp_hdr);
if (err == 0) {
- IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP6_INC_STATS(FragOKs);
return 0;
}
frag = skb;
}
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return err;
}
if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n"));
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
err = -ENOMEM;
goto fail;
}
* Put this fragment into the sending queue.
*/
- IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+ IP6_INC_STATS(FragCreates);
err = output(&frag);
if (err)
goto fail;
}
kfree_skb(skb);
- IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
+ IP6_INC_STATS(FragOKs);
return err;
fail:
kfree_skb(skb);
- IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+ IP6_INC_STATS(FragFails);
return err;
}
return 0;
error:
inet->cork.length -= length;
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return err;
}
ipv6_addr_copy(&hdr->daddr, final_dst);
skb->dst = dst_clone(&rt->u.dst);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
if (err) {
if (err > 0)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
}
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipcomp.h>
{
int err = 0;
u8 nexthdr = 0;
+ u8 *prevhdr;
int hdr_len = skb->h.raw - skb->nh.raw;
unsigned char *tmp_hdr = NULL;
struct ipv6hdr *iph;
iph = skb->nh.ipv6h;
iph->payload_len = htons(skb->len);
+ ip6_find_1stfragopt(skb, &prevhdr);
+ *prevhdr = nexthdr;
out:
if (tmp_hdr)
kfree(tmp_hdr);
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *top_iph;
- int hdr_len;
+ struct ipv6hdr *iph, *top_iph;
+ int hdr_len = 0;
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
+ u8 *prevhdr;
+ u8 nexthdr = 0;
int plen, dlen;
u8 *start, *scratch = ipcd->scratch;
- hdr_len = (*pskb)->h.raw - (*pskb)->data;
+ if ((*pskb)->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(pskb, 0);
+ if (err)
+ goto error_nolock;
+ }
+
+ spin_lock_bh(&x->lock);
+
+ err = xfrm_state_check(x, *pskb);
+ if (err)
+ goto error;
+
+ if (x->props.mode) {
+ err = xfrm6_tunnel_check_size(*pskb);
+ if (err)
+ goto error;
+
+ hdr_len = sizeof(struct ipv6hdr);
+ nexthdr = IPPROTO_IPV6;
+ iph = (*pskb)->nh.ipv6h;
+ top_iph = (struct ipv6hdr *)skb_push(*pskb, sizeof(struct ipv6hdr));
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ top_iph->nexthdr = IPPROTO_IPV6; /* initial */
+ top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
+ memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
+ (*pskb)->nh.raw = (*pskb)->data; /* == top_iph */
+ (*pskb)->h.raw = (*pskb)->nh.raw + hdr_len;
+ } else {
+ hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
+ nexthdr = *prevhdr;
+ }
/* check whether datagram len is larger than threshold */
if (((*pskb)->len - hdr_len) < ipcd->threshold) {
/* compression */
plen = (*pskb)->len - hdr_len;
dlen = IPCOMP_SCRATCH_SIZE;
- start = (*pskb)->h.raw;
+ start = (*pskb)->data + hdr_len;
err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
if (err) {
pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
- top_iph = (struct ipv6hdr *)(*pskb)->data;
+ top_iph = (*pskb)->nh.ipv6h;
+ if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
+ IP6_ECN_clear(top_iph);
top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
+ (*pskb)->nh.raw = (*pskb)->data; /* top_iph */
+ ip6_find_1stfragopt(*pskb, &prevhdr);
+ *prevhdr = IPPROTO_COMP;
- ipch = (struct ipv6_comp_hdr *)start;
- ipch->nexthdr = *(*pskb)->nh.raw;
+ ipch = (struct ipv6_comp_hdr *)((unsigned char *)top_iph + hdr_len);
+ ipch->nexthdr = nexthdr;
ipch->flags = 0;
ipch->cpi = htons((u16 )ntohl(x->id.spi));
- *(*pskb)->nh.raw = IPPROTO_COMP;
+ (*pskb)->h.raw = (unsigned char*)ipch;
out_ok:
- err = 0;
+ x->curlft.bytes += (*pskb)->len;
+ x->curlft.packets++;
+ spin_unlock_bh(&x->lock);
-error:
+ if (((*pskb)->dst = dst_pop(dst)) == NULL) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ err = NET_XMIT_BYPASS;
+
+out_exit:
return err;
+error:
+ spin_unlock_bh(&x->lock);
+error_nolock:
+ kfree_skb(*pskb);
+ goto out_exit;
}
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
+	if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
return;
spi = ntohl(ntohs(ipcomph->cpi));
struct inet6_dev *idev = in6_dev_get(skb->dev);
int err;
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h -
sizeof(struct ipv6hdr);
mldlen = skb->tail - skb->h.raw;
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
if (!err) {
- ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS);
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ ICMP6_INC_STATS(idev,Icmp6OutMsgs);
+ IP6_INC_STATS(OutMcastPkts);
} else
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
if (likely(idev != NULL))
in6_dev_put(idev);
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
snd_addr = addr;
if (type == ICMPV6_MGM_REDUCTION) {
snd_addr = &all_routers;
skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err);
if (skb == NULL) {
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return;
}
dev_queue_xmit);
if (!err) {
if (type == ICMPV6_MGM_REDUCTION)
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBREDUCTIONS);
+ ICMP6_INC_STATS(idev, Icmp6OutGroupMembReductions);
else
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
- IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+ ICMP6_INC_STATS(idev, Icmp6OutGroupMembResponses);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ IP6_INC_STATS(OutMcastPkts);
} else
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
if (likely(idev != NULL))
in6_dev_put(idev);
return;
out:
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
kfree_skb(skb);
}
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutNeighborAdvertisements);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutNeighborSolicits);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutRouterSolicits);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
buff->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS);
- ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ ICMP6_INC_STATS(idev, Icmp6OutRedirects);
+ ICMP6_INC_STATS(idev, Icmp6OutMsgs);
}
if (likely(idev != NULL))
};
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp)
{
struct net_device *dev = ctl->extra1;
struct inet6_dev *idev;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
- return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(ctl, write, filp, buffer, lenp);
}
#endif
return 0;
}
-static struct snmp_mib snmp6_ipstats_list[] = {
+static struct snmp_item snmp6_ipstats_list[] = {
/* ipv6 mib according to RFC 2465 */
- SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES),
- SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
- SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS),
- SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES),
- SNMP_MIB_ITEM("Ip6InAddrErrors", IPSTATS_MIB_INADDRERRORS),
- SNMP_MIB_ITEM("Ip6InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
- SNMP_MIB_ITEM("Ip6InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
- SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
- SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
- SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
- SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS),
- SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
- SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
- SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
- SNMP_MIB_ITEM("Ip6ReasmReqds", IPSTATS_MIB_REASMREQDS),
- SNMP_MIB_ITEM("Ip6ReasmOKs", IPSTATS_MIB_REASMOKS),
- SNMP_MIB_ITEM("Ip6ReasmFails", IPSTATS_MIB_REASMFAILS),
- SNMP_MIB_ITEM("Ip6FragOKs", IPSTATS_MIB_FRAGOKS),
- SNMP_MIB_ITEM("Ip6FragFails", IPSTATS_MIB_FRAGFAILS),
- SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES),
- SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
- SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
- SNMP_MIB_SENTINEL
+#define SNMP6_GEN(x) SNMP_ITEM(struct ipstats_mib, x, "Ip6" #x)
+ SNMP6_GEN(InReceives),
+ SNMP6_GEN(InHdrErrors),
+ SNMP6_GEN(InTooBigErrors),
+ SNMP6_GEN(InNoRoutes),
+ SNMP6_GEN(InAddrErrors),
+ SNMP6_GEN(InUnknownProtos),
+ SNMP6_GEN(InTruncatedPkts),
+ SNMP6_GEN(InDiscards),
+ SNMP6_GEN(InDelivers),
+ SNMP6_GEN(OutForwDatagrams),
+ SNMP6_GEN(OutRequests),
+ SNMP6_GEN(OutDiscards),
+ SNMP6_GEN(OutNoRoutes),
+ SNMP6_GEN(ReasmTimeout),
+ SNMP6_GEN(ReasmReqds),
+ SNMP6_GEN(ReasmOKs),
+ SNMP6_GEN(ReasmFails),
+ SNMP6_GEN(FragOKs),
+ SNMP6_GEN(FragFails),
+ SNMP6_GEN(FragCreates),
+ SNMP6_GEN(InMcastPkts),
+ SNMP6_GEN(OutMcastPkts),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
-static struct snmp_mib snmp6_icmp6_list[] = {
+static struct snmp_item snmp6_icmp6_list[] = {
/* icmpv6 mib according to RFC 2466
Exceptions: {In|Out}AdminProhibs are removed, because I see
OutRouterAdvertisements too.
OutGroupMembQueries too.
*/
- SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
- SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
- SNMP_MIB_ITEM("Icmp6InDestUnreachs", ICMP6_MIB_INDESTUNREACHS),
- SNMP_MIB_ITEM("Icmp6InPktTooBigs", ICMP6_MIB_INPKTTOOBIGS),
- SNMP_MIB_ITEM("Icmp6InTimeExcds", ICMP6_MIB_INTIMEEXCDS),
- SNMP_MIB_ITEM("Icmp6InParmProblems", ICMP6_MIB_INPARMPROBLEMS),
- SNMP_MIB_ITEM("Icmp6InEchos", ICMP6_MIB_INECHOS),
- SNMP_MIB_ITEM("Icmp6InEchoReplies", ICMP6_MIB_INECHOREPLIES),
- SNMP_MIB_ITEM("Icmp6InGroupMembQueries", ICMP6_MIB_INGROUPMEMBQUERIES),
- SNMP_MIB_ITEM("Icmp6InGroupMembResponses", ICMP6_MIB_INGROUPMEMBRESPONSES),
- SNMP_MIB_ITEM("Icmp6InGroupMembReductions", ICMP6_MIB_INGROUPMEMBREDUCTIONS),
- SNMP_MIB_ITEM("Icmp6InRouterSolicits", ICMP6_MIB_INROUTERSOLICITS),
- SNMP_MIB_ITEM("Icmp6InRouterAdvertisements", ICMP6_MIB_INROUTERADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6InNeighborSolicits", ICMP6_MIB_INNEIGHBORSOLICITS),
- SNMP_MIB_ITEM("Icmp6InNeighborAdvertisements", ICMP6_MIB_INNEIGHBORADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6InRedirects", ICMP6_MIB_INREDIRECTS),
- SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
- SNMP_MIB_ITEM("Icmp6OutDestUnreachs", ICMP6_MIB_OUTDESTUNREACHS),
- SNMP_MIB_ITEM("Icmp6OutPktTooBigs", ICMP6_MIB_OUTPKTTOOBIGS),
- SNMP_MIB_ITEM("Icmp6OutTimeExcds", ICMP6_MIB_OUTTIMEEXCDS),
- SNMP_MIB_ITEM("Icmp6OutParmProblems", ICMP6_MIB_OUTPARMPROBLEMS),
- SNMP_MIB_ITEM("Icmp6OutEchoReplies", ICMP6_MIB_OUTECHOREPLIES),
- SNMP_MIB_ITEM("Icmp6OutRouterSolicits", ICMP6_MIB_OUTROUTERSOLICITS),
- SNMP_MIB_ITEM("Icmp6OutNeighborSolicits", ICMP6_MIB_OUTNEIGHBORSOLICITS),
- SNMP_MIB_ITEM("Icmp6OutNeighborAdvertisements", ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS),
- SNMP_MIB_ITEM("Icmp6OutRedirects", ICMP6_MIB_OUTREDIRECTS),
- SNMP_MIB_ITEM("Icmp6OutGroupMembResponses", ICMP6_MIB_OUTGROUPMEMBRESPONSES),
- SNMP_MIB_ITEM("Icmp6OutGroupMembReductions", ICMP6_MIB_OUTGROUPMEMBREDUCTIONS),
- SNMP_MIB_SENTINEL
+#define SNMP6_GEN(x) SNMP_ITEM(struct icmpv6_mib, x, #x)
+ SNMP6_GEN(Icmp6InMsgs),
+ SNMP6_GEN(Icmp6InErrors),
+ SNMP6_GEN(Icmp6InDestUnreachs),
+ SNMP6_GEN(Icmp6InPktTooBigs),
+ SNMP6_GEN(Icmp6InTimeExcds),
+ SNMP6_GEN(Icmp6InParmProblems),
+ SNMP6_GEN(Icmp6InEchos),
+ SNMP6_GEN(Icmp6InEchoReplies),
+ SNMP6_GEN(Icmp6InGroupMembQueries),
+ SNMP6_GEN(Icmp6InGroupMembResponses),
+ SNMP6_GEN(Icmp6InGroupMembReductions),
+ SNMP6_GEN(Icmp6InRouterSolicits),
+ SNMP6_GEN(Icmp6InRouterAdvertisements),
+ SNMP6_GEN(Icmp6InNeighborSolicits),
+ SNMP6_GEN(Icmp6InNeighborAdvertisements),
+ SNMP6_GEN(Icmp6InRedirects),
+ SNMP6_GEN(Icmp6OutMsgs),
+ SNMP6_GEN(Icmp6OutDestUnreachs),
+ SNMP6_GEN(Icmp6OutPktTooBigs),
+ SNMP6_GEN(Icmp6OutTimeExcds),
+ SNMP6_GEN(Icmp6OutParmProblems),
+ SNMP6_GEN(Icmp6OutEchoReplies),
+ SNMP6_GEN(Icmp6OutRouterSolicits),
+ SNMP6_GEN(Icmp6OutNeighborSolicits),
+ SNMP6_GEN(Icmp6OutNeighborAdvertisements),
+ SNMP6_GEN(Icmp6OutRedirects),
+ SNMP6_GEN(Icmp6OutGroupMembResponses),
+ SNMP6_GEN(Icmp6OutGroupMembReductions),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
-static struct snmp_mib snmp6_udp6_list[] = {
- SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
- SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
- SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
- SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
- SNMP_MIB_SENTINEL
+static struct snmp_item snmp6_udp6_list[] = {
+#define SNMP6_GEN(x) SNMP_ITEM(struct udp_mib, Udp##x, "Udp6" #x)
+ SNMP6_GEN(InDatagrams),
+ SNMP6_GEN(NoPorts),
+ SNMP6_GEN(InErrors),
+ SNMP6_GEN(OutDatagrams),
+#undef SNMP6_GEN
+ SNMP_ITEM_SENTINEL
};
static unsigned long
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
- res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
+ res +=
+ *((unsigned long *) (((void *)per_cpu_ptr(mib[0], i)) +
+ offt));
+ res +=
+ *((unsigned long *) (((void *)per_cpu_ptr(mib[1], i)) +
+ offt));
}
return res;
}
static inline void
-snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
+snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_item *itemlist)
{
int i;
for (i=0; itemlist[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
- fold_field(mib, itemlist[i].entry));
+ fold_field(mib, itemlist[i].offset));
}
static int snmp6_seq_show(struct seq_file *seq, void *v)
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len;
out_free:
skb_free_datagram(sk, skb);
if (err)
goto error_fault;
- IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+ IP6_INC_STATS(OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
err = -EFAULT;
kfree_skb(skb);
error:
- IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+ IP6_INC_STATS(OutDiscards);
return err;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct proto rawv6_prot = {
.name = "RAW",
.close = rawv6_close,
- .connect = ip6_datagram_connect,
+ .connect = udpv6_connect,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
spin_unlock(&fq->lock);
fq_put(fq);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
}
}
fq_kill(fq);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmTimeout);
+ IP6_INC_STATS_BH(ReasmFails);
/* Send error only if the first segment arrived. */
if (fq->last_in&FIRST_IN && fq->fragments) {
return ip6_frag_intern(hash, fq);
oom:
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
return NULL;
}
((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
return;
}
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS_BH(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return;
return;
err:
- IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS(ReasmFails);
kfree_skb(skb);
}
if (head->ip_summed == CHECKSUM_HW)
head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP6_INC_STATS_BH(ReasmOKs);
fq->fragments = NULL;
*nhoffp = nhoff;
return 1;
if (net_ratelimit())
printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
return -1;
}
hdr = skb->nh.ipv6h;
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+ IP6_INC_STATS_BH(ReasmReqds);
/* Jumbo payload inhibits frag. header */
if (hdr->payload_len==0) {
- IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
- IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(InHdrErrors);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->h.raw += sizeof(struct frag_hdr);
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+ IP6_INC_STATS_BH(ReasmOKs);
*nhoffp = (u8*)fhdr - skb->nh.raw;
return 1;
return ret;
}
- IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+ IP6_INC_STATS_BH(ReasmFails);
kfree_skb(skb);
return -1;
}
/* Protected by rt6_lock. */
static struct dst_entry *ndisc_dst_gc_list;
static int ipv6_get_mtu(struct net_device *dev);
-
-static inline unsigned int ipv6_advmss(unsigned int mtu)
-{
- mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
-
- if (mtu < ip6_rt_min_advmss)
- mtu = ip6_rt_min_advmss;
-
- /*
- * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
- * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
- * IPV6_MAXPLEN is also valid and means: "any MSS,
- * rely only on pmtu discovery"
- */
- if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
- mtu = IPV6_MAXPLEN;
- return mtu;
-}
+static inline unsigned int ipv6_advmss(unsigned int mtu);
struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
return mtu;
}
+static inline unsigned int ipv6_advmss(unsigned int mtu)
+{
+ mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+
+ if (mtu < ip6_rt_min_advmss)
+ mtu = ip6_rt_min_advmss;
+
+ /*
+ * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+ * IPV6_MAXPLEN is also valid and means: "any MSS,
+ * rely only on pmtu discovery"
+ */
+ if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
+ mtu = IPV6_MAXPLEN;
+ return mtu;
+}
+
static int ipv6_get_hoplimit(struct net_device *dev)
{
int hoplimit = ipv6_devconf.hop_limit;
int ip6_pkt_discard(struct sk_buff *skb)
{
- IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+ IP6_INC_STATS(OutNoRoutes);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
kfree_skb(skb);
return 0;
static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(ctl, write, filp, buffer, lenp);
if (flush_delay < 0)
flush_delay = 0;
fib6_run_gc((unsigned long)flush_delay);
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
-#include <net/addrconf.h>
-#include <net/snmp.h>
#include <asm/uaccess.h>
/* Silly. Should hash-dance instead... */
local_bh_disable();
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+ NET_INC_STATS_BH(TimeWaitRecycled);
local_bh_enable();
tcp_tw_put(tw);
sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), Icmp6InErrors);
return;
}
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
BUG_TRAP(req->sk == NULL);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(OutOfWindowIcmps);
goto out;
}
case TCP_SYN_RECV: /* Cannot happen.
It can, it SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
sk->sk_err = err;
sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
/* sk = NULL, but it is safe for now. RST socket required. */
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+ TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TcpOutRsts);
return;
}
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TcpOutSegs);
return;
}
if (req)
tcp_openreq_free(req);
- TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS_BH(TcpAttemptFails);
return 0; /* don't send reset */
}
return newsk;
out_overflow:
- NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+ NET_INC_STATS_BH(ListenOverflows);
out:
- NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+ NET_INC_STATS_BH(ListenDrops);
if (opt && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
kfree_skb(skb);
return 0;
csum_err:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
/*
* Count it even if it's bad.
*/
- TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+ TCP_INC_STATS_BH(TcpInSegs);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
} else {
tcp_v6_send_reset(skb);
}
}
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
struct tcp_iter_state *st;
if (v == SEQ_START_TOKEN) {
- seq_puts(seq,
- " sl "
- "local_address "
- "remote_address "
- "st tx_queue rx_queue tr tm->when retrnsmt"
- " uid timeout inode\n");
+ seq_printf(seq,
+ " sl "
+ "local_address "
+ "remote_address "
+ "st tx_queue rx_queue tr tm->when retrnsmt"
+ " uid timeout inode\n");
goto out;
}
st = seq->private;
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
-#include <net/raw.h>
#include <net/inet_common.h>
#include <net/ip6_checksum.h>
*
*/
+int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct in6_addr *daddr;
+ struct dst_entry *dst;
+ struct flowi fl;
+ struct ip6_flowlabel *flowlabel = NULL;
+ int addr_type;
+ int err;
+
+ if (usin->sin6_family == AF_INET) {
+ if (__ipv6_only_sock(sk))
+ return -EAFNOSUPPORT;
+ err = udp_connect(sk, uaddr, addr_len);
+ goto ipv4_connected;
+ }
+
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ if (usin->sin6_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ memset(&fl, 0, sizeof(fl));
+ if (np->sndflow) {
+ fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ }
+ }
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
+ if (addr_type == IPV6_ADDR_ANY) {
+ /*
+ * connect to self
+ */
+ usin->sin6_addr.s6_addr[15] = 0x01;
+ }
+
+ daddr = &usin->sin6_addr;
+
+ if (addr_type == IPV6_ADDR_MAPPED) {
+ struct sockaddr_in sin;
+
+ if (__ipv6_only_sock(sk)) {
+ err = -ENETUNREACH;
+ goto out;
+ }
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ sin.sin_port = usin->sin6_port;
+
+ err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin));
+
+ipv4_connected:
+ if (err)
+ goto out;
+
+ ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
+
+ if (ipv6_addr_any(&np->saddr)) {
+ ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
+ inet->saddr);
+ }
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
+ inet->rcv_saddr);
+ }
+ goto out;
+ }
+
+ if (addr_type&IPV6_ADDR_LINKLOCAL) {
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ usin->sin6_scope_id) {
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id) {
+ err = -EINVAL;
+ goto out;
+ }
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
+ if (!sk->sk_bound_dev_if &&
+ (addr_type & IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+ }
+
+ /* Connect to link-local address requires an interface */
+ if (!sk->sk_bound_dev_if) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ ipv6_addr_copy(&np->daddr, daddr);
+ np->flow_label = fl.fl6_flowlabel;
+
+ inet->dport = usin->sin6_port;
+
+ /*
+ * Check for a route to destination an obtain the
+ * destination cache for it.
+ */
+
+ fl.proto = IPPROTO_UDP;
+ ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+ ipv6_addr_copy(&fl.fl6_src, &np->saddr);
+ fl.oif = sk->sk_bound_dev_if;
+ fl.fl_ip_dport = inet->dport;
+ fl.fl_ip_sport = inet->sport;
+
+ if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+
+ if (flowlabel) {
+ if (flowlabel->opt && flowlabel->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+ } else if (np->opt && np->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+
+ err = ip6_dst_lookup(sk, &dst, &fl);
+ if (err)
+ goto out;
+
+ /* source address lookup done in ip6_dst_lookup */
+
+ if (ipv6_addr_any(&np->saddr))
+ ipv6_addr_copy(&np->saddr, &fl.fl6_src);
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
+ inet->rcv_saddr = LOOPBACK4_IPV6;
+ }
+
+ ip6_dst_store(sk, dst,
+ !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
+ &np->daddr : NULL);
+
+ sk->sk_state = TCP_ESTABLISHED;
+out:
+ fl6_sock_release(flowlabel);
+ return err;
+}
+
static void udpv6_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
}
-
err = copied;
- if (flags & MSG_TRUNC)
- err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
skb_free_datagram(sk, skb);
if (flags & MSG_DONTWAIT) {
- UDP6_INC_STATS_USER(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_USER(UdpInErrors);
return -EAGAIN;
}
goto try_again;
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return 0;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return 0;
}
- UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+ UDP6_INC_STATS_BH(UdpInDatagrams);
return 0;
}
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
(unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
goto discard;
- UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
+ UDP6_INC_STATS_BH(UdpNoPorts);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len);
discard:
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ UDP6_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
}
out:
fl6_sock_release(flowlabel);
if (!err) {
- UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+ UDP6_INC_STATS_USER(UdpOutDatagrams);
return len;
}
return err;
struct proto udpv6_prot = {
.name = "UDP",
.close = udpv6_close,
- .connect = ip6_datagram_connect,
+ .connect = udpv6_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
- int nexthdr;
- unsigned int nhoff;
+ int nexthdr = 0;
+ u8 *prevhdr = NULL;
- nhoff = *nhoffp;
- nexthdr = skb->nh.raw[nhoff];
+ ip6_find_1stfragopt(skb, &prevhdr);
+ nexthdr = *prevhdr;
+ *nhoffp = prevhdr - skb->nh.raw;
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
if (nexthdr <= 0)
goto drop_unlock;
- skb->nh.raw[nhoff] = nexthdr;
-
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
+++ /dev/null
-/*
- * xfrm6_output.c - Common IPsec encapsulation code for IPv6.
- * Copyright (C) 2002 USAGI/WIDE Project
- * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/icmpv6.h>
-#include <net/inet_ecn.h>
-#include <net/ipv6.h>
-#include <net/xfrm.h>
-
-/* Add encapsulation header.
- *
- * In transport mode, the IP header and mutable extension headers will be moved
- * forward to make space for the encapsulation header.
- *
- * In tunnel mode, the top IP header will be constructed per RFC 2401.
- * The following fields in it shall be filled in by x->type->output:
- * payload_len
- *
- * On exit, skb->h will be set to the start of the encapsulation header to be
- * filled in by x->type->output and skb->nh will be set to the nextheader field
- * of the extension header directly preceding the encapsulation header, or in
- * its absence, that of the top IP header. The value of skb->data will always
- * point to the top IP header.
- */
-static void xfrm6_encap(struct sk_buff *skb)
-{
- struct dst_entry *dst = skb->dst;
- struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph, *top_iph;
-
- skb_push(skb, x->props.header_len);
- iph = skb->nh.ipv6h;
-
- if (!x->props.mode) {
- u8 *prevhdr;
- int hdr_len;
-
- hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
- skb->nh.raw = prevhdr - x->props.header_len;
- skb->h.raw = skb->data + hdr_len;
- memmove(skb->data, iph, hdr_len);
- return;
- }
-
- skb->nh.raw = skb->data;
- top_iph = skb->nh.ipv6h;
- skb->nh.raw = &top_iph->nexthdr;
- skb->h.ipv6h = top_iph + 1;
-
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- if (x->props.flags & XFRM_STATE_NOECN)
- IP6_ECN_clear(top_iph);
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- top_iph->nexthdr = IPPROTO_IPV6;
- top_iph->hop_limit = iph->hop_limit;
- ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
-}
-
-static int xfrm6_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst = skb->dst;
-
- mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
-
- if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- ret = -EMSGSIZE;
- }
-
- return ret;
-}
-
-int xfrm6_output(struct sk_buff **pskb)
-{
- struct sk_buff *skb = *pskb;
- struct dst_entry *dst = skb->dst;
- struct xfrm_state *x = dst->xfrm;
- int err;
-
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(pskb, 0);
- skb = *pskb;
- if (err)
- goto error_nolock;
- }
-
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, skb);
- if (err)
- goto error;
-
- if (x->props.mode) {
- err = xfrm6_tunnel_check_size(skb);
- if (err)
- goto error;
- }
-
- xfrm6_encap(skb);
-
- err = x->type->output(pskb);
- skb = *pskb;
- if (err)
- goto error;
-
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
-
- spin_unlock_bh(&x->lock);
-
- skb->nh.raw = skb->data;
-
- if (!(skb->dst = dst_pop(dst))) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- err = NET_XMIT_BYPASS;
-
-out_exit:
- return err;
-error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(skb);
- goto out_exit;
-}
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- dst_prev->output = xfrm6_output;
+ dst_prev->output = dst_prev->xfrm->type->output;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
#include <linux/ipsec.h>
#include <net/ipv6.h>
-static struct xfrm_state_afinfo xfrm6_state_afinfo;
+extern struct xfrm_state_afinfo xfrm6_state_afinfo;
static void
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
proto == x->id.proto &&
!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)x->props.saddr.a6) &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ &&
- !x->id.spi) {
+ x->km.state == XFRM_STATE_ACQ) {
+ if (!x0)
+ x0 = x;
+ if (x->id.spi)
+ continue;
x0 = x;
break;
}
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
+#include <net/icmp.h>
#include <net/ipv6.h>
-#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
+int xfrm6_tunnel_check_size(struct sk_buff *skb)
+{
+ int mtu, ret = 0;
+ struct dst_entry *dst = skb->dst;
+
+ mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+ if (skb->len > mtu) {
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ ret = -EMSGSIZE;
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL(xfrm6_tunnel_check_size);
+
static int xfrm6_tunnel_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- struct ipv6hdr *top_iph;
-
- top_iph = (struct ipv6hdr *)skb->data;
+ struct dst_entry *dst = skb->dst;
+ struct xfrm_state *x = dst->xfrm;
+ struct ipv6hdr *iph, *top_iph;
+ int err;
+
+ if ((err = xfrm6_tunnel_check_size(skb)) != 0)
+ goto error_nolock;
+
+ iph = skb->nh.ipv6h;
+
+ top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len);
+ top_iph->version = 6;
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ top_iph->nexthdr = IPPROTO_IPV6;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ top_iph->hop_limit = iph->hop_limit;
+ memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
+ memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
+ skb->nh.raw = skb->data;
+ skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);
- return 0;
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
+
+ spin_unlock_bh(&x->lock);
+
+ if ((skb->dst = dst_pop(dst)) == NULL) {
+ kfree_skb(skb);
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+
+ return NET_XMIT_BYPASS;
+
+error_nolock:
+ kfree_skb(skb);
+ return err;
}
static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
/* Check that we don't send out to big frames */
if (len > self->max_data_size) {
- IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
+ IRDA_DEBUG(2, "%s(), Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
copied = skb->len;
if (copied > size) {
- IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
+ IRDA_DEBUG(2, "%s(), Received truncated frame (%d < %d)!\n",
__FUNCTION__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
+ "Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %zd to %d bytes!\n",
+ "Chopping frame from %d to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
* us on that - Jean II */
static int do_devname(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
int ret;
- ret = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(table, write, filp, buffer, lenp);
if (ret == 0 && write) {
struct ias_value *val;
min_spi = range->sadb_spirange_min;
max_spi = range->sadb_spirange_max;
} else {
- min_spi = 0x100;
- max_spi = 0x0fffffff;
+ min_spi = htonl(0x100);
+ max_spi = htonl(0x0fffffff);
}
- xfrm_alloc_spi(x, htonl(min_spi), htonl(max_spi));
+ xfrm_alloc_spi(x, min_spi, max_spi);
if (x->id.spi)
resp_skb = pfkey_xfrm_state2msg(x, 0, 3);
}
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
module_init(nr_proto_init);
-module_param(nr_ndevs, int, 0);
+
+MODULE_PARM(nr_ndevs, "i");
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/vs_base.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
sk = pt->af_packet_priv;
po = pkt_sk(sk);
- if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
+ if (sk->sk_xid && sk->sk_xid != skb->xid)
goto drop;
skb->dev = dev;
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
-static HLIST_HEAD(rose_list);
+HLIST_HEAD(rose_list);
spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops rose_proto_ops;
}
module_init(rose_proto_init);
-module_param(rose_ndevs, int, 0);
+MODULE_PARM(rose_ndevs, "i");
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
struct rxrpc_message *msg)
{
struct msghdr msghdr;
+ mm_segment_t oldfs;
int ret;
_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
/* set up the message to be transmitted */
msghdr.msg_name = &conn->addr;
msghdr.msg_namelen = sizeof(conn->addr);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msghdr.msg_iov = (struct iovec *)msg->data;
+ msghdr.msg_iovlen = msg->dcount;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
htons(conn->addr.sin_port));
/* send the message */
- ret = kernel_sendmsg(conn->trans->socket, &msghdr,
- msg->data, msg->dcount, msg->dsize);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sock_sendmsg(conn->trans->socket, &msghdr, msg->dsize);
+ set_fs(oldfs);
+
if (ret < 0) {
msg->state = RXRPC_MSG_ERROR;
- } else {
+ }
+ else {
msg->state = RXRPC_MSG_SENT;
ret = 0;
struct sockaddr_in sin;
struct msghdr msghdr;
struct kvec iov[2];
+ mm_segment_t oldfs;
uint32_t _error;
int len, ret;
msghdr.msg_name = &sin;
msghdr.msg_namelen = sizeof(sin);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msghdr.msg_iov = (struct iovec *)iov;
+ msghdr.msg_iovlen = 2;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_DONTWAIT;
htons(sin.sin_port));
/* send the message */
- ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sock_sendmsg(trans->socket, &msghdr, len);
+ set_fs(oldfs);
_leave(" = %d", ret);
return ret;
struct list_head connq, *_p;
struct errormsg emsg;
struct msghdr msg;
+ mm_segment_t oldfs;
uint16_t port;
int local, err;
/* try and receive an error message */
msg.msg_name = &sin;
msg.msg_namelen = sizeof(sin);
+ msg.msg_iov = NULL;
+ msg.msg_iovlen = 0;
msg.msg_control = &emsg;
msg.msg_controllen = sizeof(emsg);
msg.msg_flags = 0;
- err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sock_recvmsg(trans->socket, &msg, 0,
MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
+ set_fs(oldfs);
if (err == -EAGAIN) {
_leave("");
#
# Traffic control configuration.
#
-choice
- prompt "Packet scheduler clock source"
- depends on NET_SCHED
- default NET_SCH_CLK_JIFFIES
- help
- Packet schedulers need a monotonic clock that increments at a static
- rate. The kernel provides several suitable interfaces, each with
- different properties:
-
- - high resolution (us or better)
- - fast to read (minimal locking, no i/o access)
- - synchronized on all processors
- - handles cpu clock frequency changes
-
- but nothing provides all of the above.
-
-config NET_SCH_CLK_JIFFIES
- bool "Timer interrupt"
- help
- Say Y here if you want to use the timer interrupt (jiffies) as clock
- source. This clock source is fast, synchronized on all processors and
- handles cpu clock frequency changes, but its resolution is too low
- for accurate shaping except at very low speed.
-
-config NET_SCH_CLK_GETTIMEOFDAY
- bool "gettimeofday"
- help
- Say Y here if you want to use gettimeofday as clock source. This clock
- source has high resolution, is synchronized on all processors and
- handles cpu clock frequency changes, but it is slow.
-
- Choose this if you need a high resolution clock source but can't use
- the CPU's cycle counter.
-
-config NET_SCH_CLK_CPU
- bool "CPU cycle counter"
- depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64
- help
- Say Y here if you want to use the CPU's cycle counter as clock source.
- This is a cheap and high resolution clock source, but on some
- architectures it is not synchronized on all processors and doesn't
- handle cpu clock frequency changes.
-
- The useable cycle counters are:
-
- x86/x86_64 - Timestamp Counter
- alpha - Cycle Counter
- sparc64 - %ticks register
- ppc64 - Time base
- ia64 - Interval Time Counter
-
- Choose this if your CPU's cycle counter is working properly.
-
-endchoice
-
config NET_SCH_CBQ
tristate "CBQ packet scheduler"
depends on NET_SCHED
testing applications or protocols.
To compile this driver as a module, choose M here: the module
- will be called sch_netem.
-
- If unsure, say N.
+ will be called sch_delay.
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
kfree(tp);
goto errout;
}
-
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
tp->next = *back;
*back = tp;
- qdisc_unlock_tree(dev);
-
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
goto errout;
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
*back = tp->next;
- qdisc_unlock_tree(dev);
-
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
tfilter_notify(skb, n, tp, fh_s, RTM_DELTFILTER);
tcf_destroy(tp);
err = 0;
return err;
}
-unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
- unsigned long cl)
-{
- unsigned long old_cl;
-
- tcf_tree_lock(tp);
- old_cl = __cls_set_class(clp, cl);
- tcf_tree_unlock(tp);
-
- return old_cl;
-}
-
-
static int
tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
u32 pid, u32 seq, unsigned flags, int event)
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
- read_lock_bh(&qdisc_tree_lock);
+ read_lock(&qdisc_tree_lock);
if (!tcm->tcm_parent)
q = dev->qdisc_sleeping;
else
if (cl)
cops->put(q, cl);
out:
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
dev_put(dev);
return skb->len;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
EXPORT_SYMBOL(unregister_tcf_proto_ops);
-EXPORT_SYMBOL(tcf_set_class);
struct tc_u_hnode *ht_up;
#ifdef CONFIG_NET_CLS_ACT
struct tc_action *action;
+#ifdef CONFIG_NET_CLS_IND
+ char indev[IFNAMSIZ];
+#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
struct tcf_police *police;
#endif
-#endif
-#ifdef CONFIG_NET_CLS_IND
- char indev[IFNAMSIZ];
#endif
u8 fshift;
struct tcf_result res;
struct tc_u_hnode *ht_down;
-#ifdef CONFIG_CLS_U32_PERF
- struct tc_u32_pcnt *pf;
-#endif
struct tc_u32_sel sel;
};
int sdepth = 0;
int off2 = 0;
int sel = 0;
-#ifdef CONFIG_CLS_U32_PERF
- int j;
-#endif
int i;
next_ht:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt +=1;
- j = 0;
+ n->sel.rcnt +=1;
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] +=1;
- j++;
+ key->kcnt +=1;
#endif
}
if (n->ht_down == NULL) {
if (n->sel.flags&TC_U32_TERMINAL) {
*res = n->res;
+#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_IND
/* yes, i know it sucks but the feature is
** optional dammit! - JHS */
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit +=1;
+ n->sel.rhit +=1;
#endif
-#ifdef CONFIG_NET_CLS_ACT
if (n->action) {
int pol_res = tcf_action_exec(skb, n->action);
if (skb->tc_classid > 0) {
#endif
if (n->ht_down)
n->ht_down->refcnt--;
-#ifdef CONFIG_CLS_U32_PERF
- if (n && (NULL != n->pf))
- kfree(n->pf);
-#endif
kfree(n);
return 0;
}
tcf_action_destroy(act, TCA_ACT_UNBIND);
}
-
-#else
-#ifdef CONFIG_NET_CLS_POLICE
- if (tb[TCA_U32_POLICE-1]) {
- struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
- sch_tree_lock(q);
- police = xchg(&n->police, police);
- sch_tree_unlock(q);
- tcf_police_release(police, TCA_ACT_UNBIND);
- }
-#endif
-#endif
#ifdef CONFIG_NET_CLS_IND
n->indev[0] = 0;
if(tb[TCA_U32_INDEV-1]) {
return -EINVAL;
}
sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
- printk("got IND %s\n",n->indev);
}
#endif
+#else
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_U32_POLICE-1]) {
+ struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
+ sch_tree_lock(q);
+ police = xchg(&n->police, police);
+ sch_tree_unlock(q);
+ tcf_police_release(police, TCA_ACT_UNBIND);
+ }
+#endif
+#endif
+
return 0;
}
s = RTA_DATA(tb[TCA_U32_SEL-1]);
+#ifdef CONFIG_CLS_U32_PERF
+ if (RTA_PAYLOAD(tb[TCA_U32_SEL-1]) <
+ (s->nkeys*sizeof(struct tc_u32_key)) + sizeof(struct tc_u32_sel)) {
+ printk("Please upgrade your iproute2 tools or compile proper options in!\n");
+ return -EINVAL;
+}
+#endif
n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
-
memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
-#ifdef CONFIG_CLS_U32_PERF
- n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
- if (n->pf == NULL) {
- kfree(n);
- return -ENOBUFS;
- }
- memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
-#endif
-
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
*arg = (unsigned long)n;
return 0;
}
-#ifdef CONFIG_CLS_U32_PERF
- if (n && (NULL != n->pf))
- kfree(n->pf);
-#endif
kfree(n);
return err;
}
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
+#ifdef CONFIG_NET_CLS_IND
+ if(strlen(n->indev)) {
+ struct rtattr * p_rta = (struct rtattr*)skb->tail;
+ RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
+ p_rta->rta_len = skb->tail - (u8*)p_rta;
+ }
+#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
}
#endif
-#endif
-
-#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev)) {
- struct rtattr * p_rta = (struct rtattr*)skb->tail;
- RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
- p_rta->rta_len = skb->tail - (u8*)p_rta;
- }
-#endif
-#ifdef CONFIG_CLS_U32_PERF
- RTA_PUT(skb, TCA_U32_PCNT,
- sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64),
- n->pf);
#endif
}
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
- if (TC_U32_KEY(n->handle) != 0) {
- if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
- if (tcf_action_copy_stats(skb,n->action))
- goto rtattr_failure;
- }
+ if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
+ if (tcf_action_copy_stats(skb,n->action))
+ goto rtattr_failure;
}
#else
#ifdef CONFIG_NET_CLS_POLICE
static int __init init_u32(void)
{
- printk("u32 classifier\n");
-#ifdef CONFIG_CLS_U32_PERF
- printk(" Perfomance counters on\n");
-#endif
-#ifdef CONFIG_NET_CLS_POLICE
- printk(" OLD policer on \n");
-#endif
-#ifdef CONFIG_NET_CLS_IND
- printk(" input device check on \n");
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- printk(" Actions configured \n");
-#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
-#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
{
struct Qdisc *q;
- list_for_each_entry(q, &dev->qdisc_list, list) {
+ for (q = dev->qdisc_list; q; q = q->next) {
if (q->handle == handle)
return q;
}
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
if (qdisc && qdisc->flags&TCQ_F_INGRES) {
oqdisc = dev->qdisc_ingress;
/* Prune old scheduler */
dev->qdisc = &noop_qdisc;
}
- qdisc_unlock_tree(dev);
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
if (dev->flags & IFF_UP)
dev_activate(dev);
{
int err;
struct rtattr *kind = tca[TCA_KIND-1];
- void *p = NULL;
- struct Qdisc *sch;
+ struct Qdisc *sch = NULL;
struct Qdisc_ops *ops;
int size;
if (ops == NULL)
goto err_out;
- /* ensure that the Qdisc and the private data are 32-byte aligned */
- size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
- size += ops->priv_size + QDISC_ALIGN_CONST;
+ size = sizeof(*sch) + ops->priv_size;
- p = kmalloc(size, GFP_KERNEL);
+ sch = kmalloc(size, GFP_KERNEL);
err = -ENOBUFS;
- if (!p)
+ if (!sch)
goto err_out;
- memset(p, 0, size);
- sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
- sch->padded = (char *)sch - (char *)p;
/* Grrr... Resolve race condition with module unload */
if (ops != qdisc_lookup_ops(kind))
goto err_out;
- INIT_LIST_HEAD(&sch->list);
+ memset(sch, 0, size);
+
skb_queue_head_init(&sch->q);
if (handle == TC_H_INGRESS)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- dev_hold(dev);
atomic_set(&sch->refcnt, 1);
sch->stats_lock = &dev->queue_lock;
if (handle == 0) {
* before we set a netdevice's qdisc pointer to sch */
smp_wmb();
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
- qdisc_lock_tree(dev);
- list_add_tail(&sch->list, &dev->qdisc_list);
- qdisc_unlock_tree(dev);
-
+ write_lock(&qdisc_tree_lock);
+ sch->next = dev->qdisc_list;
+ dev->qdisc_list = sch;
+ write_unlock(&qdisc_tree_lock);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
qdisc_new_estimator(&sch->stats, sch->stats_lock,
err_out:
*errp = err;
- if (p)
- kfree(p);
+ if (sch)
+ kfree(sch);
return NULL;
}
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev->ifindex;
+ tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = atomic_read(&q->refcnt);
continue;
if (idx > s_idx)
s_q_idx = 0;
- read_lock_bh(&qdisc_tree_lock);
- q_idx = 0;
- list_for_each_entry(q, &dev->qdisc_list, list) {
- if (q_idx < s_q_idx) {
- q_idx++;
+ read_lock(&qdisc_tree_lock);
+ for (q = dev->qdisc_list, q_idx = 0; q;
+ q = q->next, q_idx++) {
+ if (q_idx < s_q_idx)
continue;
- }
if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
goto done;
}
- q_idx++;
}
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
}
done:
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev->ifindex;
+ tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
tcm->tcm_info = 0;
return 0;
s_t = cb->args[0];
- t = 0;
-
- read_lock_bh(&qdisc_tree_lock);
- list_for_each_entry(q, &dev->qdisc_list, list) {
- if (t < s_t || !q->ops->cl_ops ||
- (tcm->tcm_parent &&
- TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
- t++;
+
+ read_lock(&qdisc_tree_lock);
+ for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
+ if (t < s_t) continue;
+ if (!q->ops->cl_ops) continue;
+ if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)
continue;
- }
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
arg.w.fn = qdisc_class_dump;
cb->args[1] = arg.w.count;
if (arg.w.stop)
break;
- t++;
}
- read_unlock_bh(&qdisc_tree_lock);
+ read_unlock(&qdisc_tree_lock);
cb->args[0] = t;
};
#endif
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
int psched_tod_diff(int delta_sec, int bound)
{
int delta;
EXPORT_SYMBOL(psched_tod_diff);
#endif
-#ifdef CONFIG_NET_SCH_CLK_CPU
+psched_time_t psched_time_base;
+
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
psched_tdiff_t psched_clock_per_hz;
int psched_clock_scale;
EXPORT_SYMBOL(psched_clock_per_hz);
EXPORT_SYMBOL(psched_clock_scale);
+#endif
-psched_time_t psched_time_base;
-cycles_t psched_time_mark;
+#ifdef PSCHED_WATCHER
+PSCHED_WATCHER psched_time_mark;
EXPORT_SYMBOL(psched_time_mark);
EXPORT_SYMBOL(psched_time_base);
-/*
- * Periodically adjust psched_time_base to avoid overflow
- * with 32-bit get_cycles(). Safe up to 4GHz CPU.
- */
static void psched_tick(unsigned long);
+
static struct timer_list psched_timer = TIMER_INITIALIZER(psched_tick, 0, 0);
static void psched_tick(unsigned long dummy)
{
- if (sizeof(cycles_t) == sizeof(u32)) {
- psched_time_t dummy_stamp;
- PSCHED_GET_TIME(dummy_stamp);
- psched_timer.expires = jiffies + 1*HZ;
- add_timer(&psched_timer);
- }
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
+ psched_time_t dummy_stamp;
+ PSCHED_GET_TIME(dummy_stamp);
+	/* This is OK for CPUs up to 4GHz */
+ psched_timer.expires = jiffies + 1*HZ;
+#else
+ unsigned long now = jiffies;
+ psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
+ psched_time_mark = now;
+ psched_timer.expires = now + 60*60*HZ;
+#endif
+ add_timer(&psched_timer);
}
+#endif
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
int __init psched_calibrate_clock(void)
{
psched_time_t stamp, stamp1;
long rdelay;
unsigned long stop;
+#ifdef PSCHED_WATCHER
psched_tick(0);
+#endif
stop = jiffies + HZ/10;
PSCHED_GET_TIME(stamp);
do_gettimeofday(&tv);
{
struct rtnetlink_link *link_p;
-#ifdef CONFIG_NET_SCH_CLK_CPU
+#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
if (psched_calibrate_clock() < 0)
return -1;
-#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
+#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
psched_tick_per_us = HZ<<PSCHED_JSCALE;
psched_us_per_tick = 1000000;
+#ifdef PSCHED_WATCHER
+ psched_tick(0);
+#endif
#endif
link_p = rtnetlink_links[PF_UNSPEC];
*/
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct atm_qdisc_data *) (sch)->data)
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
u32 classid)
{
- struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- for (flow = p->flows; flow; flow = flow->next)
+ for (flow = PRIV(sch)->flows; flow; flow = flow->next)
if (flow->classid == classid) break;
return flow;
}
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *head = &q->link;
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
static __inline__ void cbq_activate_class(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
int prio = cl->cpriority;
struct cbq_class *cl_tail;
static void cbq_deactivate_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
int prio = this->cpriority;
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
int len = skb->len;
int ret = NET_XMIT_SUCCESS;
struct cbq_class *cl = cbq_classify(skb, sch,&ret);
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
int ret;
static void cbq_ovl_classic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
struct cbq_class *this = cl;
do {
static void cbq_ovl_delay(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
cl->penalized = jiffies + cl->penalty;
static void cbq_undelay(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
long delay = 0;
unsigned pmask;
{
int len = skb->len;
struct Qdisc *sch = child->__parent;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
struct cbq_class *this_cl = cl;
if (cl->tparent == NULL)
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl_tail, *cl_prev, *cl;
struct sk_buff *skb;
int deficit;
static __inline__ struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct sk_buff *skb;
unsigned activemask;
cbq_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
psched_time_t now;
psched_tdiff_t incr;
static void cbq_sync_defmap(struct cbq_class *cl)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
struct cbq_class *split = cl->split;
unsigned h;
int i;
static void cbq_unlink_class(struct cbq_class *this)
{
struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
if (cl == this) {
static void cbq_link_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
unsigned h = cbq_hash(this->classid);
struct cbq_class *parent = this->tparent;
static unsigned int cbq_drop(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
static void
cbq_reset(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
int prio;
unsigned h;
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
if (wrr->allot)
cl->allot = wrr->allot;
static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct rtattr *tb[TCA_CBQ_MAX];
struct tc_ratespec *r;
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
unsigned char *b = skb->tail;
struct rtattr *rta;
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
static void
cbq_destroy(struct Qdisc* sch)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl;
unsigned h;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
unsigned long *arg)
{
int err;
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)*arg;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_CBQ_MAX];
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
if (cl->filters || cl->children || cl == &q->link)
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl == NULL)
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *p = (struct cbq_class*)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct cbq_sched_data *q = qdisc_priv(sch);
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
unsigned h;
if (arg->stop)
#endif
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct dsmark_qdisc_data *) (sch)->data)
/*
tcf_destroy(tp);
}
qdisc_destroy(p->q);
+ p->q = &noop_qdisc;
kfree(p->mask);
}
static int
bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->stats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
static int
pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
if (sch->q.qlen < q->limit) {
__skb_queue_tail(&sch->q, skb);
static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (void*)sch->data;
if (opt == NULL) {
unsigned int limit = sch->dev->tx_queue_len ? : 1;
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct fifo_sched_data *q = qdisc_priv(sch);
+ struct fifo_sched_data *q = (void*)sch->data;
unsigned char *b = skb->tail;
struct tc_fifo_qopt opt;
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
-#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
The idea is the following:
- enqueue, dequeue are serialized via top level device
spinlock dev->queue_lock.
- - tree walking is protected by read_lock_bh(qdisc_tree_lock)
+ - tree walking is protected by read_lock(qdisc_tree_lock)
and this lock is used only in process context.
- - updates to tree are made under rtnl semaphore or
- from softirq context (__qdisc_destroy rcu-callback)
- hence this lock needs local bh disabling.
+ - updates to tree are made only under rtnl semaphore,
+ hence this lock may be taken without local bh disabling.
qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
*/
rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
-void qdisc_lock_tree(struct net_device *dev)
-{
- write_lock_bh(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
-}
-
-void qdisc_unlock_tree(struct net_device *dev)
-{
- spin_unlock_bh(&dev->queue_lock);
- write_unlock_bh(&qdisc_tree_lock);
-}
-
/*
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
static int
pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
- list += prio2band[skb->priority&TC_PRIO_MAX];
+ list = ((struct sk_buff_head*)qdisc->data) +
+ prio2band[skb->priority&TC_PRIO_MAX];
if (list->qlen < qdisc->dev->tx_queue_len) {
__skb_queue_tail(list, skb);
pfifo_fast_dequeue(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
struct sk_buff *skb;
for (prio = 0; prio < 3; prio++, list++) {
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
- list += prio2band[skb->priority&TC_PRIO_MAX];
+ list = ((struct sk_buff_head*)qdisc->data) +
+ prio2band[skb->priority&TC_PRIO_MAX];
__skb_queue_head(list, skb);
qdisc->q.qlen++;
pfifo_fast_reset(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
for (prio=0; prio < 3; prio++)
skb_queue_purge(list+prio);
static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
int i;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct sk_buff_head *list;
+
+ list = ((struct sk_buff_head*)qdisc->data);
for (i=0; i<3; i++)
skb_queue_head_init(list+i);
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
- void *p;
struct Qdisc *sch;
- int size;
-
- /* ensure that the Qdisc and the private data are 32-byte aligned */
- size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
- size += ops->priv_size + QDISC_ALIGN_CONST;
+ int size = sizeof(*sch) + ops->priv_size;
- p = kmalloc(size, GFP_KERNEL);
- if (!p)
+ sch = kmalloc(size, GFP_KERNEL);
+ if (!sch)
return NULL;
- memset(p, 0, size);
+ memset(sch, 0, size);
- sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
- & ~QDISC_ALIGN_CONST);
- sch->padded = (char *)sch - (char *)p;
-
- INIT_LIST_HEAD(&sch->list);
skb_queue_head_init(&sch->q);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- dev_hold(dev);
sch->stats_lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
/* enqueue is accessed locklessly - make sure it's visible
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
- kfree(p);
+ kfree(sch);
return NULL;
}
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&qdisc->stats);
#endif
- write_lock(&qdisc_tree_lock);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
- write_unlock(&qdisc_tree_lock);
module_put(ops->owner);
- dev_put(qdisc->dev);
if (!(qdisc->flags&TCQ_F_BUILTIN))
- kfree((char *) qdisc - qdisc->padded);
+ kfree(qdisc);
}
/* Under dev->queue_lock and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
+ struct net_device *dev = qdisc->dev;
+
if (!atomic_dec_and_test(&qdisc->refcnt))
return;
- list_del(&qdisc->list);
+
+ if (dev) {
+ struct Qdisc *q, **qp;
+ for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
+ if (q == qdisc) {
+ *qp = q->next;
+ break;
+ }
+ }
+ }
+
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
+
}
+
void dev_activate(struct net_device *dev)
{
/* No queueing discipline is attached to device;
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- write_lock_bh(&qdisc_tree_lock);
- list_add_tail(&qdisc->list, &dev->qdisc_list);
- write_unlock_bh(&qdisc_tree_lock);
+
+ write_lock(&qdisc_tree_lock);
+ qdisc->next = dev->qdisc_list;
+ dev->qdisc_list = qdisc;
+ write_unlock(&qdisc_tree_lock);
+
} else {
qdisc = &noqueue_qdisc;
}
- write_lock_bh(&qdisc_tree_lock);
+ write_lock(&qdisc_tree_lock);
dev->qdisc_sleeping = qdisc;
- write_unlock_bh(&qdisc_tree_lock);
+ write_unlock(&qdisc_tree_lock);
}
spin_lock_bh(&dev->queue_lock);
void dev_init_scheduler(struct net_device *dev)
{
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
dev->qdisc = &noop_qdisc;
+ spin_unlock_bh(&dev->queue_lock);
dev->qdisc_sleeping = &noop_qdisc;
- INIT_LIST_HEAD(&dev->qdisc_list);
- qdisc_unlock_tree(dev);
+ dev->qdisc_list = NULL;
+ write_unlock(&qdisc_tree_lock);
dev_watchdog_init(dev);
}
{
struct Qdisc *qdisc;
- qdisc_lock_tree(dev);
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
qdisc = dev->qdisc_sleeping;
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
}
#endif
+ BUG_TRAP(dev->qdisc_list == NULL);
BUG_TRAP(!timer_pending(&dev->watchdog_timer));
- qdisc_unlock_tree(dev);
+ dev->qdisc_list = NULL;
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
}
EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
-EXPORT_SYMBOL(qdisc_lock_tree);
-EXPORT_SYMBOL(qdisc_unlock_tree);
+EXPORT_SYMBOL(qdisc_tree_lock);
{
psched_time_t now;
struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
unsigned long qave=0;
int i=0;
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
q= t->tab[(skb->tc_index&0xf)];
/* error checking here -- probably unnecessary */
PSCHED_SET_PASTPERFECT(q->qidlestart);
{
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
skb = __skb_dequeue(&sch->q);
if (skb) {
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
{
int i;
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t= (struct gred_sched *)sch->data;
__skb_queue_purge(&sch->q);
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct gred_sched_data *q;
struct tc_gred_qopt *ctl;
struct tc_gred_sopt *sopt;
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct tc_gred_sopt *sopt;
struct rtattr *tb[TCA_GRED_STAB];
struct rtattr *tb2[TCA_GRED_DPS];
struct rtattr *rta;
struct tc_gred_qopt *opt = NULL ;
struct tc_gred_qopt *dst;
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
struct gred_sched_data *q;
int i;
unsigned char *b = skb->tail;
static void gred_destroy(struct Qdisc *sch)
{
- struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched *table = (struct gred_sched *)sch->data;
int i;
for (i = 0;i < table->DPs; i++) {
/*
* macros
*/
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp) \
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
- * Clock source resolution (CONFIG_NET_SCH_CLK_*)
- * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- * CPU: resolution is between 0.5us and 1us.
- * GETTIMEOFDAY: resolution is exactly 1us.
+ * Time source resolution
+ * PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
+ * PSCHED_CPU: resolution is between 0.5us and 1us.
+ * PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct rtattr **tca, unsigned long *arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)*arg;
struct hfsc_class *parent = NULL;
struct rtattr *opt = tca[TCA_OPTIONS-1];
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl == NULL)
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
u64 next_time = 0;
long delay;
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct tc_hfsc_qopt *qopt;
unsigned int i;
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct tc_hfsc_qopt *qopt;
if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl, *next;
unsigned int i;
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
unsigned char *b = skb->tail;
struct tc_hfsc_qopt qopt;
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
struct sk_buff *skb;
u64 cur_time;
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
- struct hfsc_sched *q = qdisc_priv(sch);
+ struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
struct hfsc_class *cl;
unsigned int len;
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x30010 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
struct htb_class_inner {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
- /* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
- u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
int row_mask[TC_HTB_MAXDEPTH];
struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
- u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
/* self wait list - roots of wait PQs per row */
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct list_head *p;
if (TC_H_MAJ(handle) != sch->handle)
return NULL;
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.ptr[prio] == cl->node+prio) {
- /* we are removing child which is pointed to from
- parent feed - forget the pointer but remember
- classid */
- p->un.inner.last_ptr_id[prio] = cl->classid;
- p->un.inner.ptr[prio] = NULL;
- }
+ if (p->un.inner.ptr[prio] == cl->node+prio)
+ htb_next_rb_node(p->un.inner.ptr + prio);
htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int ret = NET_XMIT_SUCCESS;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_classify(skb,sch,&ret);
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb;
static void htb_rate_timer(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct list_head *p;
/* lock queue so that we can muck with it */
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
-#endif
q->jiffies);
diff = 1000;
}
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
-#endif
q->jiffies);
diff = 1000;
}
return HZ/10;
}
-/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
- is no such one exists. */
-static struct rb_node *
-htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
-{
- struct rb_node *r = NULL;
- while (n) {
- struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
- if (id == cl->classid) return n;
-
- if (id > cl->classid) {
- n = n->rb_right;
- } else {
- r = n;
- n = n->rb_left;
- }
- }
- return r;
-}
-
/**
* htb_lookup_leaf - returns next leaf class in DRR order
*
* Find leaf where current feed pointers points to.
*/
static struct htb_class *
-htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
- u32 *pid;
} stk[TC_HTB_MAXDEPTH],*sp = stk;
BUG_TRAP(tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
- sp->pid = pid;
for (i = 0; i < 65535; i++) {
- HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
-
- if (!*sp->pptr && *sp->pid) {
- /* ptr was invalidated but id is valid - try to recover
- the original or next ptr */
- *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
- }
- *sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
return cl;
(++sp)->root = cl->un.inner.feed[prio].rb_node;
sp->pptr = cl->un.inner.ptr+prio;
- sp->pid = cl->un.inner.last_ptr_id+prio;
}
}
BUG_TRAP(0);
struct sk_buff *skb = NULL;
struct htb_class *cl,*start;
/* look initial class up in the row */
- start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
- q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+ start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
do {
next:
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
- next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
- prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
-
+ next = htb_lookup_leaf (q->row[level]+prio,
+ prio,q->ptr[level]+prio);
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
}
q->nwc_hit++;
htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
- cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
- q->last_ptr_id[level]+prio);
-
+ cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
} while (cl != start);
if (likely(skb != NULL)) {
static void htb_delay_by(struct Qdisc *sch,long delay)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
if (delay <= 0) delay = 1;
if (unlikely(delay > 5*HZ)) {
if (net_ratelimit())
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int level;
long min_delay;
#ifdef HTB_DEBUG
/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
/* always caled under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int i;
HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
struct rtattr *tb[TCA_HTB_INIT];
struct tc_htb_glob *gopt;
int i;
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_glob gopt;
struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched*)sch->data;
#endif
struct htb_class *cl = (struct htb_class*)arg;
unsigned char *b = skb->tail;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
if (cl->prio_activity)
- htb_deactivate (qdisc_priv(sch),cl);
+ htb_deactivate ((struct htb_sched*)sch->data,cl);
/* TODO: is it correct ? Why CBQ doesn't do it ? */
sch->q.qlen -= (*old)->q.qlen;
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
struct htb_class *cl = htb_find(classid,sch);
HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
if (!cl->level) {
BUG_TRAP(cl->un.leaf.q);
/* always caled under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
HTB_DBG(0,1,"htb_destroy q=%p\n",q);
del_timer_sync (&q->timer);
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
u32 parentid, struct rtattr **tca, unsigned long *arg)
{
int err = -EINVAL;
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class*)*arg,*parent;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = htb_find (classid,sch);
HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
/*if (cl && !cl->level) return 0;
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
struct htb_class *cl = (struct htb_class *)arg;
HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
if (cl)
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct htb_sched *q = qdisc_priv(sch);
+ struct htb_sched *q = (struct htb_sched *)sch->data;
int i;
if (arg->stop)
#endif
-#define PRIV(sch) qdisc_priv(sch)
+#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
/* Thanks to Doron Oz for this hack
*/
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
psched_time_t now;
long delay;
PSCHED_TADD2(now, delay, cb->time_to_send);
/* Always queue at tail to keep packets in order */
- if (likely(q->delayed.qlen < q->limit)) {
- __skb_queue_tail(&q->delayed, skb);
- sch->q.qlen++;
- sch->stats.bytes += skb->len;
- sch->stats.packets++;
- return 0;
- }
-
- sch->stats.drops++;
- kfree_skb(skb);
- return NET_XMIT_DROP;
+ __skb_queue_tail(&q->delayed, skb);
+ sch->q.qlen++;
+ sch->stats.bytes += skb->len;
+ sch->stats.packets++;
+ return 0;
}
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int netem_drop(struct Qdisc* sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
*/
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct sk_buff *skb;
psched_time_t now;
static void netem_reset(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
qdisc_reset(q->qdisc);
skb_queue_purge(&q->delayed);
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
struct tc_netem_qopt *qopt = RTA_DATA(opt);
struct Qdisc *child;
int ret;
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
if (!opt)
return -EINVAL;
static void netem_destroy(struct Qdisc *sch)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
del_timer_sync(&q->timer);
- qdisc_destroy(q->qdisc);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct netem_sched_data *q = qdisc_priv(sch);
+ struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_netem_qopt qopt;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
- qopt.limit = q->limit;
+ qopt.limit = sch->dev->tx_queue_len;
qopt.loss = q->loss;
qopt.gap = q->gap;
return -1;
}
-static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
-
- if (cl != 1) /* only one class */
- return -ENOENT;
-
- tcm->tcm_handle |= TC_H_MIN(1);
- tcm->tcm_info = q->qdisc->handle;
-
- return 0;
-}
-
-static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
-
- if (new == NULL)
- new = &noop_qdisc;
-
- sch_tree_lock(sch);
- *old = xchg(&q->qdisc, new);
- qdisc_reset(*old);
- sch->q.qlen = 0;
- sch_tree_unlock(sch);
-
- return 0;
-}
-
-static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct netem_sched_data *q = qdisc_priv(sch);
- return q->qdisc;
-}
-
-static unsigned long netem_get(struct Qdisc *sch, u32 classid)
-{
- return 1;
-}
-
-static void netem_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct rtattr **tca, unsigned long *arg)
-{
- return -ENOSYS;
-}
-
-static int netem_delete(struct Qdisc *sch, unsigned long arg)
-{
- return -ENOSYS;
-}
-
-static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- if (!walker->stop) {
- if (walker->count >= walker->skip)
- if (walker->fn(sch, 1, walker) < 0) {
- walker->stop = 1;
- return;
- }
- walker->count++;
- }
-}
-
-static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
- return NULL;
-}
-
-static struct Qdisc_class_ops netem_class_ops = {
- .graft = netem_graft,
- .leaf = netem_leaf,
- .get = netem_get,
- .put = netem_put,
- .change = netem_change_class,
- .delete = netem_delete,
- .walk = netem_walk,
- .tcf_chain = netem_find_tcf,
- .dump = netem_dump_class,
-};
-
static struct Qdisc_ops netem_qdisc_ops = {
.id = "netem",
- .cl_ops = &netem_class_ops,
.priv_size = sizeof(struct netem_sched_data),
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
u32 band = skb->priority;
struct tcf_result res;
prio_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
struct Qdisc *qdisc;
static unsigned int prio_drop(struct Qdisc* sch)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
unsigned int len;
struct Qdisc *qdisc;
prio_reset(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
for (prio=0; prio<q->bands; prio++)
qdisc_reset(q->queues[prio]);
prio_destroy(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
struct tcf_proto *tp;
while ((tp = q->filter_list) != NULL) {
tcf_destroy(tp);
}
- for (prio=0; prio<q->bands; prio++)
+ for (prio=0; prio<q->bands; prio++) {
qdisc_destroy(q->queues[prio]);
+ q->queues[prio] = &noop_qdisc;
+ }
}
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
struct tc_prio_qopt *qopt = RTA_DATA(opt);
int i;
static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int i;
for (i=0; i<TCQ_PRIO_BANDS; i++)
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_prio_qopt opt;
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = arg - 1;
if (band >= q->bands)
static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = arg - 1;
if (band >= q->bands)
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
unsigned long band = TC_H_MIN(classid);
if (band - 1 >= q->bands)
static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg)
{
unsigned long cl = *arg;
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
static int prio_delete(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
return 0;
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
struct tcmsg *tcm)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
int prio;
if (arg->stop)
static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = qdisc_priv(sch);
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl)
return NULL;
static int
red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
psched_time_t now;
static int
red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
PSCHED_SET_PASTPERFECT(q->qidlestart);
red_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
skb = __skb_dequeue(&sch->q);
if (skb) {
static unsigned int red_drop(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
static void red_reset(struct Qdisc* sch)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
__skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
struct rtattr *tb[TCA_RED_STAB];
struct tc_red_qopt *ctl;
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct red_sched_data *q = qdisc_priv(sch);
+ struct red_sched_data *q = (struct red_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_red_qopt opt;
return -1;
}
+static void red_destroy(struct Qdisc *sch)
+{
+}
+
static struct Qdisc_ops red_qdisc_ops = {
.next = NULL,
.cl_ops = NULL,
.drop = red_drop,
.init = red_init,
.reset = red_reset,
+ .destroy = red_destroy,
.change = red_change,
.dump = red_dump,
.owner = THIS_MODULE,
static unsigned int sfq_drop(struct Qdisc *sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
sfq_index d = q->max_depth;
struct sk_buff *skb;
unsigned int len;
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static struct sk_buff *
sfq_dequeue(struct Qdisc* sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
struct sk_buff *skb;
sfq_index a, old_a;
static void sfq_perturbation(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
q->perturbation = net_random()&0x1F;
q->perturb_timer.expires = jiffies + q->perturb_period;
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
struct tc_sfq_qopt *ctl = RTA_DATA(opt);
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
int i;
init_timer(&q->perturb_timer);
static void sfq_destroy(struct Qdisc *sch)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
del_timer(&q->perturb_timer);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct tc_sfq_qopt opt;
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret;
if (skb->len > q->max_size) {
static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int tbf_drop(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
struct sk_buff *skb;
skb = q->qdisc->dequeue(q->qdisc);
static void tbf_reset(struct Qdisc* sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
int err = -EINVAL;
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
struct rtattr *tb[TCA_TBF_PTAB];
struct tc_tbf_qopt *qopt;
struct qdisc_rate_table *rtab = NULL;
static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
if (opt == NULL)
return -EINVAL;
static void tbf_destroy(struct Qdisc *sch)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
del_timer(&q->wd_timer);
qdisc_put_rtab(q->R_tab);
qdisc_destroy(q->qdisc);
+ q->qdisc = &noop_qdisc;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_tbf_qopt opt;
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data*)sch->data;
if (cl != 1) /* only one class */
return -ENOENT;
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
if (new == NULL)
new = &noop_qdisc;
static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct tbf_sched_data *q = qdisc_priv(sch);
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
return q->qdisc;
}
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data*)((q)->data))->next)
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT|IFF_BROADCAST)
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct net_device *dev = sch->dev;
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
__skb_queue_tail(&q->q, skb);
if (q->q.qlen <= dev->tx_queue_len) {
static int
teql_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
__skb_queue_head(&q->q, skb);
return 0;
static struct sk_buff *
teql_dequeue(struct Qdisc* sch)
{
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
struct sk_buff *skb;
skb = __skb_dequeue(&dat->q);
static void
teql_reset(struct Qdisc* sch)
{
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
skb_queue_purge(&dat->q);
sch->q.qlen = 0;
teql_destroy(struct Qdisc* sch)
{
struct Qdisc *q, *prev;
- struct teql_sched_data *dat = qdisc_priv(sch);
+ struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) {
{
struct net_device *dev = sch->dev;
struct teql_master *m = (struct teql_master*)sch->ops;
- struct teql_sched_data *q = qdisc_priv(sch);
+ struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
if (dev->hard_header_len > m->dev->hard_header_len)
return -EINVAL;
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
- struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+ struct teql_sched_data *q = (void*)dev->qdisc->data;
struct neighbour *mn = skb->dst->neighbour;
struct neighbour *n = q->ncache;
config IP_SCTP
tristate "The SCTP Protocol (EXPERIMENTAL)"
depends on IPV6 || IPV6=n
- select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
- select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
- select CRYPTO_SHA1 if SCTP_HMAC_SHA1
- select CRYPTO_MD5 if SCTP_HMAC_MD5
---help---
Stream Control Transmission Protocol
config SCTP_HMAC_SHA1
bool "HMAC-SHA1"
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
help
Enable the use of HMAC-SHA1 during association establishment. It
is advised to use either HMAC-MD5 or HMAC-SHA1.
config SCTP_HMAC_MD5
bool "HMAC-MD5"
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
help
Enable the use of HMAC-MD5 during association establishment. It is
advised to use either HMAC-MD5 or HMAC-SHA1.
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
- case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
offset = 0;
if ((whole > 1) || (whole && over))
- SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+ SCTP_INC_STATS_USER(SctpFragUsrMsgs);
/* Create chunks for all the full sized DATA chunks. */
for (i=0, len=first_len; i < whole; i++) {
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+ SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
if (val != cmp) {
/* CRC failure, dump it. */
- SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+ SCTP_INC_STATS_BH(SctpChecksumErrors);
return -1;
}
return 0;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
- SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+ SCTP_INC_STATS_BH(SctpInSCTPPacks);
sh = (struct sctphdr *) skb->h.raw;
if (!asoc) {
ep = __sctp_rcv_lookup_endpoint(&dest);
if (sctp_rcv_ootb(skb)) {
- SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+ SCTP_INC_STATS_BH(SctpOutOfBlues);
goto discard_release;
}
}
if (asoc) {
if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
}
sk = asoc->base.sk;
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+ NET_INC_STATS_BH(LockDroppedIcmps);
*epp = ep;
*app = asoc;
int err;
if (skb->len < ((iph->ihl << 2) + 8)) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
/* Warning: The sock lock is held. Remember to call
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
+ ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
goto out;
}
__FUNCTION__, skb, skb->len,
NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+ SCTP_INC_STATS(SctpOutSCTPPacks);
return ip6_xmit(sk, skb, &fl, np->opt, ipfragok);
}
return err;
no_route:
kfree_skb(nskb);
- IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS_BH(OutNoRoutes);
/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
sctp_outq_tail_data(q, chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+ SCTP_INC_STATS(SctpOutUnorderChunks);
else
- SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+ SCTP_INC_STATS(SctpOutOrderChunks);
q->empty = 0;
break;
};
} else {
__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
}
if (error < 0)
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
- struct list_head *lchunk, *lchunk1;
+ struct list_head *lchunk;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
- struct sctp_chunk *chunk, *chunk1;
+ struct sctp_chunk *chunk;
struct sctp_association *asoc;
int error = 0;
* the transmitted list.
*/
list_add_tail(lchunk, &transport->transmitted);
-
- /* Mark the chunk as ineligible for fast retransmit
- * after it is retransmitted.
- */
- chunk->fast_retransmit = 0;
-
*start_timer = 1;
q->empty = 0;
lchunk = sctp_list_dequeue(lqueue);
break;
};
-
- /* If we are here due to a retransmit timeout or a fast
- * retransmit and if there are any chunks left in the retransmit
- * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit.
- */
- if (rtx_timeout && !lchunk) {
- list_for_each(lchunk1, lqueue) {
- chunk1 = list_entry(lchunk1, struct sctp_chunk,
- transmitted_list);
- chunk1->fast_retransmit = 0;
- }
- }
}
return error;
if (ftsn_chunk) {
__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
}
}
#include <linux/init.h>
#include <net/sctp/sctp.h>
-struct snmp_mib sctp_snmp_list[] = {
- SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
- SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
- SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
- SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
- SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
- SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
- SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
- SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
- SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
- SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
- SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
- SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
- SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
- SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
- SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
- SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
- SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+static char *sctp_snmp_list[] = {
+#define SCTP_SNMP_ENTRY(x) #x
+ SCTP_SNMP_ENTRY(SctpCurrEstab),
+ SCTP_SNMP_ENTRY(SctpActiveEstabs),
+ SCTP_SNMP_ENTRY(SctpPassiveEstabs),
+ SCTP_SNMP_ENTRY(SctpAborteds),
+ SCTP_SNMP_ENTRY(SctpShutdowns),
+ SCTP_SNMP_ENTRY(SctpOutOfBlues),
+ SCTP_SNMP_ENTRY(SctpChecksumErrors),
+ SCTP_SNMP_ENTRY(SctpOutCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpOutOrderChunks),
+ SCTP_SNMP_ENTRY(SctpOutUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpInCtrlChunks),
+ SCTP_SNMP_ENTRY(SctpInOrderChunks),
+ SCTP_SNMP_ENTRY(SctpInUnorderChunks),
+ SCTP_SNMP_ENTRY(SctpFragUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpReasmUsrMsgs),
+ SCTP_SNMP_ENTRY(SctpOutSCTPPacks),
+ SCTP_SNMP_ENTRY(SctpInSCTPPacks),
+#undef SCTP_SNMP_ENTRY
};
/* Return the current value of a particular entry in the mib by adding its
{
int i;
- for (i = 0; sctp_snmp_list[i].name != NULL; i++)
- seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
- fold_field((void **)sctp_statistics,
- sctp_snmp_list[i].entry));
+ for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++)
+ seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i],
+ fold_field((void **)sctp_statistics, i));
return 0;
}
NIPQUAD(((struct rtable *)skb->dst)->rt_src),
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+ SCTP_INC_STATS(SctpOutSCTPPacks);
return ip_queue_xmit(skb, ipfragok);
}
if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
goto clean_up;
spin_lock_bh(&sctp_assocs_id_lock);
- error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
- &assoc_id);
+ error = idr_get_new(&sctp_assocs_id,
+ (void *)asoc,
+ &assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (error == -EAGAIN)
goto retry;
}
}
-/* Helper function to stop any pending T3-RTX timers */
-static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
- struct sctp_association *asoc)
-{
- struct sctp_transport *t;
- struct list_head *pos;
-
- list_for_each(pos, &asoc->peer.transport_addr_list) {
- t = list_entry(pos, struct sctp_transport, transports);
- if (timer_pending(&t->T3_rtx_timer) &&
- del_timer(&t->T3_rtx_timer)) {
- sctp_transport_put(t);
- }
- }
-}
-
-
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
return;
}
-/* Helper function to remove the association non-primary peer
- * transports.
- */
-static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
-{
- struct sctp_transport *t;
- struct list_head *pos;
- struct list_head *temp;
-
- list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
- t = list_entry(pos, struct sctp_transport, transports);
- if (!sctp_cmp_addr_exact(&t->ipaddr,
- &asoc->peer.primary_addr)) {
- sctp_assoc_del_peer(asoc, &t->ipaddr);
- }
- }
-
- return;
-}
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(cmd->obj.ptr));
-
- /* FIXME - Eventually come up with a cleaner way to
- * enabling COOKIE-ECHO + DATA bundling during
- * multihoming stale cookie scenarios, the following
- * command plays with asoc->peer.retran_path to
- * avoid the problem of sending the COOKIE-ECHO and
- * DATA in different paths, which could result
- * in the association being ABORTed if the DATA chunk
- * is processed first by the server. Checking the
- * init error counter simply causes this command
- * to be executed only during failed attempts of
- * association establishment.
- */
- if ((asoc->peer.retran_path !=
- asoc->peer.primary_path) &&
- (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) {
- sctp_add_cmd_sf(commands,
- SCTP_CMD_FORCE_PRIM_RETRAN,
- SCTP_NULL());
- }
-
break;
case SCTP_CMD_GEN_SHUTDOWN:
case SCTP_CMD_CLEAR_INIT_TAG:
asoc->peer.i.init_tag = 0;
break;
- case SCTP_CMD_DEL_NON_PRIMARY:
- sctp_cmd_del_non_primary(asoc);
- break;
- case SCTP_CMD_T3_RTX_TIMERS_STOP:
- sctp_cmd_t3_rtx_timers_stop(commands, asoc);
- break;
- case SCTP_CMD_FORCE_PRIM_RETRAN:
- t = asoc->peer.retran_path;
- asoc->peer.retran_path = asoc->peer.primary_path;
- error = sctp_outq_uncork(&asoc->outqueue);
- local_cork = 0;
- asoc->peer.retran_path = t;
- break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
+ SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
- SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpPassiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- /* Reset init error count upon receipt of COOKIE-ACK,
- * to avoid problems with the managemement of this
- * counter in stale cookie situations when a transition back
- * from the COOKIE-ECHOED state to the COOKIE-WAIT
- * state is performed.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
- SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
-
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
- SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+ SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SctpActiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
+ struct list_head *pos;
+ struct sctp_transport *t;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts;
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
- /* Stop pending T3-rtx and heartbeat timers */
- sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
-
- /* Delete non-primary peer ip addresses since we are transitioning
- * back to the COOKIE-WAIT state
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
-
- /* If we've sent any data bundled with COOKIE-ECHO we will need to
- * resend
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
- SCTP_TRANSPORT(asoc->peer.primary_path));
-
/* Cast away the const modifier, as we want to just
* rerun it through as a sideffect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC,
SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
+ /* If we've sent any data bundled with COOKIE-ECHO we need to
+ * resend.
+ */
+ list_for_each(pos, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(t));
+ }
+
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- int error;
+ sctp_datahdr_t *data_hdr;
+ struct sctp_chunk *err;
+ size_t datalen;
+ sctp_verb_t deliver;
+ int tmp;
+ __u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- error = sctp_eat_data(asoc, chunk, commands );
- switch (error) {
- case SCTP_IERROR_NO_ERROR:
- break;
- case SCTP_IERROR_HIGH_TSN:
- case SCTP_IERROR_BAD_STREAM:
+ data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+ tsn = ntohl(data_hdr->tsn);
+ SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+
+ if (!chunk->ecn_ce_done) {
+ struct sctp_af *af;
+ chunk->ecn_ce_done = 1;
+
+ af = sctp_get_af_specific(
+ ipver2af(chunk->skb->nh.iph->version));
+
+ if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+ /* Do real work as side effect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
+ }
+
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
goto discard_noforce;
- case SCTP_IERROR_DUP_TSN:
- case SCTP_IERROR_IGNORE_TSN:
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
goto discard_force;
- case SCTP_IERROR_NO_DATA:
- goto consume;
- default:
- BUG();
}
+ /* This is a new TSN. */
+
+ /* Discard if there is no room in the receive window.
+ * Actually, allow a little bit of overflow (up to a MTU).
+ */
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sizeof(sctp_data_chunk_t);
+
+ deliver = SCTP_CMD_CHUNK_ULP;
+
+ /* Think about partial delivery. */
+ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+ /* Even if we don't accept this chunk there is
+ * memory pressure.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
+ }
+
+ /* Spill over rwnd a little bit. Note: While allowed, this spill over
+ * seems a bit troublesome in that frag_point varies based on
+ * PMTU. In cases, such as loopback, this might be a rather
+ * large spill over.
+ */
+ if (!asoc->rwnd || asoc->rwnd_over ||
+ (datalen > asoc->rwnd + asoc->frag_point)) {
+
+ /* If this is the next TSN, consider reneging to make
+ * room. Note: Playing nice with a confused sender. A
+ * malicious sender can still eat up all our buffer
+ * space and in the future we may want to detect and
+ * do more drastic reneging.
+ */
+ if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+ (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+ deliver = SCTP_CMD_RENEGE;
+ } else {
+ SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
+ "rwnd: %d\n", tsn, datalen,
+ asoc->rwnd);
+ goto discard_force;
+ }
+ }
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_U32(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* If definitely accepting the DATA chunk, record its TSN, otherwise
+ * wait for renege processing.
+ */
+ if (SCTP_CMD_CHUNK_ULP == deliver)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+ /* Note: Some chunks may get overcounted (if we drop) or overcounted
+ * if we renege and the chunk arrives again.
+ */
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * If an endpoint receive a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream));
+ if (err)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ goto discard_noforce;
+ }
+
+ /* Send the data up to the user. Note: Schedule the
+ * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+ * chunk needs the updated rwnd.
+ */
+ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
}
return SCTP_DISPOSITION_DISCARD;
-consume:
- return SCTP_DISPOSITION_CONSUME;
-
}
/*
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- int error;
+ sctp_datahdr_t *data_hdr;
+ struct sctp_chunk *err;
+ size_t datalen;
+ int tmp;
+ __u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- error = sctp_eat_data(asoc, chunk, commands );
- switch (error) {
- case SCTP_IERROR_NO_ERROR:
- case SCTP_IERROR_HIGH_TSN:
- case SCTP_IERROR_DUP_TSN:
- case SCTP_IERROR_IGNORE_TSN:
- case SCTP_IERROR_BAD_STREAM:
- break;
- case SCTP_IERROR_NO_DATA:
- goto consume;
- default:
- BUG();
+ data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *) chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+ tsn = ntohl(data_hdr->tsn);
+
+ SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+ if (!chunk->ecn_ce_done) {
+ struct sctp_af *af;
+ chunk->ecn_ce_done = 1;
+
+ af = sctp_get_af_specific(
+ ipver2af(chunk->skb->nh.iph->version));
+
+ if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+ /* Do real work as side effect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
}
- /* Go a head and force a SACK, since we are shutting down. */
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
+ goto gen_shutdown;
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+ goto gen_shutdown;
+ }
+
+ /* This is a new TSN. */
+
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sizeof(sctp_data_chunk_t);
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_U32(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* We are accepting this DATA chunk. */
+
+ /* Record the fact that we have received this TSN. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SctpInUnorderChunks);
+ else
+ SCTP_INC_STATS(SctpInOrderChunks);
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * If an endpoint receive a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream));
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ }
+ /* Go ahead and force a SACK, since we are shutting down. */
+gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
-
-consume:
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpShutdowns);
+ SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
__u8 *ch_end;
int ootb_shut_ack = 0;
- SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+ SCTP_INC_STATS(SctpOutOfBlues);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return retval;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+ SCTP_INC_STATS(SctpShutdowns);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_INC_STATS(SctpAborteds);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_ABORT;
}
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
- len += (num_blocks + num_dup_tsns) * sizeof(__u32);
+ len = (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ SCTP_INC_STATS(SctpOutCtrlChunks);
} else
sctp_chunk_free (err_chunk);
}
}
-
-
-/* Process a data chunk */
-int sctp_eat_data(const struct sctp_association *asoc,
- struct sctp_chunk *chunk,
- sctp_cmd_seq_t *commands)
-{
- sctp_datahdr_t *data_hdr;
- struct sctp_chunk *err;
- size_t datalen;
- sctp_verb_t deliver;
- int tmp;
- __u32 tsn;
-
- data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
- skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
- tsn = ntohl(data_hdr->tsn);
- SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
- /* ASSERT: Now skb->data is really the user data. */
-
- /* Process ECN based congestion.
- *
- * Since the chunk structure is reused for all chunks within
- * a packet, we use ecn_ce_done to track if we've already
- * done CE processing for this packet.
- *
- * We need to do ECN processing even if we plan to discard the
- * chunk later.
- */
-
- if (!chunk->ecn_ce_done) {
- struct sctp_af *af;
- chunk->ecn_ce_done = 1;
-
- af = sctp_get_af_specific(
- ipver2af(chunk->skb->nh.iph->version));
-
- if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
- /* Do real work as sideffect. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
- SCTP_U32(tsn));
- }
- }
-
- tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
- if (tmp < 0) {
- /* The TSN is too high--silently discard the chunk and
- * count on it getting retransmitted later.
- */
- return SCTP_IERROR_HIGH_TSN;
- } else if (tmp > 0) {
- /* This is a duplicate. Record it. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
- return SCTP_IERROR_DUP_TSN;
- }
-
- /* This is a new TSN. */
-
- /* Discard if there is no room in the receive window.
- * Actually, allow a little bit of overflow (up to a MTU).
- */
- datalen = ntohs(chunk->chunk_hdr->length);
- datalen -= sizeof(sctp_data_chunk_t);
-
- deliver = SCTP_CMD_CHUNK_ULP;
-
- /* Think about partial delivery. */
- if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
-
- /* Even if we don't accept this chunk there is
- * memory pressure.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
- }
-
- /* Spill over rwnd a little bit. Note: While allowed, this spill over
- * seems a bit troublesome in that frag_point varies based on
- * PMTU. In cases, such as loopback, this might be a rather
- * large spill over.
- */
- if (!asoc->rwnd || asoc->rwnd_over ||
- (datalen > asoc->rwnd + asoc->frag_point)) {
-
- /* If this is the next TSN, consider reneging to make
- * room. Note: Playing nice with a confused sender. A
- * malicious sender can still eat up all our buffer
- * space and in the future we may want to detect and
- * do more drastic reneging.
- */
- if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
- (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
- SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
- deliver = SCTP_CMD_RENEGE;
- } else {
- SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
- "rwnd: %d\n", tsn, datalen,
- asoc->rwnd);
- return SCTP_IERROR_IGNORE_TSN;
- }
- }
-
- /*
- * Section 3.3.10.9 No User Data (9)
- *
- * Cause of error
- * ---------------
- * No User Data: This error cause is returned to the originator of a
- * DATA chunk if a received DATA chunk has no user data.
- */
- if (unlikely(0 == datalen)) {
- err = sctp_make_abort_no_data(asoc, chunk, tsn);
- if (err) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- }
- /* We are going to ABORT, so we might as well stop
- * processing the rest of the chunks in the packet.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_U32(SCTP_ERROR_NO_DATA));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
- return SCTP_IERROR_NO_DATA;
- }
-
- /* If definately accepting the DATA chunk, record its TSN, otherwise
- * wait for renege processing.
- */
- if (SCTP_CMD_CHUNK_ULP == deliver)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
-
- /* Note: Some chunks may get overcounted (if we drop) or overcounted
- * if we renege and the chunk arrives again.
- */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
- else
- SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
-
- /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
- *
- * If an endpoint receive a DATA chunk with an invalid stream
- * identifier, it shall acknowledge the reception of the DATA chunk
- * following the normal procedure, immediately send an ERROR chunk
- * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
- * and discard the DATA chunk.
- */
- if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
- err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
- &data_hdr->stream,
- sizeof(data_hdr->stream));
- if (err)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- return SCTP_IERROR_BAD_STREAM;
- }
-
- /* Send the data up to the user. Note: Schedule the
- * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
- * chunk needs the updated rwnd.
- */
- sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
-
- return SCTP_IERROR_NO_ERROR;
-}
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
- /*
- * API 7. Socket Options (setting the default value for the endpoint)
- * All options that support specific settings on an association by
- * filling in either an association id variable or a sockaddr_storage
- * SHOULD also support setting of the same value for the entire endpoint
- * (i.e. future associations). To accomplish this the following logic is
- * used when setting one of these options:
-
- * c) If neither the sockaddr_storage or association identification is
- * set i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and
- * the association identification is 0, the settings are a default
- * and to be applied to the endpoint (all future associations).
- */
-
- /* update default value for endpoint (all future associations) */
- if (!params.spp_assoc_id &&
- sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
- if (params.spp_hbinterval)
- sctp_sk(sk)->paddrparam.spp_hbinterval =
- params.spp_hbinterval;
- if (sctp_max_retrans_path)
- sctp_sk(sk)->paddrparam.spp_pathmaxrxt =
- params.spp_pathmaxrxt;
- return 0;
- }
-
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
if (copy_from_user(¶ms, optval, len))
return -EFAULT;
- /* If no association id is specified retrieve the default value
- * for the endpoint that will be used for all future associations
- */
- if (!params.spp_assoc_id &&
- sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
- params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval;
- params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt;
-
- goto done;
- }
-
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
*/
params.spp_pathmaxrxt = trans->error_threshold;
-done:
if (copy_to_user(optval, ¶ms, len))
return -EFAULT;
};
event = sctp_skb2event(f_frag);
- SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+ SCTP_INC_STATS(SctpReasmUsrMsgs);
return event;
}
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <net/tux.h>
#include <linux/wanrouter.h>
#include <linux/if_bridge.h>
#include <linux/init.h>
* in the operation structures but are done directly via the socketcall() multiplexor.
*/
-struct file_operations socket_file_ops = {
+static struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.aio_read = sock_aio_read,
* but we take care of internal coherence yet.
*/
-struct file * sock_map_file(struct socket *sock)
+int sock_map_fd(struct socket *sock)
{
- struct file *file;
+ int fd;
struct qstr this;
char name[32];
- file = get_empty_filp();
-
- if (!file)
- return ERR_PTR(-ENFILE);
-
- sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = SOCK_INODE(sock)->i_ino;
-
- file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
- if (!file->f_dentry) {
- put_filp(file);
- return ERR_PTR(-ENOMEM);
- }
- file->f_dentry->d_op = &sockfs_dentry_operations;
- d_add(file->f_dentry, SOCK_INODE(sock));
- file->f_vfsmnt = mntget(sock_mnt);
-file->f_mapping = file->f_dentry->d_inode->i_mapping;
-
- if (sock->file)
- BUG();
- sock->file = file;
- file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
- file->f_mode = FMODE_READ | FMODE_WRITE;
- file->f_flags = O_RDWR;
- file->f_pos = 0;
-
- return file;
-}
-
-int sock_map_fd(struct socket *sock)
-{
- int fd;
- struct file *file;
-
/*
* Find a file descriptor suitable for return to the user.
*/
-
+
fd = get_unused_fd();
- if (fd < 0)
- return fd;
-
- file = sock_map_file(sock);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
+ if (fd >= 0) {
+ struct file *file = get_empty_filp();
+
+ if (!file) {
+ put_unused_fd(fd);
+ fd = -ENFILE;
+ goto out;
+ }
+
+ sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = SOCK_INODE(sock)->i_ino;
+
+ file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
+ if (!file->f_dentry) {
+ put_filp(file);
+ put_unused_fd(fd);
+ fd = -ENOMEM;
+ goto out;
+ }
+ file->f_dentry->d_op = &sockfs_dentry_operations;
+ d_add(file->f_dentry, SOCK_INODE(sock));
+ file->f_vfsmnt = mntget(sock_mnt);
+ file->f_mapping = file->f_dentry->d_inode->i_mapping;
+
+ sock->file = file;
+ file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ file->f_mode = 3;
+ file->f_flags = O_RDWR;
+ file->f_pos = 0;
+ fd_install(fd, file);
}
- fd_install(fd, file);
-
+
+out:
return fd;
}
else
vx_sock_fail(sock->sk, size);
}
- vxdprintk(VXD_CBIT(net, 7),
- "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d",
+ vxdprintk("__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d\n",
sock, sock->sk,
(sock->sk)?sock->sk->sk_nx_info:0,
(sock->sk)?sock->sk->sk_vx_info:0,
(sock->sk)?sock->sk->sk_xid:0,
- (unsigned int)size, len);
+ size, len);
return len;
}
return ret;
}
-int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t size)
-{
- mm_segment_t oldfs = get_fs();
- int result;
-
- set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msg->msg_iov = (struct iovec *)vec,
- msg->msg_iovlen = num;
- result = sock_sendmsg(sock, msg, size);
- set_fs(oldfs);
- return result;
-}
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
if ((len >= 0) && sock->sk)
vx_sock_recv(sock->sk, len);
- vxdprintk(VXD_CBIT(net, 7),
- "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d",
+ vxdprintk("__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d\n",
sock, sock->sk,
(sock->sk)?sock->sk->sk_nx_info:0,
(sock->sk)?sock->sk->sk_vx_info:0,
(sock->sk)?sock->sk->sk_xid:0,
- (unsigned int)size, len);
+ size, len);
return len;
}
return ret;
}
-int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t size, int flags)
-{
- mm_segment_t oldfs = get_fs();
- int result;
-
- set_fs(KERNEL_DS);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msg->msg_iov = (struct iovec *)vec,
- msg->msg_iovlen = num;
- result = sock_recvmsg(sock, msg, size, flags);
- set_fs(oldfs);
- return result;
-}
-
static void sock_aio_dtor(struct kiocb *iocb)
{
kfree(iocb->private);
struct socket *sock;
int flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
sock = SOCKET_I(file->f_dentry->d_inode);
flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
}
out:
- if (sock->sk != sk)
- BUG();
release_sock(sock->sk);
return 0;
}
#endif
}
-int tux_Dprintk;
-int tux_TDprintk;
-
-#ifdef CONFIG_TUX_MODULE
-
-asmlinkage long (*sys_tux_ptr) (unsigned int action, user_req_t *u_info) = NULL;
-
-struct module *tux_module = NULL;
-spinlock_t tux_module_lock = SPIN_LOCK_UNLOCKED;
-
-asmlinkage long sys_tux (unsigned int action, user_req_t *u_info)
-{
- int ret;
-
- if (current->tux_info)
- return sys_tux_ptr(action, u_info);
-
- ret = -ENOSYS;
- spin_lock(&tux_module_lock);
- if (!tux_module)
- goto out_unlock;
- if (!try_module_get(tux_module))
- goto out_unlock;
- spin_unlock(&tux_module_lock);
-
- if (!sys_tux_ptr)
- TUX_BUG();
- ret = sys_tux_ptr(action, u_info);
-
- spin_lock(&tux_module_lock);
- module_put(tux_module);
-out_unlock:
- spin_unlock(&tux_module_lock);
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(tux_module);
-EXPORT_SYMBOL_GPL(tux_module_lock);
-EXPORT_SYMBOL_GPL(sys_tux_ptr);
-
-EXPORT_SYMBOL_GPL(tux_Dprintk);
-EXPORT_SYMBOL_GPL(tux_TDprintk);
-
-#endif
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
EXPORT_SYMBOL(sock_unregister);
EXPORT_SYMBOL(sock_wake_async);
EXPORT_SYMBOL(sockfd_lookup);
-EXPORT_SYMBOL(kernel_sendmsg);
-EXPORT_SYMBOL(kernel_recvmsg);
struct rpc_cred *ret;
get_group_info(current->group_info);
- acred.uid = current->fsuid;
- acred.gid = current->fsgid;
- acred.xid = current->xid;
+ acred.uid = XIDINO_UID(current->fsuid, current->xid);
+ acred.gid = XIDINO_GID(current->fsgid, current->xid);
acred.group_info = current->group_info;
dprintk("RPC: looking up %s cred\n",
struct rpc_cred *ret;
get_group_info(current->group_info);
- acred.uid = current->fsuid;
- acred.gid = current->fsgid;
- acred.xid = current->xid;
+ acred.uid = XIDINO_UID(current->fsuid, current->xid);
+ acred.gid = XIDINO_GID(current->fsgid, current->xid);
acred.group_info = current->group_info;
dprintk("RPC: %4d looking up %s cred\n",
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
+ char *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
#define MSG_BUF_MAXSIZE 1024
static ssize_t
-gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
struct xdr_netobj obj = {
.len = mlen,
struct rpc_rqst *req = task->tk_rqstp;
u32 maj_stat = 0;
struct xdr_netobj mic;
- struct kvec iov;
+ struct iovec iov;
struct xdr_buf verf_buf;
u32 service;
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
u32 seq, qop_state;
- struct kvec iov;
+ struct iovec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
u32 flav,len;
u32 *integ_len = NULL;
struct xdr_netobj mic;
u32 offset, *q;
- struct kvec *iov;
+ struct iovec *iov;
u32 maj_stat = 0;
int status = -EIO;
}
static inline int
-svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
+svc_safe_getnetobj(struct iovec *argv, struct xdr_netobj *o)
{
int l;
}
static inline int
-svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
+svc_safe_putnetobj(struct iovec *resv, struct xdr_netobj *o)
{
u32 *p;
struct xdr_buf rpchdr;
struct xdr_netobj checksum;
u32 flavor = 0;
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec iov;
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec iov;
/* data to compute the checksum over: */
iov.iov_base = rpcstart;
struct xdr_buf verf_data;
struct xdr_netobj mic;
u32 *p;
- struct kvec iov;
+ struct iovec iov;
svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
xdr_seq = htonl(seq);
static int
svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
u32 crlen;
struct xdr_netobj tmpobj;
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
struct xdr_buf *resbuf = &rqstp->rq_res;
struct xdr_buf integ_buf;
struct xdr_netobj mic;
- struct kvec *resv;
+ struct iovec *resv;
u32 *p;
int integ_offset, integ_len;
int stat = -EINVAL;
struct unx_cred {
struct rpc_cred uc_base;
gid_t uc_gid;
- xid_t uc_xid;
uid_t uc_puid; /* process uid */
gid_t uc_pgid; /* process gid */
- xid_t uc_pxid; /* process xid */
gid_t uc_gids[NFS_NGROUPS];
};
#define uc_uid uc_base.cr_uid
if (flags & RPC_TASK_ROOTCREDS) {
cred->uc_uid = cred->uc_puid = 0;
cred->uc_gid = cred->uc_pgid = 0;
- cred->uc_xid = cred->uc_pxid = current->xid;
cred->uc_gids[0] = NOGROUP;
} else {
int groups = acred->group_info->ngroups;
cred->uc_uid = acred->uid;
cred->uc_gid = acred->gid;
- cred->uc_xid = acred->xid;
+// cred->uc_puid = XIDINO_UID(current->uid, current->xid);
+// cred->uc_pgid = XIDINO_GID(current->gid, current->xid);
cred->uc_puid = current->uid;
cred->uc_pgid = current->gid;
- cred->uc_pxid = current->xid;
for (i = 0; i < groups; i++)
cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
if (i < NFS_NGROUPS)
if (cred->uc_uid != acred->uid
|| cred->uc_gid != acred->gid
- || cred->uc_xid != acred->xid
- || cred->uc_puid != current->uid
- || cred->uc_pgid != current->gid
- || cred->uc_pxid != current->xid)
+ || cred->uc_puid != XIDINO_UID(current->uid, current->xid)
+ || cred->uc_pgid != XIDINO_GID(current->gid, current->xid))
return 0;
groups = acred->group_info->ngroups;
struct rpc_clnt *clnt = task->tk_client;
struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred;
u32 *base, *hold;
- int i, tagxid;
+ int i;
*p++ = htonl(RPC_AUTH_UNIX);
base = p++;
* Copy the UTS nodename captured when the client was created.
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
- tagxid = task->tk_client->cl_tagxid;
/* Note: we don't use real uid if it involves raising privilege */
if (ruid && cred->uc_puid != 0 && cred->uc_pgid != 0) {
- *p++ = htonl((u32) XIDINO_UID(tagxid,
- cred->uc_puid, cred->uc_pxid));
- *p++ = htonl((u32) XIDINO_GID(tagxid,
- cred->uc_pgid, cred->uc_pxid));
+ *p++ = htonl((u32) cred->uc_puid);
+ *p++ = htonl((u32) cred->uc_pgid);
} else {
- *p++ = htonl((u32) XIDINO_UID(tagxid,
- cred->uc_uid, cred->uc_xid));
- *p++ = htonl((u32) XIDINO_GID(tagxid,
- cred->uc_gid, cred->uc_xid));
+ *p++ = htonl((u32) cred->uc_uid);
+ *p++ = htonl((u32) cred->uc_gid);
}
hold = p++;
for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
int err;
+ if (ppos != &filp->f_pos)
+ return -ESPIPE;
+
if (count == 0)
return 0;
int err;
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ if (ppos != &filp->f_pos)
+ return -ESPIPE;
+
if (count == 0)
return 0;
if (count >= sizeof(write_buf))
{
struct cache_reader *rp = NULL;
- nonseekable_open(inode, filp);
if (filp->f_mode & FMODE_READ) {
struct cache_detail *cd = PDE(inode)->data;
}
static struct file_operations cache_flush_operations = {
- .open = nonseekable_open,
.read = read_flush,
.write = write_flush,
};
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct kvec * argv = &rqstp->rq_arg.head[0];
- struct kvec * resv = &rqstp->rq_res.head[0];
+ struct iovec * argv = &rqstp->rq_arg.head[0];
+ struct iovec * resv = &rqstp->rq_res.head[0];
kxdrproc_t xdr;
u32 *statp;
u32 dir, prog, vers, proc,
static int
svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
int rv=0;
struct ip_map key, *ipm;
int
svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
+ struct iovec *argv = &rqstp->rq_arg.head[0];
+ struct iovec *resv = &rqstp->rq_res.head[0];
struct svc_cred *cred = &rqstp->rq_cred;
u32 slen, i;
int len = argv->iov_len;
* Generic recvfrom routine.
*/
static int
-svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
+svc_recvfrom(struct svc_rqst *rqstp, struct iovec *iov, int nr, int buflen)
{
+ mm_segment_t oldfs;
struct msghdr msg;
struct socket *sock;
int len, alen;
msg.msg_name = &rqstp->rq_addr;
msg.msg_namelen = sizeof(rqstp->rq_addr);
+ msg.msg_iov = iov;
+ msg.msg_iovlen = nr;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_DONTWAIT;
- len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+ len = sock_recvmsg(sock, &msg, buflen, MSG_DONTWAIT);
+ set_fs(oldfs);
/* sock_recvmsg doesn't fill in the name/namelen, so we must..
* possibly we should cache this in the svc_sock structure
struct svc_sock *svsk = rqstp->rq_sock;
struct svc_serv *serv = svsk->sk_server;
int len;
- struct kvec vec[RPCSVC_MAXPAGES];
+ struct iovec vec[RPCSVC_MAXPAGES];
int pnum, vlen;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
*/
if (svsk->sk_tcplen < 4) {
unsigned long want = 4 - svsk->sk_tcplen;
- struct kvec iov;
+ struct iovec iov;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
int sent;
u32 reclen;
- /* Set up the first element of the reply kvec.
- * Any other kvecs that may be in use have been taken
+ /* Set up the first element of the reply iovec.
+ * Any other iovecs that may be in use have been taken
* care of by the server implementation itself.
*/
reclen = htonl(0x80000000|((xbufp->len ) - 4));
static int
proc_dodebug(ctl_table *table, int write, struct file *file,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp)
{
char tmpbuf[20], c, *s;
char __user *p;
unsigned int value;
size_t left, len;
- if ((*ppos && !write) || !*lenp) {
+ if ((file->f_pos && !write) || !*lenp) {
*lenp = 0;
return 0;
}
done:
*lenp -= left;
- *ppos += *lenp;
+ file->f_pos += *lenp;
return 0;
}
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
- struct kvec *tail = xdr->tail;
+ struct iovec *tail = xdr->tail;
u32 *p;
xdr->pages = pages;
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
- struct kvec *head = xdr->head;
- struct kvec *tail = xdr->tail;
+ struct iovec *head = xdr->head;
+ struct iovec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
}
/*
- * Realign the kvec if the server missed out some reply elements
+ * Realign the iovec if the server missed out some reply elements
* (such as post-op attributes,...)
* Note: This is a simple implementation that assumes that
* len <= iov->iov_len !!!
* The RPC header (assumed to be the 1st element in the iov array)
* is not shifted.
*/
-void xdr_shift_iovec(struct kvec *iov, int nr, size_t len)
+void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
{
- struct kvec *pvec;
+ struct iovec *pvec;
for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
- struct kvec *svec = pvec - 1;
+ struct iovec *svec = pvec - 1;
if (len > pvec->iov_len) {
printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
}
/*
- * Map a struct xdr_buf into an kvec array.
+ * Map a struct xdr_buf into an iovec array.
*/
-int xdr_kmap(struct kvec *iov_base, struct xdr_buf *xdr, size_t base)
+int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, size_t base)
{
- struct kvec *iov = iov_base;
+ struct iovec *iov = iov_base;
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
unsigned int len, pglen = xdr->page_len;
int err, ret = 0;
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ mm_segment_t oldfs;
len = xdr->head[0].iov_len;
if (base < len || (addr != NULL && base == 0)) {
- struct kvec iov = {
+ struct iovec iov = {
.iov_base = xdr->head[0].iov_base + base,
.iov_len = len - base,
};
.msg_namelen = addrlen,
.msg_flags = msgflags,
};
+
+ if (iov.iov_len != 0) {
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ }
if (xdr->len > len)
msg.msg_flags |= MSG_MORE;
-
- if (iov.iov_len != 0)
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- else
- err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
+ oldfs = get_fs(); set_fs(get_ds());
+ err = sock_sendmsg(sock, &msg, iov.iov_len);
+ set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
copy_tail:
len = xdr->tail[0].iov_len;
if (base < len) {
- struct kvec iov = {
+ struct iovec iov = {
.iov_base = xdr->tail[0].iov_base + base,
.iov_len = len - base,
};
struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
.msg_flags = msgflags,
};
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+ oldfs = get_fs(); set_fs(get_ds());
+ err = sock_sendmsg(sock, &msg, iov.iov_len);
+ set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
- * Shrinks XDR buffer's header kvec buf->head[0] by
+ * Shrinks XDR buffer's header iovec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
- struct kvec *head, *tail;
+ struct iovec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
- struct kvec *tail;
+ struct iovec *tail;
size_t copy;
char *p;
unsigned int pglen = buf->page_len;
* @p: current pointer inside XDR buffer
*
* Note: at the moment the RPC client only passes the length of our
- * scratch buffer in the xdr_buf's header kvec. Previously this
+ * scratch buffer in the xdr_buf's header iovec. Previously this
* meant we needed to call xdr_adjust_iovec() after encoding the
* data. With the new scheme, the xdr_stream manages the details
- * of the buffer length, and takes care of adjusting the kvec
+ * of the buffer length, and takes care of adjusting the iovec
* length for us.
*/
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct kvec *iov = buf->head;
+ struct iovec *iov = buf->head;
xdr->buf = buf;
xdr->iov = iov;
*
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
- * adjust the length of the current kvec.
+ * adjust the length of the current iovec.
*/
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct kvec *iov = buf->tail;
+ struct iovec *iov = buf->tail;
buf->pages = pages;
buf->page_base = base;
buf->page_len = len;
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct kvec *iov = buf->head;
+ struct iovec *iov = buf->head;
unsigned int len = iov->iov_len;
if (len > buf->len)
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct kvec *iov;
+ struct iovec *iov;
ssize_t shift;
unsigned int end;
int padding;
}
EXPORT_SYMBOL(xdr_read_pages);
-static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
+static struct iovec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
-xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
+xdr_buf_from_iov(struct iovec *iov, struct xdr_buf *buf)
{
buf->head[0] = *iov;
buf->tail[0] = empty_iov;
* length of subiov to zero. Decrements len by length of subiov, sets base
* to zero (or decrements it by length of iov if subiov is empty). */
static void
-iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
+iov_subsegment(struct iovec *iov, struct iovec *subiov, int *base, int *len)
{
if (*base > iov->iov_len) {
subiov->iov_base = NULL;
/*
* Reserve an RPC call slot.
*/
+void
+xprt_reserve(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+ task->tk_status = -EIO;
+ if (!xprt->shutdown) {
+ spin_lock(&xprt->xprt_lock);
+ do_xprt_reserve(task);
+ spin_unlock(&xprt->xprt_lock);
+ if (task->tk_rqstp)
+ del_timer_sync(&xprt->timer);
+ }
+}
+
static inline void
do_xprt_reserve(struct rpc_task *task)
{
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
-void
-xprt_reserve(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- task->tk_status = -EIO;
- if (!xprt->shutdown) {
- spin_lock(&xprt->xprt_lock);
- do_xprt_reserve(task);
- spin_unlock(&xprt->xprt_lock);
- if (task->tk_rqstp)
- del_timer_sync(&xprt->timer);
- }
-}
-
/*
* Allocate a 'unique' XID
*/
+++ /dev/null
-
-config TUX
- tristate "TUX: Threaded linUX application protocol accelerator layer"
- default y if INET=y
- select ZLIB_DEFLATE
- help
- This is the TUX content-accelerator/server
-
-menu "TUX options"
- depends on TUX
-
-config TUX_EXTCGI
- bool "External CGI module"
- default y
-
-config TUX_EXTENDED_LOG
- bool "extended TUX logging format"
- default n
-
-config TUX_DEBUG
- bool "debug TUX"
- default n
-
-endmenu
-
+++ /dev/null
-#
-# Makefile for TUX
-#
-
-obj-$(CONFIG_TUX) += tux.o
-
-tux-y := accept.o input.o userspace.o cachemiss.o output.o \
- redirect.o postpone.o logger.o proto_http.o proto_ftp.o \
- proc.o main.o mod.o abuf.o times.o directory.o gzip.o
-
-tux-$(subst m,y,$(CONFIG_TUX_EXTCGI)) += cgi.o extcgi.o
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * abuf.c: async buffer-sending
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char * get_abuf (tux_req_t *req, unsigned int max_size)
-{
- threadinfo_t *ti = req->ti;
- struct page *page;
- char *buf;
- unsigned int offset;
- unsigned int left;
-
- if (req->abuf.page || req->abuf.buf || req->abuf.size)
- TUX_BUG();
-
- if (max_size > PAGE_SIZE)
- BUG();
- offset = ti->header_offset;
- if (offset > PAGE_SIZE)
- TUX_BUG();
- left = PAGE_SIZE - offset;
- if (!max_size)
- BUG();
- page = ti->header_cache;
- if ((left < max_size) || !page) {
- while (!(page = alloc_pages(GFP_KERNEL, 0))) {
- if (net_ratelimit())
- printk(KERN_WARNING "tux: OOM in get_abuf()!\n");
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(1);
- }
-
- if (ti->header_cache)
- __free_page(ti->header_cache);
- ti->header_cache = page;
- ti->header_offset = 0;
- offset = 0;
- }
- buf = page_address(page) + offset;
-
- if (!page)
- BUG();
- req->abuf.page = page;
- req->abuf.buf = buf;
- req->abuf.size = 0;
- req->abuf.offset = offset;
- req->abuf.flags = 0;
- get_page(req->abuf.page);
-
- return buf;
-}
-
-static void do_send_abuf (tux_req_t *req, int cachemiss);
-
-void send_abuf (tux_req_t *req, unsigned int size, unsigned long flags)
-{
- threadinfo_t *ti = req->ti;
-
- Dprintk("send_abuf(req: %p, sock: %p): %p(%p), size:%d, off:%d, flags:%08lx\n", req, req->sock, req->abuf.page, req->abuf.buf, size, req->abuf.offset, flags);
-
- ti->header_offset += size;
- if (ti->header_offset > PAGE_SIZE)
- TUX_BUG();
- if (req->abuf.offset + req->abuf.size > PAGE_SIZE)
- TUX_BUG();
-
- req->abuf.flags = flags | MSG_NOSIGNAL;
- req->abuf.size = size;
-
- add_tux_atom(req, do_send_abuf);
-}
-
-static void do_send_abuf (tux_req_t *req, int cachemiss)
-{
- int ret;
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (!req->sock)
- TUX_BUG();
- tcp_sk(req->sock->sk)->nonagle = 2;
-
-repeat:
- Dprintk("do_send_abuf(%p,%d): %p(%p), size:%d, off:%d, flags:%08lx\n",
- req, cachemiss,
- req->abuf.page, req->abuf.buf, req->abuf.size,
- req->abuf.offset, req->abuf.flags);
-
- if (tux_zerocopy_header)
- ret = tcp_sendpage(req->sock, req->abuf.page,
- req->abuf.offset, req->abuf.size, req->abuf.flags);
- else {
- mm_segment_t oldmm;
- oldmm = get_fs(); set_fs(KERNEL_DS);
- ret = send_sync_buf(req, req->sock, req->abuf.buf,
- req->abuf.size, req->abuf.flags);
- set_fs(oldmm);
- }
-
-
- Dprintk("do_send_abuf: ret: %d\n", ret);
- if (!ret)
- TUX_BUG();
-
- if (ret < 0) {
- if (ret != -EAGAIN) {
- TDprintk("ret: %d, req->error = TUX_ERROR_CONN_CLOSE.\n", ret);
- req->error = TUX_ERROR_CONN_CLOSE;
- req->atom_idx = 0;
- req->in_file.f_pos = 0;
- __free_page(req->abuf.page);
- memset(&req->abuf, 0, sizeof(req->abuf));
- zap_request(req, cachemiss);
- return;
- }
- add_tux_atom(req, do_send_abuf);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- return;
- }
-
- req->abuf.buf += ret;
- req->abuf.offset += ret;
- req->abuf.size -= ret;
-
- if ((int)req->abuf.size < 0)
- TUX_BUG();
- if (req->abuf.size > 0)
- goto repeat;
-
- Dprintk("DONE do_send_abuf: %p(%p), size:%d, off:%d, flags:%08lx\n",
- req->abuf.page, req->abuf.buf, req->abuf.size,
- req->abuf.offset, req->abuf.flags);
-
- __free_page(req->abuf.page);
-
- memset(&req->abuf, 0, sizeof(req->abuf));
-
- add_req_to_workqueue(req);
-}
-
-void __send_async_message (tux_req_t *req, const char *message,
- int status, unsigned int size, int push)
-{
- unsigned int flags;
- char *buf;
-
- Dprintk("TUX: sending %d reply (%d bytes)!\n", status, size);
- Dprintk("request %p, reply: %s\n", req, message);
- if (!size)
- TUX_BUG();
- buf = get_abuf(req, size);
- memcpy(buf, message, size);
-
- req->status = status;
- flags = MSG_DONTWAIT;
- if (!push)
- flags |= MSG_MORE;
- send_abuf(req, size, flags);
- add_req_to_workqueue(req);
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * accept.c: accept new connections, allocate requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-unsigned int tux_ack_pingpong = 1;
-unsigned int tux_push_all = 0;
-unsigned int tux_zerocopy_parse = 1;
-
-static int __idle_event (tux_req_t *req);
-static int __output_space_event (tux_req_t *req);
-
-struct socket * start_listening(tux_socket_t *listen, int nr)
-{
- struct sockaddr_in sin;
- struct socket *sock = NULL;
- struct sock *sk;
- struct tcp_opt *tp;
- int err;
- u16 port = listen->port;
- u32 addr = listen->ip;
- tux_proto_t *proto = listen->proto;
-
- /* Create a listening socket: */
-
- err = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err) {
- printk(KERN_ERR "TUX: error %d creating socket.\n", err);
- goto error;
- }
-
- /* Bind the socket: */
-
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(addr);
- sin.sin_port = htons(port);
-
- sk = sock->sk;
- sk->sk_reuse = 1;
- sock_set_flag(sk, SOCK_URGINLINE);
-
- err = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
- if (err) {
- printk(KERN_ERR "TUX: error %d binding socket. This means that probably some other process is (or was a short time ago) using addr %s://%d.%d.%d.%d:%d.\n",
- err, proto->name, HIPQUAD(addr), port);
- goto error;
- }
-
- tp = tcp_sk(sk);
- Dprintk("listen sk accept_queue: %p/%p.\n",
- tp->accept_queue, tp->accept_queue_tail);
- tp->ack.pingpong = tux_ack_pingpong;
-
- sock_reset_flag(sk, SOCK_LINGER);
- sk->sk_lingertime = 0;
- tp->linger2 = tux_keepalive_timeout * HZ;
-
- if (proto->defer_accept && !tux_keepalive_timeout && tux_defer_accept)
- tp->defer_accept = 1;
-
- /* Now, start listening on the socket */
-
- err = sock->ops->listen(sock, tux_max_backlog);
- if (err) {
- printk(KERN_ERR "TUX: error %d listening on socket.\n", err);
- goto error;
- }
-
- printk(KERN_NOTICE "TUX: thread %d listens on %s://%d.%d.%d.%d:%d.\n",
- nr, proto->name, HIPQUAD(addr), port);
- return sock;
-
-error:
- if (sock)
- sock_release(sock);
- return NULL;
-}
-
-static inline void __kfree_req (tux_req_t *req, threadinfo_t * ti)
-{
- list_del(&req->all);
- DEBUG_DEL_LIST(&req->all);
- ti->nr_requests--;
- kfree(req);
-}
-
-int flush_freequeue (threadinfo_t * ti)
-{
- struct list_head *tmp;
- unsigned long flags;
- tux_req_t *req;
- int count = 0;
-
- spin_lock_irqsave(&ti->free_requests_lock,flags);
- while (ti->nr_free_requests) {
- ti->nr_free_requests--;
- tmp = ti->free_requests.next;
- req = list_entry(tmp, tux_req_t, free);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- DEC_STAT(nr_free_pending);
- __kfree_req(req, ti);
- count++;
- }
- spin_unlock_irqrestore(&ti->free_requests_lock,flags);
-
- return count;
-}
-
-static tux_req_t * kmalloc_req (threadinfo_t * ti)
-{
- struct list_head *tmp;
- unsigned long flags;
- tux_req_t *req;
-
- spin_lock_irqsave(&ti->free_requests_lock, flags);
- if (ti->nr_free_requests) {
- ti->nr_free_requests--;
- tmp = ti->free_requests.next;
- req = list_entry(tmp, tux_req_t, free);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- DEC_STAT(nr_free_pending);
- req->magic = TUX_MAGIC;
- spin_unlock_irqrestore(&ti->free_requests_lock, flags);
- } else {
- spin_unlock_irqrestore(&ti->free_requests_lock, flags);
- req = tux_kmalloc(sizeof(*req));
- ti->nr_requests++;
- memset (req, 0, sizeof(*req));
- list_add(&req->all, &ti->all_requests);
- }
- req->magic = TUX_MAGIC;
- INC_STAT(nr_allocated);
- init_waitqueue_entry(&req->sleep, current);
- init_waitqueue_entry(&req->ftp_sleep, current);
- INIT_LIST_HEAD(&req->work);
- INIT_LIST_HEAD(&req->free);
- INIT_LIST_HEAD(&req->lru);
- req->ti = ti;
- req->total_bytes = 0;
- SET_TIMESTAMP(req->accept_timestamp);
- req->first_timestamp = jiffies;
- req->fd = -1;
- init_timer(&req->keepalive_timer);
- init_timer(&req->output_timer);
-
- Dprintk("allocated NEW req %p.\n", req);
- return req;
-}
-
-void kfree_req (tux_req_t *req)
-{
- threadinfo_t * ti = req->ti;
- unsigned long flags;
-
- Dprintk("freeing req %p.\n", req);
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- spin_lock_irqsave(&ti->free_requests_lock,flags);
- req->magic = 0;
- DEC_STAT(nr_allocated);
- if (req->sock || req->dentry || req->private)
- TUX_BUG();
- if (ti->nr_free_requests > tux_max_free_requests)
- __kfree_req(req, ti);
- else {
- req->error = 0;
- ti->nr_free_requests++;
-
- // the free requests queue is LIFO
- list_add(&req->free, &ti->free_requests);
- INC_STAT(nr_free_pending);
- }
- spin_unlock_irqrestore(&ti->free_requests_lock,flags);
-}
-
-static void __add_req_to_workqueue (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- if (!list_empty(&req->work))
- TUX_BUG();
- Dprintk("work-queueing request %p at %p/%p.\n", req, __builtin_return_address(0), __builtin_return_address(1));
- if (connection_too_fast(req))
- list_add_tail(&req->work, &ti->work_pending);
- else
- list_add(&req->work, &ti->work_pending);
- INC_STAT(nr_work_pending);
- wake_up_process(ti->thread);
- return;
-}
-
-void add_req_to_workqueue (tux_req_t *req)
-{
- unsigned long flags;
- threadinfo_t *ti = req->ti;
-
- spin_lock_irqsave(&ti->work_lock, flags);
- __add_req_to_workqueue(req);
- spin_unlock_irqrestore(&ti->work_lock, flags);
-}
-
-void del_output_timer (tux_req_t *req)
-{
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
- if (!list_empty(&req->lru)) {
- list_del(&req->lru);
- DEBUG_DEL_LIST(&req->lru);
- req->ti->nr_lru--;
- }
- Dprintk("del output timeout for req %p.\n", req);
- del_timer(&req->output_timer);
-}
-
-static void output_timeout_fn (unsigned long data);
-
-#define OUTPUT_TIMEOUT HZ
-
-static void add_output_timer (tux_req_t *req)
-{
- struct timer_list *timer = &req->output_timer;
-
- timer->data = (unsigned long) req;
- timer->function = &output_timeout_fn;
- mod_timer(timer, jiffies + OUTPUT_TIMEOUT);
-}
-
-static void output_timeout_fn (unsigned long data)
-{
- tux_req_t *req = (tux_req_t *)data;
-
- if (connection_too_fast(req)) {
- add_output_timer(req);
-// mod_timer(&req->output_timer, jiffies + OUTPUT_TIMEOUT);
- return;
- }
- output_space_event(req);
-}
-
-void output_timeout (tux_req_t *req)
-{
- Dprintk("output timeout for req %p.\n", req);
- if (test_and_set_bit(0, &req->wait_output_space))
- TUX_BUG();
- INC_STAT(nr_output_space_pending);
- add_output_timer(req);
-}
-
-void __del_keepalive_timer (tux_req_t *req)
-{
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
- if (!list_empty(&req->lru)) {
- list_del(&req->lru);
- DEBUG_DEL_LIST(&req->lru);
- req->ti->nr_lru--;
- }
- Dprintk("del keepalive timeout for req %p.\n", req);
- del_timer(&req->keepalive_timer);
-}
-
-static void keepalive_timeout_fn (unsigned long data)
-{
- tux_req_t *req = (tux_req_t *)data;
-
-#if CONFIG_TUX_DEBUG
- Dprintk("req %p timed out after %d sec!\n", req, tux_keepalive_timeout);
- if (tux_Dprintk)
- print_req(req);
-#endif
- Dprintk("req->error = TUX_ERROR_CONN_TIMEOUT!\n");
- req->error = TUX_ERROR_CONN_TIMEOUT;
- if (!idle_event(req))
- output_space_event(req);
-}
-
-void __add_keepalive_timer (tux_req_t *req)
-{
- struct timer_list *timer = &req->keepalive_timer;
-
- if (!tux_keepalive_timeout)
- TUX_BUG();
-#if CONFIG_SMP
- if (!spin_is_locked(&req->ti->work_lock))
- TUX_BUG();
-#endif
-
- if (!list_empty(&req->lru))
- TUX_BUG();
- if (req->ti->nr_lru > tux_max_keepalives) {
- struct list_head *head, *last;
- tux_req_t *last_req;
-
- head = &req->ti->lru;
- last = head->prev;
- if (last == head)
- TUX_BUG();
- last_req = list_entry(last, tux_req_t, lru);
- list_del(last);
- DEBUG_DEL_LIST(last);
- req->ti->nr_lru--;
-
- Dprintk("LRU-aging req %p!\n", last_req);
- last_req->error = TUX_ERROR_CONN_TIMEOUT;
- if (!__idle_event(last_req))
- __output_space_event(last_req);
- }
- list_add(&req->lru, &req->ti->lru);
- req->ti->nr_lru++;
-
- timer->expires = jiffies + tux_keepalive_timeout * HZ;
- timer->data = (unsigned long) req;
- timer->function = &keepalive_timeout_fn;
- add_timer(timer);
-}
-
-static int __output_space_event (tux_req_t *req)
-{
- if (!req || (req->magic != TUX_MAGIC))
- TUX_BUG();
-
- if (!test_and_clear_bit(0, &req->wait_output_space)) {
- Dprintk("output space ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
- return 0;
- }
-
- Dprintk("output space ready event at <%p>, %p was waiting!\n", __builtin_return_address(0), req);
- DEC_STAT(nr_output_space_pending);
-
- del_keepalive_timer(req);
- del_output_timer(req);
-
- __add_req_to_workqueue(req);
- return 1;
-}
-
-int output_space_event (tux_req_t *req)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&req->ti->work_lock, flags);
- ret = __output_space_event(req);
- spin_unlock_irqrestore(&req->ti->work_lock, flags);
-
- return ret;
-}
-
-static int __idle_event (tux_req_t *req)
-{
- struct tcp_opt *tp;
- threadinfo_t *ti;
-
- if (!req || (req->magic != TUX_MAGIC))
- TUX_BUG();
- ti = req->ti;
-
- if (!test_and_clear_bit(0, &req->idle_input)) {
- Dprintk("data ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
- return 0;
- }
-
- Dprintk("data ready event at <%p>, %p was idle!\n", __builtin_return_address(0), req);
- del_keepalive_timer(req);
- del_output_timer(req);
- DEC_STAT(nr_idle_input_pending);
-
- tp = tcp_sk(req->sock->sk);
-
- tp->ack.pingpong = tux_ack_pingpong;
- SET_TIMESTAMP(req->accept_timestamp);
-
- __add_req_to_workqueue(req);
-
- return 1;
-}
-
-int idle_event (tux_req_t *req)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&req->ti->work_lock, flags);
- ret = __idle_event(req);
- spin_unlock_irqrestore(&req->ti->work_lock, flags);
-
- return ret;
-}
-
-#define HANDLE_CALLBACK_1(callback, tux_name, real_name, param...) \
- tux_req_t *req; \
- \
- read_lock(&sk->sk_callback_lock); \
- req = sk->sk_user_data; \
- \
- Dprintk("callback "#callback"(%p) req %p.\n", \
- sk->sk_##callback, req); \
- \
- if (!req) { \
- if (sk->sk_##callback == tux_name) { \
- printk("BUG: "#callback" "#tux_name" "#real_name" no req!"); \
- TUX_BUG(); \
- } \
- read_unlock(&sk->sk_callback_lock); \
- if (sk->sk_##callback) \
- sk->sk_##callback(param); \
- return; \
- } \
-
-#define HANDLE_CALLBACK_2(callback, tux_name, real_name, param...) \
- Dprintk(#tux_name"() on %p.\n", req); \
- if (req->magic != TUX_MAGIC) \
- TUX_BUG(); \
- if (req->real_name) \
- req->real_name(param);
-
-#define HANDLE_CALLBACK(callback, tux_name, real_name, param...) \
- HANDLE_CALLBACK_1(callback,tux_name,real_name,param) \
- HANDLE_CALLBACK_2(callback,tux_name,real_name,param)
-
-static void tux_data_ready (struct sock *sk, int len)
-{
- HANDLE_CALLBACK_1(data_ready, tux_data_ready, real_data_ready, sk, len);
-
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_write_space (struct sock *sk)
-{
- HANDLE_CALLBACK(write_space, tux_write_space, real_write_space, sk);
-
- Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
- sk->sk_wmem_queued, sk->sk_sndbuf);
-
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- if (!idle_event(req))
- output_space_event(req);
- }
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_error_report (struct sock *sk)
-{
- HANDLE_CALLBACK(error_report, tux_error_report, real_error_report, sk);
-
- req->error = TUX_ERROR_CONN_CLOSE;
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_state_change (struct sock *sk)
-{
- HANDLE_CALLBACK(state_change, tux_state_change, real_state_change, sk);
-
- if (req->sock && req->sock->sk &&
- (req->sock->sk->sk_state > TCP_ESTABLISHED)) {
- Dprintk("req %p changed to TCP non-established!\n", req);
- Dprintk("req->sock: %p\n", req->sock);
- if (req->sock)
- Dprintk("req->sock->sk: %p\n", req->sock->sk);
- if (req->sock && req->sock->sk)
- Dprintk("TCP state: %d\n", req->sock->sk->sk_state);
- Dprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- }
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_destruct (struct sock *sk)
-{
- BUG();
-}
-
-static void tux_ftp_data_ready (struct sock *sk, int len)
-{
- HANDLE_CALLBACK_1(data_ready, tux_ftp_data_ready,
- ftp_real_data_ready, sk, len);
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_write_space (struct sock *sk)
-{
- HANDLE_CALLBACK_1(write_space, tux_ftp_write_space,
- ftp_real_write_space, sk);
-
- Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
- sk->sk_wmem_queued, sk->sk_sndbuf);
-
- if (sk_stream_wspace(sk) >= sk->sk_sndbuf/10*8) {
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- if (!idle_event(req))
- output_space_event(req);
- }
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_error_report (struct sock *sk)
-{
- HANDLE_CALLBACK(error_report, tux_ftp_error_report,
- ftp_real_error_report, sk);
-
- TDprintk("req %p sock %p got TCP errors on FTP data connection!\n", req, sk);
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_state_change (struct sock *sk)
-{
- HANDLE_CALLBACK(state_change, tux_ftp_state_change,
- ftp_real_state_change, sk);
-
- if (req->sock && req->sock->sk &&
- (req->sock->sk->sk_state > TCP_ESTABLISHED)) {
- Dprintk("req %p FTP control sock changed to TCP non-established!\n", req);
- Dprintk("req->sock: %p\n", req->sock);
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
-
- req->error = TUX_ERROR_CONN_CLOSE;
- }
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_create_child (struct sock *sk, struct sock *newsk)
-{
- HANDLE_CALLBACK(create_child, tux_ftp_create_child,
- ftp_real_create_child, sk, newsk);
-
- newsk->sk_user_data = NULL;
- newsk->sk_data_ready = req->ftp_real_data_ready;
- newsk->sk_state_change = req->ftp_real_state_change;
- newsk->sk_write_space = req->ftp_real_write_space;
- newsk->sk_error_report = req->ftp_real_error_report;
- newsk->sk_create_child = req->ftp_real_create_child;
- newsk->sk_destruct = req->ftp_real_destruct;
-
- if (!idle_event(req))
- output_space_event(req);
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void tux_ftp_destruct (struct sock *sk)
-{
- BUG();
-}
-
-static void link_tux_socket (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (req->sock)
- TUX_BUG();
- if (sk->sk_destruct == tux_destruct)
- TUX_BUG();
- /*
- * (No need to lock the socket, we just want to
- * make sure that events from now on go through
- * tux_data_ready())
- */
- write_lock_irq(&sk->sk_callback_lock);
-
- req->sock = sock;
- sk->sk_user_data = req;
-
- req->real_data_ready = sk->sk_data_ready;
- req->real_state_change = sk->sk_state_change;
- req->real_write_space = sk->sk_write_space;
- req->real_error_report = sk->sk_error_report;
- req->real_destruct = sk->sk_destruct;
-
- sk->sk_data_ready = tux_data_ready;
- sk->sk_state_change = tux_state_change;
- sk->sk_write_space = tux_write_space;
- sk->sk_error_report = tux_error_report;
- sk->sk_destruct = tux_destruct;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- if (req->real_destruct == tux_destruct)
- TUX_BUG();
- req->client_addr = inet_sk(sk)->daddr;
- req->client_port = inet_sk(sk)->dport;
-
- add_wait_queue(sk->sk_sleep, &req->sleep);
-}
-
-void __link_data_socket (tux_req_t *req, struct socket *sock,
- struct sock *sk)
-{
- /*
- * (No need to lock the socket, we just want to
- * make sure that events from now on go through
- * tux_data_ready())
- */
- write_lock_irq(&sk->sk_callback_lock);
-
- req->data_sock = sock;
- sk->sk_user_data = req;
-
- req->ftp_real_data_ready = sk->sk_data_ready;
- req->ftp_real_state_change = sk->sk_state_change;
- req->ftp_real_write_space = sk->sk_write_space;
- req->ftp_real_error_report = sk->sk_error_report;
- req->ftp_real_create_child = sk->sk_create_child;
- req->ftp_real_destruct = sk->sk_destruct;
-
- sk->sk_data_ready = tux_ftp_data_ready;
- sk->sk_state_change = tux_ftp_state_change;
- sk->sk_write_space = tux_ftp_write_space;
- sk->sk_error_report = tux_ftp_error_report;
- sk->sk_create_child = tux_ftp_create_child;
- sk->sk_destruct = tux_ftp_destruct;
-
- if (req->ftp_real_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- add_wait_queue(sk->sk_sleep, &req->ftp_sleep);
-}
-
-void link_tux_data_socket (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (req->data_sock)
- TUX_BUG();
- if (sk->sk_destruct == tux_ftp_destruct)
- TUX_BUG();
- __link_data_socket(req, sock, sk);
-}
-
-void unlink_tux_socket (tux_req_t *req)
-{
- struct sock *sk;
-
- if (!req->sock || !req->sock->sk)
- return;
- sk = req->sock->sk;
-
- write_lock_irq(&sk->sk_callback_lock);
- if (!sk->sk_user_data)
- TUX_BUG();
- if (req->real_destruct == tux_destruct)
- TUX_BUG();
-
- sk->sk_user_data = NULL;
-
- sk->sk_data_ready = req->real_data_ready;
- sk->sk_state_change = req->real_state_change;
- sk->sk_write_space = req->real_write_space;
- sk->sk_error_report = req->real_error_report;
- sk->sk_destruct = req->real_destruct;
-
- if (sk->sk_destruct == tux_destruct)
- TUX_BUG();
-
- req->real_data_ready = NULL;
- req->real_state_change = NULL;
- req->real_write_space = NULL;
- req->real_error_report = NULL;
- req->real_destruct = NULL;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- remove_wait_queue(sk->sk_sleep, &req->sleep);
-}
-
-void unlink_tux_data_socket (tux_req_t *req)
-{
- struct sock *sk;
-
- if (!req->data_sock || !req->data_sock->sk)
- return;
- sk = req->data_sock->sk;
-
- write_lock_irq(&sk->sk_callback_lock);
-
- if (req->real_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- sk->sk_user_data = NULL;
- sk->sk_data_ready = req->ftp_real_data_ready;
- sk->sk_state_change = req->ftp_real_state_change;
- sk->sk_write_space = req->ftp_real_write_space;
- sk->sk_error_report = req->ftp_real_error_report;
- sk->sk_create_child = req->ftp_real_create_child;
- sk->sk_destruct = req->ftp_real_destruct;
-
- req->ftp_real_data_ready = NULL;
- req->ftp_real_state_change = NULL;
- req->ftp_real_write_space = NULL;
- req->ftp_real_error_report = NULL;
- req->ftp_real_create_child = NULL;
- req->ftp_real_destruct = NULL;
-
- write_unlock_irq(&sk->sk_callback_lock);
-
- if (sk->sk_destruct == tux_ftp_destruct)
- TUX_BUG();
-
- remove_wait_queue(sk->sk_sleep, &req->ftp_sleep);
-}
-
-void add_tux_atom (tux_req_t *req, atom_func_t *atom)
-{
- Dprintk("adding TUX atom %p to req %p, atom_idx: %d, at %p/%p.\n",
- atom, req, req->atom_idx, __builtin_return_address(0), __builtin_return_address(1));
- if (req->atom_idx == MAX_TUX_ATOMS)
- TUX_BUG();
- req->atoms[req->atom_idx] = atom;
- req->atom_idx++;
-}
-
-void del_tux_atom (tux_req_t *req)
-{
- if (!req->atom_idx)
- TUX_BUG();
- req->atom_idx--;
- Dprintk("removing TUX atom %p to req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
-}
-
-void tux_schedule_atom (tux_req_t *req, int cachemiss)
-{
- if (!list_empty(&req->work))
- TUX_BUG();
- if (!req->atom_idx)
- TUX_BUG();
- req->atom_idx--;
- Dprintk("DOING TUX atom %p, req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
- might_sleep();
- req->atoms[req->atom_idx](req, cachemiss);
- might_sleep();
- Dprintk("DONE TUX atom %p, req %p, atom_idx: %d, at %p.\n",
- req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
-}
-
-/*
- * Puts newly accepted connections into the inputqueue. This is the
- * first step in the life of a TUX request.
- */
-int accept_requests (threadinfo_t *ti)
-{
- int count = 0, last_count = 0, error, socknr = 0;
- struct socket *sock, *new_sock;
- struct tcp_opt *tp1, *tp2;
- tux_req_t *req;
-
- if (ti->nr_requests > tux_max_connect)
- goto out;
-
-repeat:
- for (socknr = 0; socknr < CONFIG_TUX_NUMSOCKETS; socknr++) {
- tux_listen_t *tux_listen;
-
- tux_listen = ti->listen + socknr;
- sock = tux_listen->sock;
- if (!sock)
- break;
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- break;
-
- tp1 = tcp_sk(sock->sk);
- /*
- * Quick test to see if there are connections on the queue.
- * This is cheaper than accept() itself because this saves us
- * the allocation of a new socket. (Which doesn't seem to be
- * used anyway)
- */
- if (tp1->accept_queue) {
- tux_proto_t *proto;
-
- if (!count++)
- __set_task_state(current, TASK_RUNNING);
-
- new_sock = sock_alloc();
- if (!new_sock)
- goto out;
-
- new_sock->type = sock->type;
- new_sock->ops = sock->ops;
-
- error = sock->ops->accept(sock, new_sock, O_NONBLOCK);
- if (error < 0)
- goto err;
- if (new_sock->sk->sk_state != TCP_ESTABLISHED)
- goto err;
-
- tp2 = tcp_sk(new_sock->sk);
- tp2->nonagle = 2;
- tp2->ack.pingpong = tux_ack_pingpong;
- new_sock->sk->sk_reuse = 1;
- sock_set_flag(new_sock->sk, SOCK_URGINLINE);
-
- /* Allocate a request-entry for the connection */
- req = kmalloc_req(ti);
- if (!req)
- BUG();
- link_tux_socket(req, new_sock);
-
- proto = req->proto = tux_listen->proto;
-
- proto->got_request(req);
- }
- }
- if (count != last_count) {
- last_count = count;
- goto repeat;
- }
-out:
- return count;
-err:
- sock_release(new_sock);
- goto out;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * cachemiss.c: handle the 'slow IO path' by queueing not-yet-cached
- * requests to the IO-thread pool. Dynamic load balancing is done
- * between IO threads, based on the number of requests they have pending.
- */
-
-#include <net/tux.h>
-#include <linux/delay.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void queue_cachemiss (tux_req_t *req)
-{
- iothread_t *iot = req->ti->iot;
-
- Dprintk("queueing_cachemiss(req:%p) (req->cwd_dentry: %p) at %p:%p.\n",
- req, req->cwd_dentry, __builtin_return_address(0), __builtin_return_address(1));
- if (req->idle_input || req->wait_output_space)
- TUX_BUG();
- req->had_cachemiss = 1;
- if (!list_empty(&req->work))
- TUX_BUG();
- spin_lock(&iot->async_lock);
- if (connection_too_fast(req))
- list_add_tail(&req->work, &iot->async_queue);
- else
- list_add(&req->work, &iot->async_queue);
- iot->nr_async_pending++;
- INC_STAT(nr_cachemiss_pending);
- spin_unlock(&iot->async_lock);
-
- wake_up(&iot->async_sleep);
-}
-
-static tux_req_t * get_cachemiss (iothread_t *iot)
-{
- struct list_head *tmp;
- tux_req_t *req = NULL;
-
- spin_lock(&iot->async_lock);
- if (!list_empty(&iot->async_queue)) {
-
- tmp = iot->async_queue.next;
- req = list_entry(tmp, tux_req_t, work);
-
- Dprintk("get_cachemiss(%p): got req %p.\n", iot, req);
- list_del(tmp);
- DEBUG_DEL_LIST(tmp);
- iot->nr_async_pending--;
- DEC_STAT(nr_cachemiss_pending);
-
- if (req->ti->iot != iot)
- TUX_BUG();
- }
- spin_unlock(&iot->async_lock);
- return req;
-}
-
-struct file * tux_open_file (char *filename, int mode)
-{
- struct file *filp;
-
- if (!filename)
- TUX_BUG();
-
- /* Rule no. 3 -- Does the file exist ? */
-
- filp = filp_open(filename, mode, 0600);
-
- if (IS_ERR(filp) || !filp || !filp->f_dentry)
- goto err;
-
-out:
- return filp;
-err:
- Dprintk("filp_open() error: %d.\n", (int)filp);
- filp = NULL;
- goto out;
-}
-
-static int cachemiss_thread (void *data)
-{
- tux_req_t *req;
- struct k_sigaction *ka;
- DECLARE_WAITQUEUE(wait, current);
- iothread_t *iot = data;
- int nr = iot->ti->cpu, wake_up;
-
- Dprintk("iot %p/%p got started.\n", iot, current);
- drop_permissions();
-
- spin_lock(&iot->async_lock);
- iot->threads++;
- sprintf(current->comm, "async IO %d/%d", nr, iot->threads);
-
-
- spin_lock_irq(¤t->sighand->siglock);
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
- siginitsetinv(¤t->blocked, sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- spin_unlock(&iot->async_lock);
-#if CONFIG_SMP
- {
- cpumask_t mask;
-
- if (cpu_isset(nr, cpu_online_map)) {
- cpus_clear(mask);
- cpu_set(nr, mask);
- set_cpus_allowed(current, mask);
- }
-
- }
-#endif
-
- add_wait_queue_exclusive(&iot->async_sleep, &wait);
-
- for (;;) {
- while (!list_empty(&iot->async_queue) &&
- (req = get_cachemiss(iot))) {
-
- if (!req->atom_idx) {
- add_tux_atom(req, flush_request);
- add_req_to_workqueue(req);
- continue;
- }
- tux_schedule_atom(req, 1);
- if (signal_pending(current))
- flush_all_signals();
- }
- if (signal_pending(current))
- flush_all_signals();
- if (!list_empty(&iot->async_queue))
- continue;
- if (iot->shutdown) {
- Dprintk("iot %p/%p got shutdown!\n", iot, current);
- break;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&iot->async_queue)) {
- Dprintk("iot %p/%p going to sleep.\n", iot, current);
- schedule();
- Dprintk("iot %p/%p got woken up.\n", iot, current);
- }
- __set_current_state(TASK_RUNNING);
- }
-
- remove_wait_queue(&iot->async_sleep, &wait);
-
- wake_up = 0;
- spin_lock(&iot->async_lock);
- if (!--iot->threads)
- wake_up = 1;
- spin_unlock(&iot->async_lock);
- Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
- if (wake_up) {
- Dprintk("iot %p/%p waking up master.\n", iot, current);
- wake_up(&iot->wait_shutdown);
- }
-
- return 0;
-}
-
-static void __stop_cachemiss_threads (iothread_t *iot)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- Dprintk("stopping async IO threads %p.\n", iot);
- add_wait_queue(&iot->wait_shutdown, &wait);
-
- spin_lock(&iot->async_lock);
- if (iot->shutdown)
- TUX_BUG();
- if (!iot->threads)
- TUX_BUG();
- iot->shutdown = 1;
- wake_up_all(&iot->async_sleep);
- spin_unlock(&iot->async_lock);
-
- __set_current_state(TASK_UNINTERRUPTIBLE);
- Dprintk("waiting for async IO threads %p to exit.\n", iot);
- schedule();
- remove_wait_queue(&iot->wait_shutdown, &wait);
-
- if (iot->threads)
- TUX_BUG();
- if (iot->nr_async_pending)
- TUX_BUG();
- Dprintk("stopped async IO threads %p.\n", iot);
-}
-
-void stop_cachemiss_threads (threadinfo_t *ti)
-{
- iothread_t *iot = ti->iot;
-
- if (!iot)
- TUX_BUG();
- if (iot->nr_async_pending)
- TUX_BUG();
- __stop_cachemiss_threads(iot);
- ti->iot = NULL;
- kfree(iot);
-}
-
-int start_cachemiss_threads (threadinfo_t *ti)
-{
- int i, pid;
-
- iothread_t *iot;
-
- iot = kmalloc(sizeof(*iot), GFP_KERNEL);
- if (!iot)
- return -ENOMEM;
- memset(iot, 0, sizeof(*iot));
-
- iot->ti = ti;
- iot->async_lock = SPIN_LOCK_UNLOCKED;
- iot->nr_async_pending = 0;
- INIT_LIST_HEAD(&iot->async_queue);
- init_waitqueue_head(&iot->async_sleep);
- init_waitqueue_head(&iot->wait_shutdown);
-
- for (i = 0; i < NR_IO_THREADS; i++) {
- pid = kernel_thread(cachemiss_thread, (void *)iot, 0);
- if (pid < 0) {
- printk(KERN_ERR "TUX: error %d creating IO thread!\n",
- pid);
- __stop_cachemiss_threads(iot);
- kfree(iot);
- return pid;
- }
- }
- ti->iot = iot;
- /*
- * Wait for all cachemiss threads to start up:
- */
- while (iot->threads != NR_IO_THREADS) {
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
- }
- return 0;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * cgi.c: user-space CGI (and other) code execution.
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static int exec_usermode(char *program_path, char *argv[], char *envp[])
-{
- int i, err;
-
- err = tux_chroot(tux_cgiroot);
- if (err) {
- printk(KERN_ERR "TUX: CGI chroot returned %d, /proc/sys/net/tux/cgiroot is probably set up incorrectly! Aborting CGI execution.\n", err);
- return err;
- }
-
- /* Allow execve args to be in kernel space. */
- set_fs(KERNEL_DS);
-
- flush_signals(current);
- spin_lock_irq(¤t->sighand->siglock);
- flush_signal_handlers(current, 1);
- spin_unlock_irq(¤t->sighand->siglock);
-
- for (i = 3; i < current->files->max_fds; i++ )
- if (current->files->fd[i])
- tux_close(i);
-
- err = execve(program_path, argv, envp);
- if (err < 0)
- return err;
- return 0;
-}
-
-static inline long tux_dup(unsigned int fildes)
-{
- int ret = -EBADF;
- struct file * file = fget(fildes);
-
- if (file)
- ret = dupfd(file, 0);
- return ret;
-}
-
-static int exec_helper (void * data)
-{
- exec_param_t *param = data;
- char **tmp;
- int ret;
-
- sprintf(current->comm,"doexec - %d", current->pid);
-#if CONFIG_SMP
- if (!tux_cgi_inherit_cpu) {
-
- cpumask_t cgi_mask, map;
-
- mask_to_cpumask(tux_cgi_cpu_mask, &cgi_mask);
- cpus_and(map, cpu_online_map, cgi_mask);
-
- if (!(cpus_empty(map)))
- set_cpus_allowed(current, cgi_mask);
- else
- set_cpus_allowed(current, cpu_online_map);
- }
-#endif
-
- if (!param)
- TUX_BUG();
- Dprintk("doing exec(%s).\n", param->command);
-
- Dprintk("argv: ");
- tmp = param->argv;
- while (*tmp) {
- Dprintk("{%s} ", *tmp);
- tmp++;
- }
- Dprintk("\n");
- Dprintk("envp: ");
- tmp = param->envp;
- while (*tmp) {
- Dprintk("{%s} ", *tmp);
- tmp++;
- }
- Dprintk("\n");
- /*
- * Set up stdin, stdout and stderr of the external
- * CGI application.
- */
- if (param->pipe_fds) {
- tux_close(1);
- tux_close(2);
- tux_close(4);
- if (tux_dup(3) != 1)
- TUX_BUG();
- if (tux_dup(5) != 2)
- TUX_BUG();
- tux_close(3);
- tux_close(5);
- // do not close on exec.
-#if 0
- sys_fcntl(0, F_SETFD, 0);
- sys_fcntl(1, F_SETFD, 0);
- sys_fcntl(2, F_SETFD, 0);
-#else
- spin_lock(¤t->files->file_lock);
- FD_CLR(0, current->files->close_on_exec);
- FD_CLR(1, current->files->close_on_exec);
- FD_CLR(2, current->files->close_on_exec);
- spin_unlock(¤t->files->file_lock);
-#endif
- }
- ret = exec_usermode(param->command, param->argv, param->envp);
- if (ret < 0)
- Dprintk("bug: exec() returned %d.\n", ret);
- else
- Dprintk("exec()-ed successfully!\n");
- return 0;
-}
-
-pid_t tux_exec_process (char *command, char **argv,
- char **envp, int pipe_fds,
- exec_param_t *param, int wait)
-{
- exec_param_t param_local;
- pid_t pid;
- struct k_sigaction *ka;
-
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
-
- if (!param && wait)
- param = ¶m_local;
-
- param->command = command;
- param->argv = argv;
- param->envp = envp;
- param->pipe_fds = pipe_fds;
-
-repeat_fork:
- pid = kernel_thread(exec_helper, (void*) param, CLONE_SIGHAND|SIGCHLD);
- Dprintk("kernel thread created PID %d.\n", pid);
- if (pid < 0) {
- printk(KERN_ERR "TUX: could not create new CGI kernel thread due to %d... retrying.\n", pid);
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- goto repeat_fork;
- }
- return pid;
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * directory.c: directory listing support
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char * tux_print_path (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt, char *buf, unsigned int max_len)
-{
- char *res;
- struct dentry *cwd, *root;
- struct vfsmount *cwd_mnt, *rootmnt;
-
- cwd = dget(dentry);
- cwd_mnt = mntget(mnt);
- root = dget(req->docroot_dentry);
- rootmnt = mntget(req->docroot_mnt);
-
- spin_lock(&dcache_lock);
- res = __d_path(cwd, cwd_mnt, root, rootmnt, buf, max_len);
- spin_unlock(&dcache_lock);
-
- dput(cwd);
- mntput(cwd_mnt);
- dput(root);
- mntput(rootmnt);
-
- return res;
-}
-
-/*
- * There are filesystems that do not fill in ->d_type correctly.
- * Determine file-type.
- */
-static int get_d_type (struct dentry *dentry)
-{
- unsigned int mode = dentry->d_inode->i_mode;
-
- if (S_ISREG(mode))
- return DT_REG;
- if (S_ISDIR(mode))
- return DT_DIR;
- if (S_ISLNK(mode))
- return DT_LNK;
- if (S_ISFIFO(mode))
- return DT_FIFO;
- if (S_ISSOCK(mode))
- return DT_SOCK;
- if (S_ISCHR(mode))
- return DT_CHR;
- if (S_ISBLK(mode))
- return DT_BLK;
- return 0;
-}
-
-static void do_dir_line (tux_req_t *req, int cachemiss)
-{
- struct linux_dirent64 *dirp, *dirp0;
- char string0[MAX_OBJECTNAME_LEN+200], *tmp;
- int len, curroff, total, str_len = 0;
- int err, flag = cachemiss ? 0 : LOOKUP_ATOMIC;
- struct nameidata base;
- struct dentry *dentry = NULL;
- struct inode *inode = NULL;
- struct vfsmount *mnt = NULL;
-
- if (req->proto->check_req_err(req, cachemiss))
- return;
-
- tmp = NULL;
- dirp0 = req->dirp0;
- curroff = req->curroff;
- total = req->total;
-
- dirp = (struct linux_dirent64 *)((char *)dirp0 + curroff);
- if (!dirp->d_name || !dirp->d_name[0])
- goto next_dir;
- /*
- * Hide .xxxxx files:
- */
- if (dirp->d_name[0] == '.')
- goto next_dir;
- Dprintk("<%s T:%d (off:%Ld) (len:%d)>\n", dirp->d_name, dirp->d_type, dirp->d_off, dirp->d_reclen);
- if (tux_hide_unreadable) {
- switch (dirp->d_type) {
- default:
- goto next_dir;
- case DT_UNKNOWN:
- case DT_REG:
- case DT_DIR:
- case DT_LNK:
- /* valid entries - fall through. */
- ;
- }
- }
- len = strlen(dirp->d_name);
- if (len >= MAX_OBJECTNAME_LEN) {
- dirp->d_name[MAX_OBJECTNAME_LEN] = 0;
- len = MAX_OBJECTNAME_LEN-1;
- }
-
- if (!req->dentry)
- TUX_BUG();
-
- base.flags = flag;
- base.last_type = LAST_ROOT;
- base.dentry = dget(req->dentry);
- base.mnt = mntget(req->cwd_mnt);
-
- switch_docroot(req);
- err = path_walk(dirp->d_name, &base);
-
- Dprintk("path_walk() returned %d.\n", err);
-
- if (err) {
- if (err == -EWOULDBLOCKIO) {
- add_tux_atom(req, do_dir_line);
- queue_cachemiss(req);
- return;
- }
- goto next_dir;
- }
-
- dentry = base.dentry;
- mnt = base.mnt;
- if (!dentry)
- TUX_BUG();
- if (IS_ERR(dentry))
- TUX_BUG();
- inode = dentry->d_inode;
- if (!inode)
- TUX_BUG();
- if (!dirp->d_type)
- dirp->d_type = get_d_type(dentry);
- if (tux_hide_unreadable) {
- umode_t mode;
-
- mode = inode->i_mode;
- if (mode & tux_mode_forbidden)
- goto out_dput;
- if (!(mode & tux_mode_allowed))
- goto out_dput;
-
- err = permission(inode, MAY_READ, NULL);
- if (err)
- goto out_dput;
- if (dirp->d_type == DT_DIR) {
- err = permission(inode, MAY_EXEC, NULL);
- if (err)
- goto out_dput;
- }
- }
-
- tmp = req->proto->print_dir_line(req, string0, dirp->d_name, len, dirp->d_type, dentry, inode);
- if (tmp)
- str_len = tmp-string0;
-out_dput:
- dput(dentry);
- mntput(mnt);
-next_dir:
- curroff += dirp->d_reclen;
-
- if (tmp && (tmp != string0))
- Dprintk("writing line (len: %d): <%s>\n", strlen(string0), string0);
-
- if (curroff < total) {
- req->dirp0 = dirp0;
- req->curroff = curroff;
- add_tux_atom(req, do_dir_line);
- } else {
- kfree(dirp0);
- req->dirp0 = NULL;
- req->curroff = 0;
- // falls back to the list_directory atom
- }
- if (tmp && (tmp != string0))
- __send_async_message(req, string0, 200, str_len, 0);
- else
- add_req_to_workqueue(req);
-}
-
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
-#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
-#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
-
-static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
- ino_t ino, unsigned int d_type)
-{
- struct linux_dirent64 * dirent, d;
- struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf;
- int reclen = ROUND_UP64(NAME_OFFSET(dirent) + namlen + 1);
-
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
- dirent = buf->previous;
- if (dirent) {
- d.d_off = offset;
- copy_to_user(&dirent->d_off, &d.d_off, sizeof(d.d_off));
- }
- dirent = buf->current_dir;
- buf->previous = dirent;
- memset(&d, 0, NAME_OFFSET(&d));
- d.d_ino = ino;
- d.d_reclen = reclen;
- d.d_type = d_type;
- copy_to_user(dirent, &d, NAME_OFFSET(&d));
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- dirent = (void *)dirent + reclen;
- buf->current_dir = dirent;
- buf->count -= reclen;
- return 0;
-}
-#define DIRENT_SIZE 3000
-
-void list_directory (tux_req_t *req, int cachemiss)
-{
- struct getdents_callback64 buf;
- struct linux_dirent64 *dirp0;
- mm_segment_t oldmm;
- int total;
-
- Dprintk("list_directory(%p, %d), dentry: %p.\n", req, cachemiss, req->dentry);
- if (!req->cwd_dentry)
- TUX_BUG();
-
- if (!cachemiss) {
- add_tux_atom(req, list_directory);
- queue_cachemiss(req);
- return;
- }
-
- dirp0 = tux_kmalloc(DIRENT_SIZE);
-
- buf.current_dir = dirp0;
- buf.previous = NULL;
- buf.count = DIRENT_SIZE;
- buf.error = 0;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- set_fs(KERNEL_DS);
- total = vfs_readdir(&req->in_file, filldir64, &buf);
- set_fs(oldmm);
-
- if (buf.previous)
- total = DIRENT_SIZE - buf.count;
-
- Dprintk("total: %d (buf.error: %d, buf.previous %p)\n",
- total, buf.error, buf.previous);
-
- if (total < 0) {
- kfree(dirp0);
- req_err(req);
- add_req_to_workqueue(req);
- return;
- }
- if (!total) {
- kfree(dirp0);
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- return;
- }
-
- if (!req->cwd_dentry)
- TUX_BUG();
- add_tux_atom(req, list_directory);
-
- req->dirp0 = dirp0;
- req->curroff = 0;
- req->total = total;
- add_tux_atom(req, do_dir_line);
-
- add_req_to_workqueue(req);
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * extcgi.c: dynamic TUX module which forks and starts an external CGI
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-#include "parser.h"
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#define MAX_ENVLEN 1000
-#define MAX_CGI_METAVARIABLES 32
-#define CGI_CHUNK_SIZE 1024
-#define MAX_CGI_COMMAND_LEN 256
-
-#if CONFIG_TUX_DEBUG
-#define PRINT_MESSAGE_LEFT \
- Dprintk("CGI message left at %s:%d:\n--->{%s}<---\n", \
- __FILE__, __LINE__, curr)
-#else
-#define PRINT_MESSAGE_LEFT do {} while(0)
-#endif
-
-#define GOTO_INCOMPLETE do { Dprintk("invalid CGI reply at %s:%d.\n", __FILE__, __LINE__); goto invalid; } while (0)
-
-/*
- * Please acknowledge our hard work by not changing this define, or
- * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in
- * the ID string. Thanks! :-)
- */
-#define CGI_SUCCESS2 "HTTP/1.1 200 OK\r\nConnection: close\r\nServer: TUX/2.0 (Linux)\r\n"
-
-static int handle_cgi_reply (tux_req_t *req)
-{
- int first = 1;
- int len, left, total;
- char *buf, *tmp;
- mm_segment_t oldmm;
-
- buf = tux_kmalloc(CGI_CHUNK_SIZE+1);
- tux_close(3);
- tux_close(4);
- tux_close(5);
- oldmm = get_fs(); set_fs(KERNEL_DS);
- send_sync_buf(NULL, req->sock, CGI_SUCCESS2, sizeof(CGI_SUCCESS2)-1, MSG_MORE);
- set_fs(oldmm);
-
- req->bytes_sent = 0;
- /*
- * The new process is the new owner of the socket, it will
- * close it.
- */
-repeat:
- left = CGI_CHUNK_SIZE;
- len = 0;
- total = 0;
- tmp = buf;
- do {
- mm_segment_t oldmm;
-
- tmp += len;
- total += len;
- left -= len;
- if (!left)
- break;
-repeat_read:
- Dprintk("reading %d bytes via read().\n", left);
- oldmm = get_fs(); set_fs(KERNEL_DS);
- len = read(2, tmp, left);
- set_fs(oldmm);
- Dprintk("got %d bytes from read() (total: %d).\n", len, total);
- if (len > 0)
- tmp[len] = 0;
- Dprintk("CGI reply: (%d bytes, total %d).\n", len, total);
- if (len == -ERESTARTSYS) {
- flush_all_signals();
- goto repeat_read;
- }
- } while (len > 0);
- if (total > CGI_CHUNK_SIZE) {
- printk(KERN_ERR "TUX: CGI weirdness. total: %d, len: %d, left: %d.\n", total, len, left);
- TUX_BUG();
- }
- Dprintk("CGI done reply chunk: (%d bytes last, total %d).\n", len, total);
- if (total) {
- mm_segment_t oldmm;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- if (!len)
- send_sync_buf(NULL, req->sock, buf, total, 0);
- else
- send_sync_buf(NULL, req->sock, buf, total, MSG_MORE);
- set_fs(oldmm);
- req->bytes_sent += total;
- }
-
- Dprintk("bytes_sent: %d\n", req->bytes_sent);
- if ((total > 0) && first) {
- first = 0;
-
- if (buf[total])
- TUX_BUG();
- tmp = strstr(buf, "\n\n");
- if (tmp) {
- req->bytes_sent -= (tmp-buf) + 2;
- Dprintk("new bytes_sent: %d\n", req->bytes_sent);
- } else {
- req->bytes_sent = 0;
- req_err(req);
- }
- }
- if (len < 0)
- Dprintk("sys_read returned with %d.\n", len);
- else {
- if (total > 0)
- goto repeat;
- }
- tux_close(2);
-
- req->status = 200;
- add_req_to_workqueue(req);
- kfree(buf);
-
- return -1;
-}
-
-static int exec_external_cgi (void *data)
-{
- exec_param_t param;
- tux_req_t *req = data;
- char *envp[MAX_CGI_METAVARIABLES+1], **envp_p;
- char *argv[] = { "extcgi", NULL};
- char *envstr, *tmp;
- unsigned int host;
- struct k_sigaction *ka;
- int in_pipe_fds[2], out_pipe_fds[2], err_pipe_fds[2], len, err;
- char *command;
- pid_t pid;
-
- len = strlen(tux_common_docroot);
- if (req->objectname_len + len + 12 > MAX_CGI_COMMAND_LEN)
- return -ENOMEM;
- sprintf(current->comm,"cgimain - %d", current->pid);
- host = inet_sk(req->sock->sk)->daddr;
-
- envstr = tux_kmalloc(MAX_ENVLEN);
- command = tux_kmalloc(MAX_CGI_COMMAND_LEN);
-
- tmp = envstr;
- envp_p = envp;
-
-#define WRITE_ENV(str...) \
- if (envp_p >= envp + MAX_CGI_METAVARIABLES) \
- TUX_BUG(); \
- len = sprintf(tmp, str); \
- *envp_p++ = tmp; \
- tmp += len + 1; \
- if (tmp >= envstr + MAX_ENVLEN) \
- TUX_BUG();
-
- #define WRITE_ENV_STR(str,field,len) \
- do { \
- int offset; \
- \
- offset = sizeof(str)-1; \
- err = -EFAULT; \
- if (tmp - envstr + offset + len >= MAX_ENVLEN) \
- goto out; \
- if (envp_p >= envp + MAX_CGI_METAVARIABLES) \
- TUX_BUG(); \
- memcpy(tmp, str, offset); \
- memcpy(tmp + offset, field, len); \
- offset += len; \
- tmp[offset] = 0; \
- *envp_p++ = tmp; \
- tmp += offset + 1; \
- } while (0)
-
- WRITE_ENV("GATEWAY_INTERFACE=CGI/1.1");
- WRITE_ENV("CONTENT_LENGTH=%d", req->post_data_len);
- WRITE_ENV("REMOTE_ADDR=%d.%d.%d.%d", NIPQUAD(host));
- WRITE_ENV("SERVER_PORT=%d", 80);
- WRITE_ENV("SERVER_SOFTWARE=TUX/2.0 (Linux)");
-
-#if 1
- WRITE_ENV("DOCUMENT_ROOT=/");
- WRITE_ENV("PATH_INFO=/");
-#else
- WRITE_ENV_STR("DOCUMENT_ROOT=", tux_common_docroot, len);
- WRITE_ENV_STR("PATH_INFO=", tux_common_docroot, len);
-#endif
- WRITE_ENV_STR("QUERY_STRING=", req->query_str, req->query_len);
- WRITE_ENV_STR("REQUEST_METHOD=", req->method_str, req->method_len);
- WRITE_ENV_STR("SCRIPT_NAME=", req->objectname, req->objectname_len);
- WRITE_ENV_STR("SERVER_PROTOCOL=", req->version_str, req->version_len);
-
- if (req->content_type_len)
- WRITE_ENV_STR("CONTENT_TYPE=",
- req->content_type_str, req->content_type_len);
- if (req->cookies_len)
- WRITE_ENV_STR("HTTP_COOKIE=",
- req->cookies_str, req->cookies_len);
-
- if (req->host_len)
- WRITE_ENV_STR("SERVER_NAME=", req->host, req->host_len);
- else {
- const char *host = "localhost";
- WRITE_ENV_STR("SERVER_NAME=", host, strlen(host));
- }
-
- *envp_p = NULL;
-
- spin_lock_irq(¤t->sighand->siglock);
- ka = current->sighand->action + SIGPIPE-1;
- ka->sa.sa_handler = SIG_IGN;
- siginitsetinv(¤t->blocked, sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- tux_close(0); tux_close(1);
- tux_close(2); tux_close(3);
- tux_close(4); tux_close(5);
-
- in_pipe_fds[0] = in_pipe_fds[1] = -1;
- out_pipe_fds[0] = out_pipe_fds[1] = -1;
- err_pipe_fds[0] = err_pipe_fds[1] = -1;
-
- err = -ENFILE;
- if (do_pipe(in_pipe_fds))
- goto out;
- if (do_pipe(out_pipe_fds))
- goto out;
- if (do_pipe(err_pipe_fds))
- goto out;
-
- if (in_pipe_fds[0] != 0) TUX_BUG();
- if (in_pipe_fds[1] != 1) TUX_BUG();
- if (out_pipe_fds[0] != 2) TUX_BUG();
- if (out_pipe_fds[1] != 3) TUX_BUG();
- if (err_pipe_fds[0] != 4) TUX_BUG();
- if (err_pipe_fds[1] != 5) TUX_BUG();
-
- if (req->virtual && req->host_len)
- sprintf(command, "/%s/cgi-bin/%s", req->host, req->objectname);
- else
- sprintf(command, "/cgi-bin/%s", req->objectname);
- Dprintk("before CGI exec.\n");
- pid = tux_exec_process(command, argv, envp, 1, ¶m, 0);
- Dprintk("after CGI exec.\n");
-
- if (req->post_data_len) {
- mm_segment_t oldmm;
- int ret;
-
- Dprintk("POST data to CGI:\n");
- oldmm = get_fs(); set_fs(KERNEL_DS);
- ret = write(1, req->post_data_str, req->post_data_len);
- set_fs(oldmm);
- Dprintk("write() returned: %d.\n", ret);
- if (ret != req->post_data_len)
- Dprintk("write() returned: %d.\n", ret);
- }
-
- tux_close(0);
- tux_close(1);
-
- handle_cgi_reply(req);
- err = 0;
-
-out:
- kfree(envstr);
- kfree(command);
-
- return err;
-}
-
-void start_external_cgi (tux_req_t *req)
-{
- int pid;
-
-repeat:
- pid = kernel_thread(exec_external_cgi, (void*) req, SIGCHLD);
- if (pid == -1)
- return;
- if (pid < 0) {
- printk(KERN_INFO "TUX: Could not fork external CGI process due to %d, retrying!\n", pid);
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ);
- goto repeat;
- }
-}
-
-int query_extcgi (tux_req_t *req)
-{
- clear_keepalive(req);
- start_external_cgi(req);
- return -1;
-}
-
-#define EXTCGI_INVALID_HEADER \
- "HTTP/1.1 503 Service Unavailable\r\n" \
- "Content-Length: 23\r\n\r\n"
-
-#define EXTCGI_INVALID_BODY \
- "TUX: invalid CGI reply."
-
-#define EXTCGI_INVALID EXTCGI_INVALID_HEADER EXTCGI_INVALID_BODY
-
+++ /dev/null
-/* $Id: zlib.h,v 1.2 1997/12/23 10:47:44 paulus Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/zlib.h>
-#include <net/tux.h>
-
-#define STREAM_END_SPACE 12
-
-int tux_gzip_compress (tux_req_t *req, unsigned char *data_in, unsigned char *data_out, __u32 *in_len, __u32 *out_len)
-{
- z_stream *s = &req->ti->gzip_state;
- int ret, left;
-
- down(&req->ti->gzip_sem);
- if (zlib_deflateReset(s) != Z_OK)
- BUG();
-
- s->next_in = data_in;
- s->next_out = data_out;
- s->avail_in = *in_len;
- s->avail_out = *out_len;
-
- Dprintk("calling zlib_deflate with avail_in %d, avail_out %d\n", s->avail_in, s->avail_out);
- ret = zlib_deflate(s, Z_FINISH);
- Dprintk("deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", s->avail_in, s->avail_out, s->total_in, s->total_out);
-
- if (ret != Z_STREAM_END) {
- printk("bad: deflate returned with %d! avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", ret, s->avail_in, s->avail_out, s->total_in, s->total_out);
- BUG();
- }
- *in_len = s->avail_in;
- *out_len = s->avail_out;
- left = s->avail_in;
-
- up(&req->ti->gzip_sem);
-
- return left;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * input.c: handle requests arriving on accepted connections
- */
-
-#include <net/tux.h>
-#include <linux/kmod.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void zap_request (tux_req_t *req, int cachemiss)
-{
- if (!req->error)
- TUX_BUG();
- if (req->error == TUX_ERROR_CONN_TIMEOUT) {
- if (req->proto->request_timeout) {
- clear_keepalive(req);
- req->proto->request_timeout(req, cachemiss);
- } else {
- clear_keepalive(req);
- if (!cachemiss)
- flush_request(req, 0);
- else {
- add_tux_atom(req, flush_request);
- add_req_to_workqueue(req);
- }
- }
- return;
- }
-
- if (!cachemiss && (req->error == TUX_ERROR_CONN_CLOSE)) {
- /*
- * Zap connection as fast as possible, there is
- * no valid client connection anymore:
- */
- clear_keepalive(req);
- flush_request(req, 0);
- } else {
- if (req->error == TUX_ERROR_CONN_CLOSE) {
- clear_keepalive(req);
- add_tux_atom(req, flush_request);
- } else
- /*
- * Potentially redirect to the secondary server:
- */
- add_tux_atom(req, redirect_request);
- add_req_to_workqueue(req);
- }
-}
-
-void __switch_docroot(tux_req_t *req)
-{
- if (!req->docroot_dentry || !req->docroot_mnt)
- TUX_BUG();
- set_fs_root(current->fs, req->docroot_mnt, req->docroot_dentry);
-}
-
-struct dentry * __tux_lookup (tux_req_t *req, const char *filename,
- struct nameidata *base, struct vfsmount **mnt)
-{
- int err;
-
- err = path_walk(filename, base);
- if (err) {
- Dprintk("path_walk() returned with %d!\n", err);
- return ERR_PTR(err);
- }
- if (*mnt)
- TUX_BUG();
- *mnt = base->mnt;
-
- return base->dentry;
-}
-
-int tux_permission (struct inode *inode)
-{
- umode_t mode;
- int err;
-
- mode = inode->i_mode;
- Dprintk("URL inode mode: %08x.\n", mode);
-
- if (mode & tux_mode_forbidden)
- return -2;
- /*
- * at least one bit in the 'allowed' set has to
- * be present to allow access.
- */
- if (!(mode & tux_mode_allowed))
- return -3;
- err = permission(inode,MAY_READ,NULL);
- return err;
-}
-
-struct dentry * tux_lookup (tux_req_t *req, const char *filename,
- const unsigned int flag, struct vfsmount **mnt)
-{
- struct dentry *dentry;
- struct nameidata base;
-
- Dprintk("tux_lookup(%p, %s, %d, virtual: %d, host: %s (%d).)\n", req, filename, flag, req->virtual, req->host, req->host_len);
-
- base.flags = LOOKUP_FOLLOW|flag;
- base.last_type = LAST_ROOT;
- if (req->objectname[0] == '/') {
- base.dentry = dget(req->docroot_dentry);
- base.mnt = mntget(req->docroot_mnt);
- } else {
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
- base.dentry = req->cwd_dentry;
- dget(base.dentry);
- base.mnt = mntget(req->cwd_mnt);
- }
-
- switch_docroot(req);
- dentry = __tux_lookup (req, filename, &base, mnt);
-
- Dprintk("looked up {%s} == dentry %p.\n", filename, dentry);
-
- if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
- TUX_BUG();
- return dentry;
-}
-
-int lookup_object (tux_req_t *req, const unsigned int flag)
-{
- struct vfsmount *mnt = NULL;
- struct dentry *dentry = NULL;
- int perm;
-
- dentry = tux_lookup(req, req->objectname, flag, &mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- goto cachemiss;
- goto abort;
- }
- perm = tux_permission(dentry->d_inode);
- /*
- * Only regular files allowed.
- */
- if ((perm < 0) || !S_ISREG(dentry->d_inode->i_mode)) {
- req->status = 403;
- goto abort;
- }
- req->total_file_len = dentry->d_inode->i_size;
-out:
- install_req_dentry(req, dentry, mnt);
- return 0;
-cachemiss:
- return 1;
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
- req_err(req);
- goto out;
-}
-
-void install_req_dentry (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt)
-{
- if (req->dentry)
- TUX_BUG();
- req->dentry = dentry;
- if (req->mnt)
- TUX_BUG();
- req->mnt = mnt;
- if (req->in_file.f_dentry)
- TUX_BUG();
- if (dentry)
- open_private_file(&req->in_file, dentry, FMODE_READ);
-}
-
-void release_req_dentry (tux_req_t *req)
-{
- if (!req->dentry) {
- if (req->in_file.f_dentry)
- TUX_BUG();
- return;
- }
- if (req->in_file.f_op && req->in_file.f_op->release)
- req->in_file.f_op->release(req->dentry->d_inode, &req->in_file);
- memset(&req->in_file, 0, sizeof(req->in_file));
-
- dput(req->dentry);
- req->dentry = NULL;
- mntput(req->mnt);
- req->mnt = NULL;
-}
-
-int __connection_too_fast (tux_req_t *req)
-{
- unsigned long curr_bw, delta, bytes;
-
- bytes = req->total_bytes + req->bytes_sent;
- if (!bytes)
- return 1;
-
- delta = jiffies - req->first_timestamp;
- if (!delta)
- delta++;
- curr_bw = bytes * HZ / delta;
-
- if (curr_bw > tux_max_output_bandwidth)
- return 2;
- return 0;
-}
-
-void unidle_req (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- Dprintk("UNIDLE req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
- spin_lock_irq(&ti->work_lock);
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (!test_and_clear_bit(0, &req->idle_input)) {
- Dprintk("unidling %p, wasnt idle!\n", req);
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(&req->work);
- DEBUG_DEL_LIST(&req->work);
- DEC_STAT(nr_work_pending);
- } else {
- del_keepalive_timer(req);
- DEC_STAT(nr_idle_input_pending);
- Dprintk("unidled %p.\n", req);
- }
- if (req->idle_input)
- TUX_BUG();
- spin_unlock_irq(&ti->work_lock);
-}
-
-#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete; } while (0)
-#define GOTO_REDIRECT do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect; } while (0)
-#define GOTO_REDIRECT_NONIDLE do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect_nonidle; } while (0)
-
-static int read_request (struct socket *sock, char *buf, int max_size)
-{
- mm_segment_t oldmm;
- struct kiocb iocb;
- struct msghdr msg;
- struct iovec iov;
-
- int len;
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- msg.msg_iov->iov_base = buf;
- msg.msg_iov->iov_len = max_size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
-
-read_again:
- init_sync_kiocb(&iocb, NULL);
- len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, max_size,
- MSG_DONTWAIT, MSG_PEEK, NULL);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
-
- /*
- * We must not get a signal inbetween
- */
- if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
- if (!signal_pending(current)) {
- len = 0;
- goto out;
- }
- flush_all_signals();
- goto read_again;
- }
-out:
- set_fs(oldmm);
- return len;
-}
-
-/*
- * We inline URG data so it's at the head of the normal receive queue.
- */
-static int zap_urg_data (struct socket *sock)
-{
- mm_segment_t oldmm;
- struct msghdr msg;
- struct iovec iov;
- struct kiocb iocb;
- int len;
- char buf[10];
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- msg.msg_iov->iov_base = buf;
- msg.msg_iov->iov_len = 2;
-
-read_again:
- init_sync_kiocb(&iocb, NULL);
- len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, 2,
- MSG_DONTWAIT, 0, NULL);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
- Dprintk("recvmsg(MSG_OOB) returned %d.\n", len);
-
- /*
- * We must not get a signal inbetween
- */
- if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
- if (!signal_pending(current)) {
- len = 0;
- goto out;
- }
- flush_all_signals();
- goto read_again;
- }
-out:
- set_fs(oldmm);
-
- Dprintk("in out:.. and will return %d.!\n", len);
-
- return len;
-}
-
-void trunc_headers (tux_req_t *req)
-{
- struct sock *sk = req->sock->sk;
- int len, addr_len = 0;
- struct kiocb iocb;
-
- if (!req->parsed_len)
- TUX_BUG();
-repeat_trunc:
- init_sync_kiocb(&iocb, NULL);
- len = sk->sk_prot->recvmsg(&iocb, sk, NULL, req->parsed_len, 1, MSG_TRUNC, &addr_len);
- if (-EIOCBQUEUED == len)
- len = wait_on_sync_kiocb(&iocb);
- if ((len == -ERESTARTSYS) || (len == -EAGAIN)) {
- flush_all_signals();
- goto repeat_trunc;
- }
- Dprintk("truncated (TRUNC) %d bytes at %p. (wanted: %d.)\n", len, __builtin_return_address(0), req->parsed_len);
-
-
-
- req->parsed_len = 0;
-}
-
-void print_req (tux_req_t *req)
-{
- struct sock *sk;
-
- printk("PRINT req %p <%p>, sock %p\n",
- req, __builtin_return_address(0), req->sock);
- printk("... idx: %d\n", req->atom_idx);
- if (req->sock) {
- sk = req->sock->sk;
- printk("... sock %p, sk %p, sk->state: %d, sk->err: %d\n", req->sock, sk, sk->sk_state, sk->sk_err);
- printk("... write_queue: %d, receive_queue: %d, error_queue: %d, keepalive: %d, status: %d\n", !skb_queue_empty(&sk->sk_write_queue), !skb_queue_empty(&sk->sk_receive_queue), !skb_queue_empty(&sk->sk_error_queue), req->keep_alive, req->status);
- printk("...tp->send_head: %p\n", sk->sk_send_head);
- printk("...tp->snd_una: %08x\n", tcp_sk(sk)->snd_una);
- printk("...tp->snd_nxt: %08x\n", tcp_sk(sk)->snd_nxt);
- printk("...tp->packets_out: %08x\n", tcp_sk(sk)->packets_out);
- }
- printk("... meth:{%s}, uri:{%s}, query:{%s}, ver:{%s}\n", req->method_str ? req->method_str : "<null>", req->uri_str ? req->uri_str : "<null>", req->query_str ? req->query_str : "<null>", req->version_str ? req->version_str : "<null>");
- printk("... post_data:{%s}(%d).\n", req->post_data_str, req->post_data_len);
- printk("... headers: {%s}\n", req->headers);
-}
-/*
- * parse_request() reads all available TCP/IP data and prepares
- * the request if the TUX request is complete. (we can get TUX
- * requests in several packets.) Invalid requests are redirected
- * to the secondary server.
- */
-
-void parse_request (tux_req_t *req, int cachemiss)
-{
- int len, parsed_len;
- struct sock *sk = req->sock->sk;
- struct tcp_opt *tp = tcp_sk(sk);
- int was_keepalive = req->keep_alive;
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
-
- SET_TIMESTAMP(req->parse_timestamp);
-
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- INC_STAT(nr_idle_input_pending);
- spin_unlock_irq(&req->ti->work_lock);
-
- Dprintk("idled request %p.\n", req);
-
-restart:
-
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
- len = zap_urg_data(req->sock);
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
- req->error = TUX_ERROR_CONN_CLOSE;
- goto redirect_error;
- }
- }
-
- INC_STAT(input_slowpath);
-
- if (!req->headers)
- req->headers = tux_kmalloc(tux_max_header_len);
-
- /* First, read the data */
- len = read_request(req->sock, (char *)req->headers, tux_max_header_len-1);
- if (len < 0) {
- req->error = TUX_ERROR_CONN_CLOSE;
- goto redirect_error;
- }
- if (!len)
- GOTO_INCOMPLETE;
-
- /*
- * Make it a zero-delimited string to automatically get
- * protection against various buffer overflow situations.
- * Then pass it to the TUX application protocol stack.
- */
- ((char *)req->headers)[len] = 0;
- req->headers_len = len;
-
- parsed_len = req->proto->parse_message(req, len);
-
- /*
- * Is the request fully read? (or is there any error)
- */
- if (parsed_len < 0)
- GOTO_REDIRECT;
- if (!parsed_len) {
- /*
- * Push pending ACK which was delayed due to the
- * pingpong optimization:
- */
- if (was_keepalive) {
- lock_sock(sk);
- tp->ack.pingpong = 0;
- tp->ack.pending |= TCP_ACK_PUSHED;
- cleanup_rbuf(sk, 1);
- release_sock(sk);
- }
- if (len >= tux_max_header_len-1)
- GOTO_REDIRECT;
- GOTO_INCOMPLETE;
- }
- unidle_req(req);
-
- tp->nonagle = 2;
-
- add_req_to_workqueue(req);
- return;
-
-redirect:
- TDprintk("req %p will be redirected!\n", req);
- req_err(req);
-
-redirect_error:
- unidle_req(req);
-
- if (len < 0)
- req->parsed_len = 0;
- else
- req->parsed_len = len;
-
- INC_STAT(parse_static_redirect);
- if (req->headers)
- kfree(req->headers);
- req->headers = NULL;
- if (req->error)
- zap_request(req, cachemiss);
- return;
-
-incomplete:
- if (req->error)
- goto redirect_error;
- if (tp->urg_data && !(tp->urg_data & TCP_URG_READ))
- goto restart;
-
- add_tux_atom(req, parse_request);
- INC_STAT(parse_static_incomplete);
- tux_push_req(req);
-}
-
-int process_requests (threadinfo_t *ti, tux_req_t **user_req)
-{
- struct list_head *head, *curr;
- int count = 0;
- tux_req_t *req;
-
- *user_req = NULL;
-
-restart_loop:
- spin_lock_irq(&ti->work_lock);
- head = &ti->work_pending;
- curr = head->next;
-
- if (curr != head) {
- int i;
-
- req = list_entry(curr, tux_req_t, work);
- Dprintk("PROCESS req %p <%p>.\n",
- req, __builtin_return_address(0));
- for (i = 0; i < req->atom_idx; i++)
- Dprintk("... atom %d: %p\n", i, req->atoms[i]);
-
- if (req->ti != ti)
- TUX_BUG();
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
-
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(curr);
- DEBUG_DEL_LIST(&req->work);
- spin_unlock_irq(&ti->work_lock);
-
- if (!req->atom_idx) {
- if (req->usermode) {
- *user_req = req;
- return count;
- }
- /*
- * idx == 0 requests are flushed automatically.
- */
- flush_request(req, 0);
- } else
- tux_schedule_atom(req, 0);
- count++;
- goto restart_loop;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
-int tux_flush_workqueue (threadinfo_t *ti)
-{
- struct list_head *head, *curr, *next;
- tux_req_t *req;
- int count = 0;
-
-restart:
- spin_lock_irq(&ti->work_lock);
- head = &ti->work_pending;
- curr = head->next;
-
- if (curr != head) {
- req = list_entry(curr, tux_req_t, work);
- next = curr->next;
- clear_bit(0, &req->idle_input);
- clear_bit(0, &req->wait_output_space);
- if (list_empty(&req->work))
- TUX_BUG();
- list_del(curr);
- DEBUG_DEL_LIST(curr);
- DEC_STAT(nr_input_pending);
- spin_unlock_irq(&ti->work_lock);
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->atom_idx = 0;
- clear_keepalive(req);
- req->status = -1;
- if (req->usermode) {
- req->usermode = 0;
- req->private = 0;
- }
- flush_request(req, 0);
- count++;
- goto restart;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
-int print_all_requests (threadinfo_t *ti)
-{
- struct list_head *head, *curr;
- tux_req_t *req;
- int count = 0;
-
- spin_lock_irq(&ti->work_lock);
- head = &ti->all_requests;
- curr = head->next;
-
- while (curr != head) {
- req = list_entry(curr, tux_req_t, all);
- curr = curr->next;
- print_req(req);
- count++;
- }
- spin_unlock_irq(&ti->work_lock);
-
- return count;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * Cleaned up logger output for Alpha.
- * -- Phil Ezolt (Phillip.Ezolt@compaq.com) & Bill Carr (wcarr92@yahoo.com)
- *
- * logger.c: log requests finished by TUX.
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
-static unsigned int log_head, log_tail;
-static char * log_buffer = NULL;
-static DECLARE_WAIT_QUEUE_HEAD(log_wait);
-static DECLARE_WAIT_QUEUE_HEAD(log_full);
-static int logger_pid = 0;
-
-/*
- * High-speed TUX logging architecture:
- *
- * All fast threads share a common log-ringbuffer. (default size 1MB)
- * Log entries are binary and are padded to be cacheline aligned, this
- * ensures that there is no cache-pingpong between fast threads.
- *
- * The logger thread writes out pending log entries within 1 second
- * (buffer-cache writes data out within 5 seconds). The logger thread
- * gets activated once we have more than 25% of the log ringbuffer
- * filled - or the 1 second log timeout expires. Fast threads block
- * if if more than 95% of the ringbuffer is filled and unblock only
- * if used logbuffer space drops below 90%.
- *
- * This architecture guarantees that 1) logging is reliable (no
- * log entry is ever lost), 2) timely (touches disk within 6 seconds),
- * 3) in the log-contention case the saturation behavior is still
- * write-clustered, but 4) if the logger thread can keep up then
- * the coupling is completely asynchron and parallel.
- *
- * The binary log format gives us about 50% saved IO/memory bandwith
- * and 50% less on-disk used log space than the traditional W3C ASCII
- * format.
- *
- * (We might switch to raw IO though to write the logfile.)
- */
-
-#define SOFT_LIMIT (LOG_LEN*25/100)
-#define HARD_LIMIT (LOG_LEN*95/100)
-#define HARD_RELAX_LIMIT (LOG_LEN*90/100)
-
-unsigned int tux_logentry_align_order = 5;
-
-#if SMP_CACHE_BYTES == 8
-# define TUX_LOGENTRY_ALIGN 3
-#else
-#if SMP_CACHE_BYTES == 16
-# define TUX_LOGENTRY_ALIGN 4
-#else
-#if SMP_CACHE_BYTES == 32
-# define TUX_LOGENTRY_ALIGN 5
-#else
-#if SMP_CACHE_BYTES == 64
-# define TUX_LOGENTRY_ALIGN 6
-#else
-#if SMP_CACHE_BYTES == 128
-# define TUX_LOGENTRY_ALIGN 7
-#else
-#if SMP_CACHE_BYTES == 256
-# define TUX_LOGENTRY_ALIGN 8
-#else
-#error Add entry!
-#endif
-#endif
-#endif
-#endif
-#endif
-#endif
-
-#define ROUND_UP(x) (((((x)-1) >> TUX_LOGENTRY_ALIGN) + 1) \
- << TUX_LOGENTRY_ALIGN)
-
-static void __throttle_logging (void)
-{
- DECLARE_WAITQUEUE(wait, current);
- int pending;
-
- add_wait_queue(&log_full, &wait);
- for (;;) {
- static unsigned long last_warning = 0;
-
- if (jiffies - last_warning > 10*HZ) {
- last_warning = jiffies;
- printk(KERN_NOTICE "TUX: log buffer overflow, have to throttle TUX thread!\n");
- }
-
- current->state = TASK_INTERRUPTIBLE;
-
- spin_lock(&log_lock);
- pending = log_head-log_tail;
- spin_unlock(&log_lock);
-
- if ((pending % LOG_LEN) < HARD_LIMIT)
- break;
-
- schedule();
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&log_full, &wait);
-}
-
-#if CONFIG_TUX_DEBUG
-#define CHECK_LOGPTR(ptr) \
-do { \
- if ((ptr < log_buffer) || (ptr > log_buffer + LOG_LEN)) { \
- printk(KERN_ERR "TUX: ouch: log ptr %p > %p + %ld!\n", \
- ptr, log_buffer, LOG_LEN); \
- TUX_BUG(); \
- } \
-} while (0)
-#else
-#define CHECK_LOGPTR(ptr) do { } while (0)
-#endif
-
-void __log_request (tux_req_t *req)
-{
- char *str, *next;
- const char *uri_str;
- unsigned int inc, len, uri_len, pending, next_head, def_vhost_len = 0;
- unsigned long flags;
-
- if (req->proto->pre_log)
- req->proto->pre_log(req);
- /*
- * Log the reply status (success, or type of failure)
- */
- if (!tux_log_incomplete && (!req->status || (req->bytes_sent == -1))) {
-
- Dprintk("not logging req %p: {%s} [%d/%d]\n", req, req->uri_str, req->status, req->bytes_sent);
- return;
- }
- Dprintk("uri: {%s} [%d]\n", req->uri_str, req->uri_len);
-
-#define NO_URI "<none>"
- if (req->uri_len) {
- uri_len = req->uri_len;
- uri_str = req->uri_str;
- } else {
- uri_str = NO_URI;
- uri_len = sizeof(NO_URI)-1;
- }
- len = uri_len + 1;
-
- if (req->virtual) {
- if (req->host_len)
- len += req->host_len;
- else {
- def_vhost_len = strlen(tux_default_vhost);
- len += def_vhost_len;
- }
- }
-
- Dprintk("method_str: {%s} [%d]\n", req->method_str, req->method_len);
- len += req->method_len + 1;
-
- Dprintk("version_str: {%s} [%d]\n", req->version_str, req->version_len);
- len += req->version_len + 1;
-
-#if CONFIG_TUX_EXTENDED_LOG
- Dprintk("user_agent_str: {%s} [%d]\n", req->user_agent_str, req->user_agent_len);
- len += req->user_agent_len + 1;
-#endif
- if (tux_referer_logging) {
- Dprintk("referer_str: {%s} [%d]\n", req->referer_str, req->referer_len);
- len += req->referer_len;
- }
- len++;
-
- inc = 5*sizeof(u32) + len;
-#if CONFIG_TUX_EXTENDED_LOG
- inc += 7*sizeof(u32);
-#endif
-
- spin_lock_irqsave(&log_lock, flags);
-
- next_head = ROUND_UP(log_head + inc);
-
- if (next_head < LOG_LEN) {
- str = log_buffer + log_head;
- if (str > log_buffer + LOG_LEN)
- TUX_BUG();
- log_head = next_head;
- } else {
- if (log_head < LOG_LEN)
- memset(log_buffer+log_head, 0, LOG_LEN-log_head);
- str = log_buffer;
- log_head = ROUND_UP(inc);
- }
-
- if (str < log_buffer || str+inc >= log_buffer+LOG_LEN)
- TUX_BUG();
-
- /*
- * Log record signature - this makes finding the next entry
- * easier (since record length is variable), and makes the
- * binary logfile more robust against potential data corruption
- * and other damage. The signature also servers as a log format
- * version identifier.
- */
-#if CONFIG_TUX_EXTENDED_LOG
- *(u32 *)str = 0x2223beef;
-#else
- *(u32 *)str = 0x1112beef;
-#endif
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- *(u32 *)str = 0;
- /*
- * Log the client IP address:
- */
- if (tux_ip_logging)
- *(u32 *)str = req->client_addr;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
-#if CONFIG_TUX_EXTENDED_LOG
- /*
- * Log the client port number:
- */
- *(u32 *)str = 0;
- if (tux_ip_logging)
- *(u32 *)str = req->client_port;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-#endif
-
- /*
- * Log the request timestamp, in units of 'seconds since 1970'.
- */
- *(u32 *)str = CURRENT_TIME.tv_sec;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
-#if CONFIG_TUX_EXTENDED_LOG
- *(u32 *)str = req->accept_timestamp; str += sizeof(u32);
- *(u32 *)str = req->parse_timestamp; str += sizeof(u32);
- *(u32 *)str = req->output_timestamp; str += sizeof(u32);
- *(u32 *)str = req->flush_timestamp; str += sizeof(u32);
- *(u32 *)str = req->had_cachemiss; str += sizeof(u32);
- *(u32 *)str = req->keep_alive; str += sizeof(u32);
-#endif
- /*
- * Log the requested file size (in fact, log actual bytes sent.)
- */
- *(u32 *)str = req->bytes_sent;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- *(u32 *)str = req->status;
- str += sizeof(u32);
- CHECK_LOGPTR(str);
-
- /*
- * Zero-terminated method, (base) URI, query and version string.
- */
- if (req->method_len) {
- memcpy(str, req->method_str, req->method_len);
- str += req->method_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-
- if (req->virtual) {
- if (req->host_len) {
- memcpy(str, req->host, req->host_len);
- str += req->host_len;
- } else {
- memcpy(str, tux_default_vhost, def_vhost_len);
- str += def_vhost_len;
- }
- CHECK_LOGPTR(str);
- }
-
- memcpy(str, uri_str, uri_len);
- str += uri_len;
- *str++ = 0;
-
- CHECK_LOGPTR(str);
-
- if (req->version_len) {
- memcpy(str, req->version_str, req->version_len);
- str += req->version_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-#if CONFIG_TUX_EXTENDED_LOG
- if (req->user_agent_len) {
- memcpy(str, req->user_agent_str, req->user_agent_len);
- str += req->user_agent_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
-#endif
- CHECK_LOGPTR(str);
-
- if (tux_referer_logging && req->referer_len) {
- memcpy(str, req->referer_str, req->referer_len);
- str += req->referer_len;
- CHECK_LOGPTR(str);
- }
- *str++ = 0;
- CHECK_LOGPTR(str);
- /*
- * pad with spaces to next cacheline, with an ending newline.
- * (not needed for the user-space log utility, but results in
- * a more readable binary log file, and reduces the amount
- * of cache pingpong.)
- */
- next = (char *)ROUND_UP((unsigned long)str);
-
- CHECK_LOGPTR(next);
- len = next-str;
- memset(str, ' ', len);
-
- pending = (log_head-log_tail) % LOG_LEN;
- spin_unlock_irqrestore(&log_lock, flags);
-
- if (pending >= SOFT_LIMIT)
- wake_up(&log_wait);
-
- if (pending >= HARD_LIMIT)
- __throttle_logging();
-}
-
-void tux_push_pending (struct sock *sk)
-{
- struct tcp_opt *tp = tcp_sk(sk);
-
- Dprintk("pushing pending frames on sock %p.\n", sk);
- lock_sock(sk);
- if ((sk->sk_state == TCP_ESTABLISHED) && !sk->sk_err) {
- tp->ack.pingpong = tux_ack_pingpong;
- tp->nonagle = 1;
- __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 0), TCP_NAGLE_OFF);
- }
- release_sock(sk);
-}
-
-inline void tux_push_req (tux_req_t *req)
-{
- if (req->sock)
- tux_push_pending(req->sock->sk);
- if (req->data_sock)
- tux_push_pending(req->data_sock->sk);
-}
-
-void __put_data_sock (tux_req_t *req)
-{
- unlink_tux_data_socket(req);
- if (req->data_sock->file)
- fput(req->data_sock->file);
- else
- sock_release(req->data_sock);
- req->data_sock = NULL;
-}
-
-/* open-coded sys_close */
-
-long tux_close(unsigned int fd)
-{
- struct file * filp;
- struct files_struct *files = current->files;
-
- spin_lock(&files->file_lock);
- if (fd >= files->max_fds)
- goto out_unlock;
- filp = files->fd[fd];
- if (!filp)
- goto out_unlock;
- files->fd[fd] = NULL;
- FD_CLR(fd, files->close_on_exec);
- /* __put_unused_fd(files, fd); */
- __FD_CLR(fd, files->open_fds);
- if (fd < files->next_fd)
- files->next_fd = fd;
- spin_unlock(&files->file_lock);
- return filp_close(filp, files);
-
-out_unlock:
- spin_unlock(&files->file_lock);
- return -EBADF;
-}
-
-void flush_request (tux_req_t *req, int cachemiss)
-{
- struct socket *sock;
- struct sock *sk;
- int keep_alive;
-
- if (cachemiss)
- TUX_BUG();
- __set_task_state(current, TASK_RUNNING);
-
- if (req->magic != TUX_MAGIC)
- TUX_BUG();
- if (req->ti->thread != current)
- TUX_BUG();
-#if CONFIG_TUX_DEBUG
- if (req->bytes_expected && (req->bytes_sent != req->bytes_expected)) {
- printk("hm, bytes_expected: %d != bytes_sent: %d!\n",
- req->bytes_expected, req->bytes_sent);
- TUX_BUG();
- }
-#endif
- SET_TIMESTAMP(req->flush_timestamp);
-
- log_request(req);
- sock = req->sock;
- sk = NULL;
- if (sock)
- sk = sock->sk;
- Dprintk("FLUSHING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), sock, sk, req->keep_alive, req->status);
- if (req->in_file.f_pos)
- /*TUX_BUG()*/;
- release_req_dentry(req);
- req->private = 0;
-
- if (req->docroot_dentry) {
- dput(req->docroot_dentry);
- req->docroot_dentry = NULL;
- if (!req->docroot_mnt)
- TUX_BUG();
- }
- if (req->docroot_mnt) {
- mntput(req->docroot_mnt);
- req->docroot_mnt = NULL;
- }
-
- req->offset_start = 0;
- req->offset_end = 0;
- req->output_len = 0;
- req->total_file_len = 0;
- req->lendigits = 0;
- req->mtime = 0;
- req->etaglen = 0;
- req->etag[0] = 0;
- req->ftp_command = 0;
-
- if (req->postponed)
- TUX_BUG();
- if (test_bit(0, &req->idle_input))
- TUX_BUG();
- if (test_bit(0, &req->wait_output_space))
- TUX_BUG();
- if (req->parsed_len)
- trunc_headers(req);
- if (req->parsed_len)
- TUX_BUG();
- req->attr = NULL;
- req->usermode = 0;
- req->usermodule_idx = 0;
- req->atom_idx = 0;
- if (req->module_dentry) {
- dput(req->module_dentry);
- req->module_dentry = NULL;
- }
- if (req->headers)
- kfree(req->headers);
- req->headers = NULL;
- req->headers_len = 0;
-
- req->method = METHOD_NONE;
- req->method_len = 0;
- req->method_str = NULL;
- req->version = 0;
- req->version_str = NULL;
- req->version_len = 0;
-
- req->uri_str = NULL;
- req->uri_len = 0;
-
- req->objectname[0] = 0;
- req->objectname_len = 0;
-
- req->query_str = NULL;
- req->query_len = 0;
-
- req->cookies_str = NULL;
- req->cookies_len = 0;
- req->parse_cookies = 0;
-
- req->contentlen_str = NULL;
- req->contentlen_len = 0;
- req->content_len = 0;
-
- req->user_agent_str = NULL;
- req->user_agent_len = 0;
-
- req->may_send_gzip = 0;
- req->content_gzipped = 0;
-
- req->content_type_str = NULL;
- req->content_type_len = 0;
-
- req->accept_str = NULL;
- req->accept_len = 0;
-
- req->accept_charset_str = NULL;
- req->accept_charset_len = 0;
-
- req->accept_encoding_str = NULL;
- req->accept_encoding_len = 0;
-
- req->accept_language_str = NULL;
- req->accept_language_len = 0;
-
- req->cache_control_str = NULL;
- req->cache_control_len = 0;
-
- req->if_modified_since_str = NULL;
- req->if_modified_since_len = 0;
-
- req->if_none_match_str = NULL;
- req->if_none_match_len = 0;
-
- req->if_range_str = NULL;
- req->if_range_len = 0;
-
- req->negotiate_str = NULL;
- req->negotiate_len = 0;
-
- req->pragma_str = NULL;
- req->pragma_len = 0;
-
- req->referer_str = NULL;
- req->referer_len = 0;
-
- req->post_data_str = NULL;
- req->post_data_len = 0;
-
- SET_TIMESTAMP(req->accept_timestamp);
-#if CONFIG_TUX_EXTENDED_LOG
- req->parse_timestamp = 0;
- req->output_timestamp = 0;
- req->flush_timestamp = 0;
-#endif
- req->status = 0;
-
- req->total_bytes += req->bytes_sent;
- req->bytes_sent = 0;
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->body_len = 0;
- keep_alive = req->keep_alive;
- clear_keepalive(req);
- req->had_cachemiss = 0;
- // first_timestamp and total_bytes is kept!
- req->event = 0;
- req->lookup_dir = 0;
- req->lookup_404 = 0;
-
- req->error = 0;
- req->user_error = 0;
-
- if (req->abuf.page)
- __free_page(req->abuf.page);
- memset(&req->abuf, 0, sizeof(req->abuf));
-
- if (sk && keep_alive) {
- add_tux_atom(req, parse_request);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- /*
- * Avoid the race with the event callback:
- */
- if (skb_queue_empty(&sk->sk_receive_queue) ||
- !test_and_clear_bit(0, &req->idle_input)) {
- INC_STAT(nr_idle_input_pending);
- spin_unlock_irq(&req->ti->work_lock);
- tux_push_req(req);
- goto out;
- }
- del_keepalive_timer(req);
- spin_unlock_irq(&req->ti->work_lock);
- }
- Dprintk("KEEPALIVE PENDING req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
- add_req_to_workqueue(req);
- INC_STAT(nr_keepalive_optimized);
- goto out;
- }
-
- del_timer_sync(&req->keepalive_timer);
- del_timer_sync(&req->output_timer);
-
- if (timer_pending(&req->keepalive_timer))
- TUX_BUG();
- if (timer_pending(&req->output_timer))
- TUX_BUG();
- if (!list_empty(&req->lru))
- TUX_BUG();
- req->nr_keepalives = 0;
- req->client_addr = 0;
- req->client_port = 0;
- req->virtual = 0;
- req->ftp_offset_start = 0;
-
- req->host[0] = 0;
- req->host_len = 0;
-
- if (req->cwd_dentry) {
- dput(req->cwd_dentry);
- req->cwd_dentry = NULL;
- if (!req->cwd_mnt)
- TUX_BUG();
- }
- if (req->cwd_mnt) {
- mntput(req->cwd_mnt);
- req->cwd_mnt = NULL;
- }
- put_data_sock(req);
- req->prev_pos = 0;
- req->curroff = 0;
- req->total = 0;
- if (req->dirp0) {
- kfree(req->dirp0);
- req->dirp0 = NULL;
- }
-
- if (sk)
- unlink_tux_socket(req);
- req->sock = NULL;
- /*
- * Close potential user-space file descriptors.
- */
- {
- int fd = req->fd, ret;
-
- if (fd != -1) {
- req->fd = -1;
- ret = tux_close(fd);
- if (ret)
- TUX_BUG();
- } else
- if (sock)
- sock_release(sock);
- }
- kfree_req(req);
-out:
- ;
-}
-
-static int warn_once = 1;
-
-static unsigned int writeout_log (void)
-{
- unsigned int len, pending, next_log_tail;
- mm_segment_t oldmm = get_fs();
- struct file *log_filp;
- char * str;
- unsigned int ret;
-
- if (tux_logging)
- Dprintk("TUX logger: opening log file {%s}.\n", tux_logfile);
- log_filp = tux_open_file(tux_logfile, O_CREAT|O_APPEND|O_WRONLY|O_LARGEFILE);
- if (!log_filp) {
- if (warn_once) {
- printk(KERN_ERR "TUX: could not open log file {%s}!\n",
- tux_logfile);
- warn_once = 0;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- return 0;
- }
- spin_lock(&log_lock);
- str = log_buffer + log_tail;
- if (log_head < log_tail) {
- len = LOG_LEN-log_tail;
- next_log_tail = 0;
- } else {
- len = log_head-log_tail;
- next_log_tail = log_head;
- }
- if (!len)
- goto out;
- spin_unlock(&log_lock);
-
- set_fs(KERNEL_DS);
- ret = log_filp->f_op->write(log_filp, str, len, &log_filp->f_pos);
- set_fs(oldmm);
-
- if (len != ret) {
- if (ret == -ENOSPC) {
- printk(KERN_ERR "TUX: trying to write TUX logfile %s, but filesystem is full! Lost %d bytes of log data.\n", tux_logfile, len);
- } else {
- printk(KERN_ERR "TUX: log write %d != %d.\n", ret, len);
- printk(KERN_ERR "TUX: log_filp: %p, str: %p, len: %d str[len-1]: %d.\n", log_filp, str, len, str[len-1]);
- }
- goto out_lock;
- }
-
- /*
- * Sync log data to disk:
- */
- if (log_filp->f_op && log_filp->f_op->fsync) {
- down(&log_filp->f_dentry->d_inode->i_sem);
- log_filp->f_op->fsync(log_filp, log_filp->f_dentry, 1);
- up(&log_filp->f_dentry->d_inode->i_sem);
- }
-
- /*
- * Reduce the cache footprint of the logger file - it's
- * typically write-once.
- */
- invalidate_inode_pages(log_filp->f_dentry->d_inode->i_mapping);
-
-out_lock:
- spin_lock(&log_lock);
-out:
- log_tail = next_log_tail;
- pending = (log_head-log_tail) % LOG_LEN;
- spin_unlock(&log_lock);
-
- if (pending < HARD_LIMIT)
- wake_up(&log_full);
-
- fput(log_filp);
- return pending;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(stop_logger_wait);
-static int stop_logger = 0;
-
-static int logger_thread (void *data)
-{
- DECLARE_WAITQUEUE(wait, current);
- mm_segment_t oldmm;
-
- daemonize("TUX logger");
-
- oldmm = get_fs();
- set_fs(KERNEL_DS);
- printk(KERN_NOTICE "TUX: logger thread started.\n");
-#if CONFIG_SMP
- {
- cpumask_t log_mask, map;
-
- mask_to_cpumask(log_cpu_mask, &log_mask);
- cpus_and(map, cpu_online_map, log_mask);
- if(!(cpus_empty(map)))
- set_cpus_allowed(current, map);
-
- }
-#endif
-
-
- spin_lock_irq(¤t->sighand->siglock);
- siginitsetinv(¤t->blocked, 0);
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- if (log_buffer)
- TUX_BUG();
- log_buffer = vmalloc(LOG_LEN);
- memset(log_buffer, 0, LOG_LEN);
- log_head = log_tail = 0;
-
- current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-
- add_wait_queue(&log_wait, &wait);
- for (;;) {
- if (tux_logging)
- Dprintk("logger does writeout - stop:%d.\n", stop_logger);
-
- while (writeout_log() >= SOFT_LIMIT) {
- if (stop_logger)
- break;
- }
- if (stop_logger)
- break;
- /* nothing */;
-
- if (tux_logging)
- Dprintk("logger does sleep - stop:%d.\n", stop_logger);
- __set_current_state(TASK_INTERRUPTIBLE);
- if (log_head != log_tail) {
- __set_current_state(TASK_RUNNING);
- continue;
- }
- schedule_timeout(HZ);
- if (tux_logging)
- Dprintk("logger back from sleep - stop:%d.\n", stop_logger);
- if (signal_pending(current))
- flush_all_signals();
- }
- remove_wait_queue(&log_wait, &wait);
-
- vfree(log_buffer);
- log_buffer = NULL;
- stop_logger = 0;
- wake_up(&stop_logger_wait);
-
- set_fs(oldmm);
-
- return 0;
-}
-
-void start_log_thread (void)
-{
- warn_once = 1;
-
- logger_pid = kernel_thread(logger_thread, NULL, 0);
- if (logger_pid < 0)
- TUX_BUG();
-}
-
-void stop_log_thread (void)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- Dprintk("stopping logger thread %d ...\n", logger_pid);
-
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&stop_logger_wait, &wait);
- stop_logger = 1;
- wake_up(&log_wait);
- schedule();
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&stop_logger_wait, &wait);
-
- Dprintk("logger thread stopped!\n");
-}
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * main.c: main management and initialization routines
- */
-
-#define __KERNEL_SYSCALLS__
-#define __KERNEL_SYSCALLS_NO_ERRNO__
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-/*
- * Threads information.
- */
-unsigned int nr_tux_threads;
-static atomic_t nr_tux_threads_running = ATOMIC_INIT(0);
-static int stop_threads = 0;
-
-threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS];
-
-static void flush_all_requests (threadinfo_t *ti);
-
-void flush_all_signals (void)
-{
- spin_lock_irq(¤t->sighand->siglock);
- flush_signals(current);
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-}
-
-int nr_requests_used (void)
-{
- unsigned int i, nr = 0;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- nr += ti->nr_requests - ti->nr_free_requests;
- }
-
- return nr;
-}
-
-static inline int accept_pending (threadinfo_t *ti)
-{
- int j;
-
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
- if (!ti->listen[j].proto)
- break;
- if (!ti->listen[j].sock)
- break;
- if (tcp_sk(ti->listen[j].sock->sk)->accept_queue)
- return 1;
- }
- return 0;
-}
-
-static inline int requests_pending (threadinfo_t *ti)
-{
- if (!list_empty(&ti->work_pending))
- return 1;
- return 0;
-}
-
-static int event_loop (threadinfo_t *ti)
-{
- tux_req_t *req;
- int work_done;
-
-repeat_accept:
- if (ti->thread != current)
- TUX_BUG();
-
- /*
- * Any (relevant) event on the socket will change this
- * thread to TASK_RUNNING because we add it to both
- * the main listening and the connection request socket
- * waitqueues. Thus we can do 'lazy checking' of work
- * to be done and schedule away only if the thread is
- * still TASK_INTERRUPTIBLE. This makes TUX fully
- * event driven.
- */
- set_task_state(current, TASK_INTERRUPTIBLE);
- current->flags |= PF_MEMALLOC;
- work_done = 0;
- if (accept_pending(ti))
- work_done = accept_requests(ti);
-
- if (requests_pending(ti)) {
- work_done = process_requests(ti, &req);
- if (req)
- goto handle_userspace_req;
- }
-
- /*
- * Be nice to other processes:
- */
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- goto repeat_accept;
- }
-
- if (ti->userspace_req)
- TUX_BUG();
- if (unlikely(stop_threads))
- goto handle_stop;
-
- /* Any signals? */
- if (unlikely(signal_pending(current)))
- goto handle_signal;
-
- if (work_done)
- goto repeat_accept;
- /*
- * Any socket event either on the listen socket
- * or on the request sockets will wake us up:
- */
- if ((current->state != TASK_RUNNING) &&
- !requests_pending(ti) && !accept_pending(ti)) {
- Dprintk("fast thread: no work to be done, sleeping.\n");
- schedule();
- Dprintk("fast thread: back from sleep!\n");
- goto repeat_accept;
- }
- goto repeat_accept;
-
-handle_userspace_req:
- if (req->attr)
- TUX_BUG();
- switch_docroot(req);
- ti->userspace_req = req;
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_USERSPACE_REQUEST;
-
-handle_signal:
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_SIGNAL;
-
-handle_stop:
- __set_task_state(current, TASK_RUNNING);
- return TUX_RETURN_EXIT;
-}
-
-static int init_queues (int nr_tux_threads)
-{
- int i;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
-
- INIT_LIST_HEAD(&ti->all_requests);
-
- ti->free_requests_lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&ti->free_requests);
-
- ti->work_lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&ti->work_pending);
- INIT_LIST_HEAD(&ti->lru);
-
- }
- return 0;
-}
-
-int tux_chroot (char *dir)
-{
- kernel_cap_t saved_cap = current->cap_effective;
- mm_segment_t oldmm;
- int err;
-
- /* Allow chroot dir to be in kernel space. */
- oldmm = get_fs(); set_fs(KERNEL_DS);
- set_fs(KERNEL_DS);
- cap_raise (current->cap_effective, CAP_SYS_CHROOT);
-
- err = chroot(dir);
- if (!err)
- chdir("/");
-
- current->cap_effective = saved_cap;
- set_fs(oldmm);
-
- return err;
-}
-
-/*
- * Right now this is not fully SMP-safe against multiple TUX
- * managers. It's just a rudimentary protection against typical
- * mistakes.
- */
-static int initialized = 0;
-
-#define MAX_DOCROOTLEN 500
-
-static int lookup_docroot(struct nameidata *docroot, const char *name)
-{
- int err;
-
- docroot->mnt = mntget(current->fs->rootmnt);
- docroot->dentry = dget(current->fs->root);
- docroot->last.len = 0;
- docroot->flags = LOOKUP_FOLLOW;
-
- err = path_walk(name, docroot);
- if (err) {
- mntput(docroot->mnt);
- docroot->mnt = NULL;
- return err;
- }
- return 0;
-}
-
-static int user_req_startup (void)
-{
- char name[MAX_DOCROOTLEN];
- struct nameidata *docroot;
- unsigned int i;
- int err;
-
- if (initialized)
- return -EINVAL;
- initialized = 1;
-
- /*
- * Look up the HTTP and FTP document root.
- * (typically they are shared, but can be
- * different directories.)
- */
- docroot = &tux_proto_http.main_docroot;
- if (docroot->mnt)
- TUX_BUG();
- strcpy(name, tux_common_docroot);
- strcat(name, tux_http_subdocroot);
-
- err = lookup_docroot(docroot, name);
- if (err) {
- initialized = 0;
- printk(KERN_ERR "TUX: could not look up HTTP documentroot: \"%s\"\n", name);
- return err;
- }
-
- docroot = &tux_proto_ftp.main_docroot;
- if (docroot->mnt)
- TUX_BUG();
- strcpy(name, tux_common_docroot);
- strcat(name, tux_ftp_subdocroot);
-
- err = lookup_docroot(docroot, name);
- if (err) {
-abort:
- docroot = &tux_proto_http.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- initialized = 0;
- printk(KERN_ERR "TUX: could not look up FTP documentroot: \"%s\"\n", name);
- return err;
- }
-
- /*
- * Start up the logger thread. (which opens the logfile)
- */
- start_log_thread();
-
- nr_tux_threads = tux_threads;
- if (nr_tux_threads < 1)
- nr_tux_threads = 1;
- if (nr_tux_threads > CONFIG_TUX_NUMTHREADS)
- nr_tux_threads = CONFIG_TUX_NUMTHREADS;
- tux_threads = nr_tux_threads;
-
- /*
- * Set up per-thread work-queues:
- */
- memset(threadinfo, 0, CONFIG_TUX_NUMTHREADS*sizeof(threadinfo_t));
- init_queues(nr_tux_threads);
-
- /*
- * Prepare the worker thread structures.
- */
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- ti->cpu = i;
- ti->gzip_state.workspace =
- vmalloc(zlib_deflate_workspacesize());
- if (!ti->gzip_state.workspace ||
- (zlib_deflateInit(&ti->gzip_state, 6) != Z_OK)) {
- stop_log_thread();
- goto abort;
- }
- init_MUTEX(&ti->gzip_sem);
- }
-
- __module_get(tux_module);
-
- return 0;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_stop);
-static DECLARE_WAIT_QUEUE_HEAD(thread_stopped);
-
-static int user_req_shutdown (void)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct nameidata *docroot;
- int i, err = -EINVAL;
-
- lock_kernel();
- if (!initialized) {
- Dprintk("TUX is not up - cannot shut down.\n");
- goto err;
- }
- initialized = 0;
- stop_threads = 1;
- add_wait_queue(&thread_stopped, &wait);
-
-wait_more:
- /*
- * Wake up all the worker threads so they notice
- * that we are being stopped.
- */
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&nr_tux_threads_running)) {
- Dprintk("TUX: shutdown, %d threads still running.\n",
- atomic_read(&nr_tux_threads_running));
- wake_up(&wait_stop);
- schedule();
- goto wait_more;
- }
- set_task_state(current, TASK_RUNNING);
- stop_threads = 0;
- remove_wait_queue(&thread_stopped, &wait);
-
- if (nr_async_io_pending())
- TUX_BUG();
-
- stop_log_thread();
-
- docroot = &tux_proto_http.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- docroot = &tux_proto_ftp.main_docroot;
- path_release(docroot);
- memset(docroot, 0, sizeof(*docroot));
- err = 0;
-
- flush_dentry_attributes();
- free_mimetypes();
- unregister_all_tuxmodules();
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- vfree(ti->gzip_state.workspace);
- }
-
- module_put(tux_module);
-
-err:
- unlock_kernel();
- return err;
-}
-
-void drop_permissions (void)
-{
- /*
- * Userspace drops privileges already, and group
- * membership is important to keep.
- */
- /* Give the new process no privileges.. */
- current->uid = current->euid =
- current->suid = current->fsuid = tux_cgi_uid;
- current->gid = current->egid =
- current->sgid = current->fsgid = tux_cgi_gid;
- cap_clear(current->cap_permitted);
- cap_clear(current->cap_inheritable);
- cap_clear(current->cap_effective);
-}
-
-static int wait_for_others (void)
-{
- threadinfo_t *ti;
- unsigned int cpu;
-
-repeat:
- if (signal_pending(current))
- return -1;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
-
- for (cpu = 0; cpu < nr_tux_threads; cpu++) {
- ti = threadinfo + cpu;
- if (ti->listen_error)
- return -1;
- if (!ti->started)
- goto repeat;
- }
- /* ok, all threads have started up. */
- return 0;
-}
-
-static void zap_listen_sockets (threadinfo_t *ti)
-{
- struct socket *sock;
- int i;
-
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- if (!ti->listen[i].proto)
- break;
- sock = ti->listen[i].sock;
- if (!ti->listen[i].cloned && sock) {
- while (waitqueue_active(sock->sk->sk_sleep))
- yield();
- sock_release(sock);
- }
- ti->listen[i].sock = NULL;
- ti->listen[i].proto = NULL;
- ti->listen[i].cloned = 0;
- }
-}
-
-static DECLARE_MUTEX(serialize_startup);
-
-static int user_req_start_thread (threadinfo_t *ti)
-{
- unsigned int err, cpu, i, j, k;
- struct k_sigaction *ka;
-
- cpu = ti->cpu;
-#if CONFIG_SMP
- {
- unsigned int mask;
- cpumask_t cpu_mask, map;
-
- mask = 1 << ((cpu + tux_cpu_offset) % num_online_cpus());
-
- mask_to_cpumask(mask, &cpu_mask);
- cpus_and(map, cpu_mask, cpu_online_map);
- if(!(cpus_empty(map)))
- set_cpus_allowed(current, map);
- }
-#endif
- ti->thread = current;
- atomic_inc(&nr_tux_threads_running);
-
- err = start_cachemiss_threads(ti);
- if (err)
- goto out;
-
- init_waitqueue_entry(&ti->stop, current);
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- init_waitqueue_entry(ti->wait_event + j, current);
-
- ka = current->sighand->action + SIGCHLD-1;
- ka->sa.sa_handler = SIG_IGN;
-
- /* Block all signals except SIGKILL, SIGSTOP, SIGHUP and SIGCHLD */
- spin_lock_irq(¤t->sighand->siglock);
- siginitsetinv(¤t->blocked, sigmask(SIGKILL) |
- sigmask(SIGSTOP)| sigmask(SIGHUP) | sigmask(SIGCHLD));
- recalc_sigpending();
- spin_unlock_irq(¤t->sighand->siglock);
-
- if (!tux_listen[cpu][0].proto) {
- printk(KERN_ERR "no listen socket specified for TUX thread %d, in /proc/net/tux/%d/listen/, aborting.\n", cpu, cpu);
- goto error;
- }
-
- /*
- * Serialize startup so that listen sockets can be
- * created race-free.
- */
- down(&serialize_startup);
-
- Dprintk("thread %d initializing sockets.\n", cpu);
-
- for (k = 0; k < CONFIG_TUX_NUMSOCKETS; k++) {
- tux_socket_t *e1, *e2;
-
- e1 = tux_listen[cpu] + k;
- if (!e1->proto)
- break;
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++) {
- if (i == cpu)
- continue;
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
- e2 = tux_listen[i] + j;
- if (!e2->proto)
- continue;
- if ((e1->ip == e2->ip) && (e1->port == e2->port) && (e1->proto == e2->proto) && threadinfo[i].listen[j].proto) {
- ti->listen[k] = threadinfo[i].listen[j];
- ti->listen[k].cloned = 1;
- Dprintk("cloned socket %d from thread %d's socket %d.\n", k, i, j);
- goto next_socket;
- }
- }
- }
-
- ti->listen[k].sock = start_listening(tux_listen[cpu] + k, cpu);
- if (!ti->listen[k].sock)
- goto error_unlock;
- ti->listen[k].cloned = 0;
- ti->listen[k].proto = tux_listen[cpu][k].proto;
- Dprintk("thread %d got sock %p (%d), proto %s.\n", cpu, ti->listen[k].sock, k, ti->listen[k].proto->name);
-next_socket:
- ;
- }
- Dprintk("thread %d done initializing sockets.\n", cpu);
- up(&serialize_startup);
-
- if (wait_for_others())
- goto error_nomsg;
-
- if (!ti->listen[0].proto) {
- printk("hm, socket 0 has no protocol.\n");
- goto error;
- }
-
- add_wait_queue(&wait_stop, &ti->stop);
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- if (ti->listen[j].proto)
- add_wait_queue_exclusive(ti->listen[j].sock->sk->sk_sleep,
- ti->wait_event + j);
- drop_permissions();
-
- __module_get(tux_module);
- return 0;
-
-error_unlock:
- up(&serialize_startup);
-error:
- printk(KERN_NOTICE "TUX: could not start worker thread %d.\n", ti->cpu);
-
-error_nomsg:
- ti->listen_error = 1;
- ti->started = 0;
-
- zap_listen_sockets(ti);
- flush_all_requests(ti);
- stop_cachemiss_threads(ti);
-
- err = -EINVAL;
-
-out:
- /*
- * Last thread close the door:
- */
- if (atomic_dec_and_test(&nr_tux_threads_running))
- user_req_shutdown();
-
- return -err;
-}
-
-static int flush_idleinput (threadinfo_t * ti)
-{
- struct list_head *head, *tmp;
- tux_req_t *req;
- int count = 0;
-
- head = &ti->all_requests;
- tmp = head->next;
-
- while (tmp != head) {
- req = list_entry(tmp, tux_req_t, all);
- tmp = tmp->next;
- if (test_bit(0, &req->idle_input)) {
- idle_event(req);
- count++;
- }
- }
- return count;
-}
-
-static int flush_waitoutput (threadinfo_t * ti)
-{
- struct list_head *head, *tmp;
- tux_req_t *req;
- int count = 0;
-
- head = &ti->all_requests;
- tmp = head->next;
-
- while (tmp != head) {
- req = list_entry(tmp, tux_req_t, all);
- tmp = tmp->next;
- if (test_bit(0, &req->wait_output_space)) {
- output_space_event(req);
- count++;
- }
- }
- return count;
-}
-
-static void flush_all_requests (threadinfo_t *ti)
-{
- for (;;) {
- int count;
-
- count = flush_idleinput(ti);
- count += flush_waitoutput(ti);
- count += tux_flush_workqueue(ti);
- count += flush_freequeue(ti);
- if (!ti->nr_requests)
- break;
- /*
- * Go through again if we advanced:
- */
- if (count)
- continue;
- Dprintk("flush_all_requests: %d requests still waiting.\n", ti->nr_requests);
-#if TUX_DEBUG
- count = print_all_requests(ti);
- Dprintk("flush_all_requests: printed %d requests.\n", count);
-#endif
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ/10);
- }
-}
-
-int nr_async_io_pending (void)
-{
- unsigned int i, sum = 0;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- if (ti->iot)
- sum += ti->iot->nr_async_pending;
- }
- return sum;
-}
-
-static int user_req_stop_thread (threadinfo_t *ti)
-{
- int j;
-
- printk(KERN_NOTICE "TUX: thread %d stopping ...\n",
- (int)(ti-threadinfo));
-
- if (!ti->started)
- TUX_BUG();
- for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
- if (ti->listen[j].proto)
- remove_wait_queue(ti->listen[j].sock->sk->sk_sleep,
- ti->wait_event + j);
- remove_wait_queue(&wait_stop, &ti->stop);
-
- Dprintk(KERN_NOTICE "TUX: thread %d waiting for sockets to go inactive ...\n", (int)(ti-threadinfo));
- zap_listen_sockets(ti);
-
- Dprintk(KERN_NOTICE "TUX: thread %d has all sockets inactive.\n", (int)(ti-threadinfo));
-
- flush_all_requests(ti);
- stop_cachemiss_threads(ti);
-
- if (ti->nr_requests)
- TUX_BUG();
- ti->started = 0;
-
- printk(KERN_INFO "TUX: thread %d stopped.\n", ti->cpu);
-
- ti->thread = NULL;
- current->tux_info = NULL;
- current->tux_exit = NULL;
- atomic_dec(&nr_tux_threads_running);
- wake_up(&thread_stopped);
-
- module_put(tux_module);
-
- return 0;
-}
-
-#define COPY_INT(u_field, k_field) \
-do { \
- if (__copy_to_user(&u_info->u_field, &req->k_field, \
- sizeof(req->k_field))) \
- return_EFAULT; \
-} while (0)
-
-#define GETLEN(k_field, maxlen) \
- ((req->k_field##_len < maxlen) ? \
- req->k_field##_len : maxlen-1)
-
-#define COPY_STR(u_field, k_field, maxlen) \
-do { \
- if (__copy_to_user(u_info->u_field, req->k_field##_str, \
- GETLEN(k_field, maxlen))) \
- return_EFAULT; \
-} while (0)
-
-#define COPY_COND_STR(u_field,k_field,maxlen) \
-do { \
- if (req->k_field##_len) \
- COPY_STR(u_field, k_field, maxlen); \
- if (__put_user((char)0, u_info->u_field + \
- GETLEN(k_field, maxlen))) \
- return_EFAULT; \
-} while (0)
-
-static void finish_userspace_req (tux_req_t *req)
-{
- threadinfo_t *ti = req->ti;
-
- ti->userspace_req = NULL;
- req->usermode = 0;
- req->private = 0;
- req->error = 0;
- DEC_STAT(nr_userspace_pending);
- flush_request(req, 0);
-}
-
-static void zap_userspace_req (tux_req_t *req)
-{
- clear_keepalive(req);
- finish_userspace_req(req);
-}
-
-/*
- * Fills in the user-space request structure:
- */
-static int prepare_userspace_req (threadinfo_t *ti, user_req_t *u_info)
-{
- u64 u_req;
- tux_req_t *req = ti->userspace_req;
- unsigned int tmp;
- int filelen;
- int fd;
-
- Dprintk("prepare_userspace_req(%p).\n", req);
- if (!req)
- TUX_BUG();
- if (req->error) {
- TDprintk("userspace request has error %d.\n", req->error);
- return -1;
- }
- fd = req->fd;
- if (fd == -1) {
- fd = sock_map_fd(req->sock);
- Dprintk("sock_map_fd(%p) :%d.\n", req, fd);
- if (fd < 0) {
- Dprintk("sock_map_fd() returned %d.\n", fd);
- return -EMFILE;
- }
- req->fd = fd;
- }
-
-#define return_EFAULT do { Dprintk("-EFAULT at %d:%s.\n", __LINE__, __FILE__); return -EFAULT; } while (0)
-
- if (!access_ok(VERIFY_WRITE, u_info, sizeof(*u_info)))
- return_EFAULT;
- if (__copy_to_user(&u_info->sock, &fd, sizeof(fd)))
- return_EFAULT;
- if (req->attr)
- TUX_BUG();
-
- COPY_INT(module_index, usermodule_idx);
-
- COPY_COND_STR(query, query, MAX_URI_LEN);
-
- COPY_INT(event, event);
- Dprintk("prepare userspace, user error: %d, event %d.\n", req->user_error, req->event);
- COPY_INT(error, user_error);
- req->user_error = 0;
-
- filelen = req->total_file_len;
- if (filelen < 0)
- filelen = 0;
- if (__copy_to_user(&u_info->objectlen, &filelen, sizeof(filelen)))
- return_EFAULT;
- if ((req->method == METHOD_POST) && !filelen)
- if (__copy_to_user(&u_info->objectlen,
- &req->content_len, sizeof(filelen)))
- return_EFAULT;
- if (req->objectname_len) {
- if (req->objectname[req->objectname_len])
- TUX_BUG();
- if (__copy_to_user(u_info->objectname, req->objectname,
- req->objectname_len + 1))
- return_EFAULT;
- } else
- if (__put_user((char)0, u_info->objectname))
- return_EFAULT;
-
- COPY_INT(http_version, version);
- COPY_INT(http_method, method);
- COPY_INT(keep_alive, keep_alive);
-
- COPY_INT(cookies_len, cookies_len);
- if (req->cookies_len)
- COPY_STR(cookies, cookies, MAX_COOKIE_LEN);
- if (__put_user((char)0, u_info->cookies + req->cookies_len))
- return_EFAULT;
-
- u_req = (u64)(unsigned long)req;
- if (__copy_to_user(&u_info->id, &u_req, sizeof(u_req)))
- return_EFAULT;
- COPY_INT(priv, private);
- COPY_INT(bytes_sent, bytes_sent);
-
- tmp = inet_sk(req->sock->sk)->daddr;
- if (__copy_to_user(&u_info->client_host, &tmp, sizeof(tmp)))
- return_EFAULT;
-
- COPY_COND_STR(content_type, content_type, MAX_FIELD_LEN);
- COPY_COND_STR(user_agent, user_agent, MAX_FIELD_LEN);
- COPY_COND_STR(accept, accept, MAX_FIELD_LEN);
- COPY_COND_STR(accept_charset, accept_charset, MAX_FIELD_LEN);
- COPY_COND_STR(accept_encoding, accept_encoding, MAX_FIELD_LEN);
- COPY_COND_STR(accept_language, accept_language, MAX_FIELD_LEN);
- COPY_COND_STR(cache_control, cache_control, MAX_FIELD_LEN);
- COPY_COND_STR(if_modified_since, if_modified_since, MAX_FIELD_LEN);
- COPY_COND_STR(negotiate, negotiate, MAX_FIELD_LEN);
- COPY_COND_STR(pragma, pragma, MAX_FIELD_LEN);
- COPY_COND_STR(referer, referer, MAX_FIELD_LEN);
-
- return TUX_RETURN_USERSPACE_REQUEST;
-}
-
-#define GOTO_ERR_no_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_no_unlock; } while (0)
-#define GOTO_ERR_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_unlock; } while (0)
-
-static int register_mimetype(user_req_t *u_info)
-{
- char extension[MAX_URI_LEN], mimetype[MAX_URI_LEN], expires[MAX_URI_LEN];
- u64 u_addr;
- char *addr;
- int ret;
-
- ret = strncpy_from_user(extension, u_info->objectname, MAX_URI_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- extension[ret] = 0;
- Dprintk("got MIME extension: %s.\n", extension);
- ret = copy_from_user(&u_addr, &u_info->object_addr, sizeof(u_addr));
- if (ret)
- GOTO_ERR_no_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = strncpy_from_user(mimetype, addr, MAX_URI_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- mimetype[ret] = 0;
- Dprintk("got MIME type: %s.\n", mimetype);
- ret = strncpy_from_user(expires, u_info->cache_control, MAX_URI_LEN);
- if (ret >= 0)
- expires[ret] = 0;
- else
- expires[0] = 0;
- Dprintk("got expires header: %s.\n", expires);
-
- add_mimetype(extension, mimetype, expires);
- ret = 0;
-err_no_unlock:
- return ret;
-}
-
-void user_send_buffer (tux_req_t *req, int cachemiss)
-{
- int ret;
-
-
- SET_TIMESTAMP(req->output_timestamp);
-
-repeat:
- ret = send_sync_buf(req, req->sock, req->userbuf, req->userlen, MSG_DONTWAIT | MSG_MORE);
- switch (ret) {
- case -EAGAIN:
- add_tux_atom(req, user_send_buffer);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- INC_STAT(user_sendbuf_write_misses);
- break;
- default:
- if (ret <= 0) {
- req_err(req);
- req->usermode = 0;
- req->private = 0;
- add_req_to_workqueue(req);
- break;
- }
- req->userbuf += ret;
- req->userlen -= ret;
- if ((int)req->userlen < 0)
- TUX_BUG();
- if (req->userlen)
- goto repeat;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-void user_send_object (tux_req_t *req, int cachemiss)
-{
- int ret;
-
-
- SET_TIMESTAMP(req->output_timestamp);
-
-repeat:
- ret = generic_send_file(req, req->sock, cachemiss);
- switch (ret) {
- case -5:
- add_tux_atom(req, user_send_object);
- output_timeout(req);
- break;
- case -4:
- add_tux_atom(req, user_send_object);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- INC_STAT(user_sendobject_write_misses);
- break;
- case -3:
- INC_STAT(user_sendobject_cachemisses);
- add_tux_atom(req, user_send_object);
- queue_cachemiss(req);
- break;
- case -1:
- break;
- default:
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-void user_get_object (tux_req_t *req, int cachemiss)
-{
- int missed;
-
- if (!req->dentry) {
- req->usermode = 0;
- missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
- if (req->usermode)
- TUX_BUG();
- req->usermode = 1;
- if (!missed && !req->dentry) {
- req->error = 0;
- req->user_error = -ENOENT;
- add_req_to_workqueue(req);
- return;
- }
- if (missed) {
- if (cachemiss)
- TUX_BUG();
- INC_STAT(user_lookup_cachemisses);
-fetch_missed:
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- add_tux_atom(req, user_get_object);
- queue_cachemiss(req);
- return;
- }
- }
- req->total_file_len = req->dentry->d_inode->i_size;
- if (!req->output_len)
- req->output_len = req->total_file_len;
- if (tux_fetch_file(req, !cachemiss)) {
- INC_STAT(user_fetch_cachemisses);
- goto fetch_missed;
- }
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
-}
-
-asmlinkage long __sys_tux (unsigned int action, user_req_t *u_info)
-{
- int ret = -1;
- threadinfo_t *ti;
- tux_req_t *req;
-
- if (action != TUX_ACTION_CURRENT_DATE)
- Dprintk("got sys_tux(%d, %p).\n", action, u_info);
-
- if (action >= MAX_TUX_ACTION)
- GOTO_ERR_no_unlock;
-
- ti = (threadinfo_t *) current->tux_info;
- if (ti)
- if (ti->thread != current)
- TUX_BUG();
-
- if (!capable(CAP_SYS_ADMIN)
- && (action != TUX_ACTION_CONTINUE_REQ) &&
- (action != TUX_ACTION_STOPTHREAD))
- goto userspace_actions;
-
- switch (action) {
- case TUX_ACTION_CONTINUE_REQ:
- ret = continue_request((int)(long)u_info);
- goto out;
-
- case TUX_ACTION_STARTUP:
- lock_kernel();
- ret = user_req_startup();
- unlock_kernel();
- goto out;
-
- case TUX_ACTION_SHUTDOWN:
- lock_kernel();
- ret = user_req_shutdown();
- unlock_kernel();
- goto out;
-
- case TUX_ACTION_REGISTER_MODULE:
- ret = user_register_module(u_info);
- goto out;
-
- case TUX_ACTION_UNREGISTER_MODULE:
- ret = user_unregister_module(u_info);
- goto out;
-
- case TUX_ACTION_STARTTHREAD:
- {
- unsigned int nr;
-
- ret = copy_from_user(&nr, &u_info->thread_nr,
- sizeof(int));
- if (ret)
- GOTO_ERR_no_unlock;
- if (nr >= nr_tux_threads)
- GOTO_ERR_no_unlock;
- ti = threadinfo + nr;
- if (ti->started)
- GOTO_ERR_unlock;
- ti->started = 1;
- current->tux_info = ti;
- current->tux_exit = tux_exit;
- if (ti->thread)
- TUX_BUG();
- Dprintk("TUX: current open files limit for TUX%d: %ld.\n", nr, current->rlim[RLIMIT_NOFILE].rlim_cur);
- lock_kernel();
- ret = user_req_start_thread(ti);
- unlock_kernel();
- if (ret) {
- current->tux_info = NULL;
- current->tux_exit = NULL;
- } else {
- if (ti->thread != current)
- TUX_BUG();
- }
- goto out_userreq;
- }
-
- case TUX_ACTION_STOPTHREAD:
- if (!ti)
- GOTO_ERR_no_unlock;
- if (!ti->started)
- GOTO_ERR_unlock;
- req = ti->userspace_req;
- if (req)
- zap_userspace_req(req);
-
- lock_kernel();
- ret = user_req_stop_thread(ti);
- unlock_kernel();
- goto out_userreq;
-
- case TUX_ACTION_CURRENT_DATE:
- ret = strncpy_from_user(tux_date, u_info->new_date,
- DATE_LEN);
- if (ret <= 0)
- GOTO_ERR_no_unlock;
- goto out;
-
- case TUX_ACTION_REGISTER_MIMETYPE:
- ret = register_mimetype(u_info);
- if (ret)
- GOTO_ERR_no_unlock;
- goto out;
-
- case TUX_ACTION_QUERY_VERSION:
- ret = (TUX_MAJOR_VERSION << 24) | (TUX_MINOR_VERSION << 16) | TUX_PATCHLEVEL_VERSION;
- goto out;
- default:
- ;
- }
-
-userspace_actions:
-
- if (!ti)
- GOTO_ERR_no_unlock;
-
- if (!ti->started)
- GOTO_ERR_unlock;
-
- req = ti->userspace_req;
- if (!req) {
- if (action == TUX_ACTION_EVENTLOOP)
- goto eventloop;
- GOTO_ERR_unlock;
- }
- if (!req->usermode)
- TUX_BUG();
-
- ret = copy_from_user(&req->event, &u_info->event, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->status, &u_info->http_status, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->bytes_sent, &u_info->bytes_sent, sizeof(int));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_from_user(&req->private, &u_info->priv, sizeof(req->private));
- if (ret)
- GOTO_ERR_unlock;
-
- switch (action) {
-
- case TUX_ACTION_EVENTLOOP:
-eventloop:
- req = ti->userspace_req;
- if (req)
- zap_userspace_req(req);
- ret = event_loop(ti);
- goto out_userreq;
-
- /*
- * Module forces keepalive off, server will close
- * the connection.
- */
- case TUX_ACTION_FINISH_CLOSE_REQ:
- clear_keepalive(req);
-
- case TUX_ACTION_FINISH_REQ:
- finish_userspace_req(req);
- goto eventloop;
-
- case TUX_ACTION_REDIRECT_REQ:
-
- ti->userspace_req = NULL;
- req->usermode = 0;
- req->private = 0;
- req->error = TUX_ERROR_REDIRECT;
- DEC_STAT(nr_userspace_pending);
- add_tux_atom(req, redirect_request);
- add_req_to_workqueue(req);
-
- goto eventloop;
-
- case TUX_ACTION_POSTPONE_REQ:
-
- postpone_request(req);
- ti->userspace_req = NULL;
- ret = TUX_RETURN_USERSPACE_REQUEST;
- break;
-
- case TUX_ACTION_GET_OBJECT:
- release_req_dentry(req);
- ret = strncpy_from_user(req->objectname,
- u_info->objectname, MAX_URI_LEN-1);
- if (ret <= 0) {
- req->objectname[0] = 0;
- req->objectname_len = 0;
- GOTO_ERR_unlock;
- }
- req->objectname[ret] = 0; // string delimit
- req->objectname_len = ret;
-
- Dprintk("got objectname {%s} (%d) from user-space req %p (req: %p).\n", req->objectname, req->objectname_len, u_info, req);
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_get_object(req, 0);
- goto eventloop;
-
- case TUX_ACTION_READ_OBJECT:
- {
- u64 u_addr;
- char *addr;
- loff_t ppos = 0;
- struct file *filp;
-
- if (!req->dentry)
- GOTO_ERR_unlock;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- filp = dentry_open(req->dentry, O_RDONLY, 0);
- dget(req->dentry);
- generic_file_read(filp, addr, req->total_file_len, &ppos);
- fput(filp);
- ret = TUX_RETURN_USERSPACE_REQUEST;
- break;
- }
-
- case TUX_ACTION_SEND_OBJECT:
- if (!req->dentry)
- GOTO_ERR_unlock;
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_send_object(req, 0);
- goto eventloop;
-
- case TUX_ACTION_SEND_BUFFER:
- {
- u64 u_addr;
- char *addr;
- unsigned int len;
-
- ret = copy_from_user(&u_addr,
- &u_info->object_addr, sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = copy_from_user(&len,
- &u_info->objectlen, sizeof(addr));
- if (ret)
- GOTO_ERR_unlock;
- if ((int)len <= 0)
- GOTO_ERR_unlock;
-
- ret = -EFAULT;
- if (!access_ok(VERIFY_READ, addr, len))
- GOTO_ERR_unlock;
- req->userbuf = addr;
- req->userlen = len;
-
- req->ti->userspace_req = NULL;
- DEC_STAT(nr_userspace_pending);
- user_send_buffer(req, 0);
- ret = 0;
- goto eventloop;
- }
-
- case TUX_ACTION_READ_HEADERS:
- {
- char *addr;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
- ret = copy_to_user(&u_info->objectlen,
- &req->headers_len, sizeof(req->headers_len));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_to_user(addr,req->headers, req->headers_len);
- if (ret)
- GOTO_ERR_unlock;
- break;
- }
-
- case TUX_ACTION_READ_POST_DATA:
- {
- char *addr;
- unsigned int size;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- addr = (char *)(unsigned long)u_addr;
-
- ret = copy_from_user(&size, &u_info->objectlen,
- sizeof(size));
- if (ret)
- GOTO_ERR_unlock;
- Dprintk("READ_POST_DATA: got %p(%d).\n", addr, size);
- if (req->post_data_len < size)
- size = req->post_data_len;
- Dprintk("READ_POST_DATA: writing %d.\n", size);
- ret = copy_to_user(&u_info->objectlen,
- &size, sizeof(size));
- if (ret)
- GOTO_ERR_unlock;
- ret = copy_to_user(addr, req->post_data_str, size);
- if (ret)
- GOTO_ERR_unlock;
- goto out;
- }
-
- case TUX_ACTION_WATCH_PROXY_SOCKET:
- {
- struct socket *sock;
- int err;
- long fd;
- u64 u_addr;
-
- ret = copy_from_user(&u_addr, &u_info->object_addr,
- sizeof(u_addr));
- if (ret)
- GOTO_ERR_unlock;
- fd = (int)(unsigned long)u_addr;
-
- sock = sockfd_lookup(fd, &err);
- if (!sock)
- GOTO_ERR_unlock;
- put_data_sock(req);
- link_tux_data_socket(req, sock);
-
- ret = 0;
- goto out;
- }
-
- case TUX_ACTION_WAIT_PROXY_SOCKET:
- {
- if (!req->data_sock)
- GOTO_ERR_unlock;
- if (socket_input(req->data_sock)) {
- ret = TUX_RETURN_USERSPACE_REQUEST;
- goto out_userreq;
- }
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0, &req->idle_input))
- TUX_BUG();
- spin_unlock_irq(&req->ti->work_lock);
- if (socket_input(req->data_sock)) {
- unidle_req(req);
- ret = TUX_RETURN_USERSPACE_REQUEST;
- goto out_userreq;
- }
- req->ti->userspace_req = NULL;
- goto eventloop;
- }
-
- default:
- GOTO_ERR_unlock;
- }
-
-out_userreq:
- req = ti->userspace_req;
- if (req) {
- ret = prepare_userspace_req(ti, u_info);
- if (ret < 0) {
- TDprintk("hm, user req %p returned %d, zapping.\n",
- req, ret);
- zap_userspace_req(req);
- goto eventloop;
- }
- }
-out:
- if (action != TUX_ACTION_CURRENT_DATE)
- Dprintk("sys_tux(%d, %p) returning %d.\n", action, u_info, ret);
- while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- }
- return ret;
-err_unlock:
-err_no_unlock:
- Dprintk("sys_tux(%d, %p) returning -EINVAL (ret:%d)!\n", action, u_info, ret);
- while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
- __set_task_state(current, TASK_RUNNING);
- schedule();
- }
- return -EINVAL;
-}
-
-/*
- * This gets called if a TUX thread does an exit().
- */
-void tux_exit (void)
-{
- __sys_tux(TUX_ACTION_STOPTHREAD, NULL);
-}
-
-int tux_init(void)
-{
- start_sysctl();
-
-#if CONFIG_TUX_MODULE
- spin_lock(&tux_module_lock);
- sys_tux_ptr = __sys_tux;
- tux_module = THIS_MODULE;
- spin_unlock(&tux_module_lock);
-#endif
-
- return 0;
-}
-
-void tux_cleanup (void)
-{
-#if CONFIG_TUX_MODULE
- spin_lock(&tux_module_lock);
- tux_module = NULL;
- sys_tux_ptr = NULL;
- spin_unlock(&tux_module_lock);
-#endif
-
- end_sysctl();
-}
-
-module_init(tux_init)
-module_exit(tux_cleanup)
-
-MODULE_LICENSE("GPL");
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * mod.c: loading/registering of dynamic TUX modules
- */
-
-#include <net/tux.h>
-#include <linux/kmod.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-spinlock_t tuxmodules_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(tuxmodules_list);
-
-tcapi_template_t * get_first_usermodule (void)
-{
- tcapi_template_t *tcapi;
- struct list_head *head, *curr, *next;
-
- spin_lock(&tuxmodules_lock);
- head = &tuxmodules_list;
- next = head->next;
-
- while ((curr = next) != head) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- next = curr->next;
- if (tcapi->userspace_id) {
- spin_unlock(&tuxmodules_lock);
- return tcapi;
- }
- }
- spin_unlock(&tuxmodules_lock);
- return NULL;
-}
-
-static tcapi_template_t * lookup_module (const char *vfs_name)
-{
- tcapi_template_t *tcapi;
- struct list_head *head, *curr, *next;
-
- while (*vfs_name == '/')
- vfs_name++;
- Dprintk("looking up TUX module {%s}.\n", vfs_name);
- head = &tuxmodules_list;
- next = head->next;
-
- while ((curr = next) != head) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- next = curr->next;
- Dprintk("checking module {%s} == {%s}?\n", vfs_name, tcapi->vfs_name);
- if (!strcmp(tcapi->vfs_name, vfs_name))
- return tcapi;
- }
- return NULL;
-}
-
-/*
- * Attempt to load a TUX application module.
- * This is the slow path, we cache ('link') the module's
- * API vector to the inode.
- * The module loading path is serialized, and we handshake
- * with the loaded module and fetch its API vector.
- */
-tcapi_template_t * lookup_tuxmodule (const char *filename)
-{
- tcapi_template_t *tcapi;
-
- spin_lock(&tuxmodules_lock);
- tcapi = lookup_module(filename);
- if (!tcapi)
- Dprintk("did not find module vfs:{%s}\n", filename);
- spin_unlock(&tuxmodules_lock);
- return tcapi;
-}
-
-
-int register_tuxmodule (tcapi_template_t *tcapi)
-{
- int ret = -EEXIST;
-
- spin_lock(&tuxmodules_lock);
-
- if (lookup_module(tcapi->vfs_name)) {
- Dprintk("module with VFS binding '%s' already registered!\n",
- tcapi->vfs_name);
- goto out;
- }
-
- list_add(&tcapi->modules, &tuxmodules_list);
- ret = 0;
- Dprintk("TUX module %s registered.\n", tcapi->vfs_name);
-out:
- spin_unlock(&tuxmodules_lock);
-
- return ret;
-}
-
-void unregister_all_tuxmodules (void)
-{
- tcapi_template_t *tcapi;
- struct list_head *curr;
-
- spin_lock(&tuxmodules_lock);
- while (((curr = tuxmodules_list.next)) != &tuxmodules_list) {
- tcapi = list_entry(curr, tcapi_template_t, modules);
- list_del(curr);
- kfree(tcapi->vfs_name);
- kfree(tcapi);
- }
- spin_unlock(&tuxmodules_lock);
-}
-
-tcapi_template_t * unregister_tuxmodule (char *vfs_name)
-{
- tcapi_template_t *tcapi;
- int err = 0;
-
- spin_lock(&tuxmodules_lock);
- tcapi = lookup_module(vfs_name);
- if (!tcapi) {
- Dprintk("huh, module %s not registered??\n", vfs_name);
- err = -1;
- } else {
- list_del(&tcapi->modules);
- Dprintk("TUX module %s unregistered.\n", vfs_name);
- }
- spin_unlock(&tuxmodules_lock);
-
- return tcapi;
-}
-
-static int check_module_version (user_req_t *u_info)
-{
- int major, minor, patch, ret;
-
- ret = copy_from_user(&major, &u_info->version_major, sizeof(int));
- ret += copy_from_user(&minor, &u_info->version_minor, sizeof(int));
- ret += copy_from_user(&patch, &u_info->version_patch, sizeof(int));
- if (ret)
- return -EFAULT;
-
- if ((major != TUX_MAJOR_VERSION) || (minor > TUX_MINOR_VERSION)) {
-
- printk(KERN_ERR "TUX: module version %d:%d incompatible with kernel version %d:%d!\n", major, minor, TUX_MAJOR_VERSION, TUX_MINOR_VERSION);
- return -EINVAL;
- }
- return 0;
-}
-
-int user_register_module (user_req_t *u_info)
-{
- int idx, len, ret;
- tcapi_template_t *tcapi;
- char modulename [MAX_URI_LEN+1];
-
- ret = check_module_version(u_info);
- if (ret)
- return ret;
-
- /*
- * Check module name length.
- */
- ret = strnlen_user(u_info->objectname, MAX_URI_LEN+2);
- if (ret < 0)
- goto out;
- ret = -EINVAL;
- if (ret >= MAX_URI_LEN)
- goto out;
-
- Dprintk("register user-module, %p.\n", u_info);
- ret = strncpy_from_user(modulename, u_info->objectname, MAX_URI_LEN);
- if (ret < 0)
- goto out;
- modulename[ret] = 0;
- Dprintk("... user-module is: {%s}.\n", modulename);
- len = strlen(modulename);
- if (!len)
- printk(KERN_ERR "no module name provided: please upgrade your TUX user-space utilities!\n");
- if (!len || (len > MAX_URI_LEN))
- return -EINVAL;
- Dprintk("... user-module len is: %d.\n", len);
-
- ret = copy_from_user(&idx, &u_info->module_index, sizeof(int));
- if (ret || !idx)
- goto out;
- Dprintk("... user-module index is: %d.\n", idx);
-
- ret = -ENOMEM;
- tcapi = (tcapi_template_t *) kmalloc(sizeof(*tcapi), GFP_KERNEL);
- if (!tcapi)
- goto out;
- memset(tcapi, 0, sizeof(*tcapi));
-
- tcapi->vfs_name = (char *) kmalloc(len+1, GFP_KERNEL);
- if (!tcapi->vfs_name) {
- kfree(tcapi);
- goto out;
- }
- strcpy(tcapi->vfs_name, modulename);
- tcapi->userspace_id = idx;
-
- Dprintk("... registering module {%s}.\n", tcapi->vfs_name);
- ret = register_tuxmodule(tcapi);
-out:
- return ret;
-}
-
-int user_unregister_module (user_req_t *u_info)
-{
- int len, ret;
- tcapi_template_t *tcapi;
- char modulename [MAX_URI_LEN+1];
-
- /*
- * Check module name length.
- */
- ret = strnlen_user(u_info->objectname, MAX_URI_LEN+2);
- if (ret < 0)
- goto out;
- ret = -EINVAL;
- if (ret >= MAX_URI_LEN)
- goto out;
- Dprintk("unregister user-module, %p.\n", u_info);
- ret = strncpy_from_user(modulename, u_info->objectname, MAX_URI_LEN);
- if (ret <= 0)
- goto out;
- modulename[ret] = 0;
- Dprintk("... user-module is: {%s}.\n", modulename);
- len = strlen(modulename);
- if (!len || (len > MAX_URI_LEN))
- return -EINVAL;
- Dprintk("... user-module len is: %d.\n", len);
-
- Dprintk("... unregistering module {%s}.\n", modulename);
- tcapi = unregister_tuxmodule(modulename);
- ret = -EINVAL;
- if (tcapi) {
- ret = 0;
- kfree(tcapi->vfs_name);
- kfree(tcapi);
- }
-out:
- return ret;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * output.c: Send data to clients
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-int send_sync_buf (tux_req_t *req, struct socket *sock, const char *buf, const size_t length, unsigned long flags)
-{
- struct msghdr msg;
- struct iovec iov;
- int len, written = 0, left = length;
- struct tcp_opt *tp = tcp_sk(sock->sk);
-
- tp->nonagle = 2;
-
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags | MSG_NOSIGNAL;
-repeat_send:
- msg.msg_iov->iov_len = left;
- msg.msg_iov->iov_base = (char *) buf + written;
-
- len = sock_sendmsg(sock, &msg, left);
-
- Dprintk("sendmsg ret: %d, written: %d, left: %d.\n", len,written,left);
- if ((len == -ERESTARTSYS) || (!(flags & MSG_DONTWAIT) &&
- (len == -EAGAIN))) {
- flush_all_signals();
- goto repeat_send;
- }
- if (len > 0) {
- written += len;
- left -= len;
- if (left)
- goto repeat_send;
- }
- if (len >= 0) {
- if (written != length)
- TUX_BUG();
- if (left)
- TUX_BUG();
- }
- if (req && (written > 0))
- req->bytes_sent += written;
- Dprintk("sendmsg FINAL ret: %d, written: %d, left: %d.\n", len,written,left);
- return written ? written : len;
-}
-
-unsigned int tux_zerocopy_sendfile = 1;
-
-typedef struct sock_send_desc
-{
- struct socket *sock;
- tux_req_t *req;
-} sock_send_desc_t;
-
-static int sock_send_actor (read_descriptor_t * desc, struct page *page,
- unsigned long offset, unsigned long orig_size)
-{
- sock_send_desc_t *sock_desc = (sock_send_desc_t *)desc->arg.buf;
- struct socket *sock = sock_desc->sock;
- tux_req_t *req = sock_desc->req;
- unsigned int flags;
- ssize_t written;
- char *buf = NULL;
- unsigned int size;
-
- flags = MSG_DONTWAIT | MSG_NOSIGNAL;
- if (desc->count < orig_size)
- orig_size = desc->count;
- if (desc->count > orig_size)
- flags |= MSG_MORE;
- Dprintk("sock_send_actor(), page: %p, offset: %ld, orig_size: %ld, sock: %p, desc->count: %d, desc->written: %d, MSG_MORE: %d.\n", page, offset, orig_size, sock, desc->count, desc->written, flags & MSG_MORE);
-
- if (req->content_gzipped >= 2) {
- unsigned int gzip_left;
- struct msghdr msg;
- struct iovec iov;
- mm_segment_t oldmm;
- char *kaddr = kmap(page);
- __u32 in_len, out_len;
- out_len = orig_size*101/100 + 12;
- buf = tux_kmalloc(out_len);
- in_len = orig_size;
- size = out_len;
- gzip_left = 0;
-// 8b1f 0808 fdc4 3bd8 0300 79
-buf[1] = 0x8b; buf[0] = 0x1f; buf[3] = 0x08; buf[2] = 0x08;
-buf[5] = 0xfd; buf[4] = 0xc4; buf[7] = 0x3b; buf[6] = 0xd8;
-buf[9] = 0x03; buf[8] = 0x00; buf[10] = 0x79;
- size += 11;
- Dprintk("pre-compress: in_len: %d, out_len: %d, gzip_left: %d, uncompressed size: %d.\n", in_len, out_len, gzip_left, size);
- gzip_left = tux_gzip_compress(req, kaddr, buf+11, &in_len, &out_len);
- size -= out_len;
- buf[11] = 0x79; buf[12] = 0x00;
-
- Dprintk("post-compress: in_len: %d, out_len: %d, gzip_left: %d, compressed size: %d.\n", in_len, out_len, gzip_left, size);
- kunmap(page);
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- flags &= ~MSG_DONTWAIT;
- msg.msg_flags = flags;
- iov.iov_base = buf;
- iov.iov_len = size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- written = sock_sendmsg(sock, &msg, size);
- set_fs(oldmm);
-
- Dprintk("buf: %p, offset: %ld, size: %d, written: %d.\n", buf, offset, size, written);
- if (written == size)
- written = orig_size;
- else
- written = size;
-
- } else {
- size = orig_size;
- if (tux_zerocopy_sendfile && sock->ops->sendpage &&
- (sock->sk->sk_route_caps&NETIF_F_SG)) {
- written = sock->ops->sendpage(sock, page, offset, size, flags);
- } else {
- struct msghdr msg;
- struct iovec iov;
- char *kaddr;
- mm_segment_t oldmm;
-
- if (offset+size > PAGE_SIZE)
- return -EFAULT;
-
- kaddr = kmap(page);
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags;
- iov.iov_base = kaddr + offset;
- iov.iov_len = size;
-
- oldmm = get_fs(); set_fs(KERNEL_DS);
- written = sock_sendmsg(sock, &msg, size);
- set_fs(oldmm);
-
- Dprintk("kaddr: %p, offset: %ld, size: %d, written: %d.\n", kaddr, offset, size, written);
- kunmap(page);
- }
- }
- if (written < 0) {
- desc->error = written;
- written = 0;
- }
- Dprintk("desc->count: %d, desc->written: %d, written: %d.\n", desc->count, desc->written, written);
- desc->count -= written;
- if ((int)desc->count < 0)
- TUX_BUG();
- desc->written += written;
-
- if (buf)
- kfree(buf);
-
- return written;
-}
-
-/*
- * Return 1 if the output space condition went away
- * before adding the handler.
- */
-int add_output_space_event (tux_req_t *req, struct socket *sock)
-{
- struct sock *sk = sock->sk;
- /*
- * blocked due to socket IO?
- */
- spin_lock_irq(&req->ti->work_lock);
- add_keepalive_timer(req);
- if (test_and_set_bit(0,&req->wait_output_space))
- TUX_BUG();
- INC_STAT(nr_output_space_pending);
-
- if ((sk->sk_state == TCP_ESTABLISHED) && enough_wspace(sk)) {
- if (test_and_clear_bit(0, &req->wait_output_space)) {
- DEC_STAT(nr_output_space_pending);
- del_keepalive_timer(req);
- spin_unlock_irq(&req->ti->work_lock);
- return 1;
- }
- }
- spin_unlock_irq(&req->ti->work_lock);
-
- return 0;
-}
-
-#define SEND_BLOCKSIZE (164*1024)
-
-int generic_send_file (tux_req_t *req, struct socket *sock, int cachemiss)
-{
- sock_send_desc_t sock_desc;
- int len, want, nonblock = !cachemiss;
- struct tcp_opt *tp = tcp_sk(sock->sk);
-
- tp->nonagle = 2;
-
- sock_desc.sock = sock;
- sock_desc.req = req;
-
-repeat:
- Dprintk("generic_send_file(%p,%d,%p) called, f_pos: %Ld, output_len: %Ld.\n", req, nonblock, sock, req->in_file.f_pos, req->output_len);
-
- if (req->proto->check_req_err(req, cachemiss))
- return -1;
- if (connection_too_fast(req) == 2) {
- len = -5;
- goto out;
- }
- if (req->total_file_len < req->in_file.f_pos)
- TUX_BUG();
-
- req->desc.written = 0;
- /*
- * Careful, output_len can be 64-bit, while 'want' can be 32-bit.
- */
- if (req->output_len > SEND_BLOCKSIZE)
- want = SEND_BLOCKSIZE;
- else
- want = req->output_len;
- req->desc.count = want;
- req->desc.arg.buf = (char *) &sock_desc;
- req->desc.error = 0;
- Dprintk("sendfile(), desc.count: %d.\n", req->desc.count);
- do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc, sock_send_actor, nonblock);
- if (req->desc.written > 0) {
- req->bytes_sent += req->desc.written;
- req->output_len -= req->desc.written;
- }
- if (!nonblock && (req->desc.error == -EWOULDBLOCKIO))
- TUX_BUG();
- Dprintk("sendfile() wrote: %d bytes.\n", req->desc.written);
- if (req->output_len && !req->desc.written && !req->desc.error) {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return -1;
- }
-
- switch (req->desc.error) {
-
- case -EWOULDBLOCKIO:
- len = -3;
- break;
- case -EAGAIN:
-no_write_space:
- Dprintk("sk->wmem_queued: %d, sk->sndbuf: %d.\n",
- sock->sk->sk_wmem_queued, sock->sk->sk_sndbuf);
- len = -4;
- break;
- default:
- len = req->desc.written;
-#if CONFIG_TUX_DEBUG
- if (req->desc.error)
- TDprintk("TUX: sendfile() returned error %d (signals pending: %08lx)!\n", req->desc.error, current->pending.signal.sig[0]);
-#endif
- if (!req->desc.error) {
- if (req->output_len < 0)
- BUG();
- if (req->output_len) {
- if (test_bit(SOCK_NOSPACE, &sock->flags))
- goto no_write_space;
- goto repeat;
- }
- }
-#if CONFIG_TUX_DEBUG
- if (req->desc.written != want)
- TDprintk("TUX: sendfile() wrote %d bytes, wanted %d! (pos %Ld) (signals pending: %08lx).\n", req->desc.written, want, req->in_file.f_pos, current->pending.signal.sig[0]);
- else
- Dprintk("TUX: sendfile() FINISHED for req %p, wrote %d bytes.\n", req, req->desc.written);
- req->bytes_expected = 0;
-#endif
- break;
- }
-
-out:
- Dprintk("sendfile() wrote %d bytes.\n", len);
-
- return len;
-}
-
-static int file_fetch_actor (read_descriptor_t * desc, struct page *page,
- unsigned long offset, unsigned long size)
-{
- if (desc->count < size)
- size = desc->count;
-
- desc->count -= size;
- desc->written += size;
-
- return size;
-}
-
-int tux_fetch_file (tux_req_t *req, int nonblock)
-{
- int len;
-
- req->desc.written = 0;
- req->desc.count = req->output_len;
- req->desc.arg.buf = NULL;
- req->desc.error = 0;
-
- do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc,
- file_fetch_actor, nonblock);
- if (nonblock && (req->desc.error == -EWOULDBLOCKIO))
- return 1;
- len = req->desc.written;
- if (req->desc.error)
- Dprintk("fetchfile() returned %d error!\n", req->desc.error);
- Dprintk("fetchfile() fetched %d bytes.\n", len);
- return 0;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, Ingo Molnar <mingo@redhat.com>
- *
- * parser.h: generic parsing routines
- */
-
-#define get_c(ptr,left) \
-({ \
- char __ret; \
- \
- if (!left) \
- GOTO_INCOMPLETE; \
- left--; \
- __ret = *((ptr)++); \
- if (!__ret) \
- GOTO_REDIR; \
- __ret; \
-})
-
-#define PARSE_TOKEN(ptr,str,left) \
- ({ \
- int __ret; \
- \
- if (!left) \
- GOTO_INCOMPLETE; \
- if (sizeof(str)-1 > left) { \
- if (memcmp(ptr, str, left)) \
- GOTO_REDIR; \
- GOTO_INCOMPLETE; \
- } \
- \
- if (memcmp(ptr, str, sizeof(str)-1)) \
- __ret = 0; \
- else { \
- ptr += sizeof(str)-1; \
- left -= sizeof(str)-1; \
- __ret = 1; \
- } \
- __ret; \
- })
-
-#define PARSE_METHOD(req,ptr,name,left) \
- ({ \
- int __ret; \
- \
- if (PARSE_TOKEN(ptr,#name" ",left)) { \
- req->method = METHOD_##name; \
- __ret = 1; \
- } else \
- __ret = 0; \
- __ret; \
- })
-
-#define COPY_LINE(ptr,target,left) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') \
- *target++ = prev_c = c; \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define COPY_LINE_TOLOWER(ptr,target,left,limit) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') { \
- if ((c >= 'A') && (c <= 'Z')) \
- c -= 'A'-'a'; \
- *target++ = prev_c = c; \
- if (target == (limit)) \
- GOTO_REDIR; \
- } \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define COPY_FIELD(ptr,target,left) \
- do { \
- char c; \
- while ((c = get_c(ptr,left)) != ' ') \
- *target++ = c; \
- } while (0)
-
-#define SKIP_LINE(ptr,left) \
- do { \
- char prev_c = 0, c; \
- while (((c = get_c(ptr,left))) != '\n') \
- prev_c = c; \
- if (prev_c != '\r') \
- GOTO_REDIR; \
- } while (0)
-
-#define SKIP_WHITESPACE(curr,left) \
-do { \
- while ((left) && (*(curr) == ' ')) \
- (curr)++, (left)--; \
- if (!(left)) \
- GOTO_REDIR; \
-} while (0)
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * postpone.c: postpone/continue userspace requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-void postpone_request (tux_req_t *req)
-{
- if (!req->usermode)
- TUX_BUG();
- INC_STAT(nr_postpone_pending);
- req->postponed = 1;
-}
-
-/*
- * Continue a postponed request. The request will show up in the
- * userspace queue and will be handled by the fast thread.
- * A request can only be postponed in a TUX process, but can be
- * continued from any process that has access to the socket file
- * descriptor.
- */
-int continue_request (int fd)
-{
- threadinfo_t *ti;
- struct socket *sock;
- tux_req_t *req;
- int err;
-
- sock = sockfd_lookup(fd, &err);
- if (!sock || !sock->sk)
- goto out;
- req = sock->sk->sk_user_data;
-
- err = -EINVAL;
- if (!req)
- goto out_put;
- ti = req->ti;
- if (!req->postponed)
- goto out_unlock_put;
- if (!req->usermode)
- TUX_BUG();
-
- req->postponed = 0;
- DEC_STAT(nr_postpone_pending);
-
- Dprintk("continuing postponed req %p.\n", req);
- add_req_to_workqueue(req);
-
-out_unlock_put:
- err = 0;
-out_put:
- fput(sock->file);
-out:
- return err;
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * proc.c: /proc/sys/tux handling
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-char tux_common_docroot[200] = "/var/www/tux/";
-char tux_http_subdocroot[200] = "";
-char tux_ftp_subdocroot[200] = "";
-char tux_logfile[200] = "/var/log/tux";
-char tux_cgiroot[200] = "/var/www/tux/cgiroot/";
-char tux_404_page[200] = "404.html";
-char tux_default_vhost[200] = "default";
-char tux_extra_html_header[600];
-unsigned int tux_extra_html_header_size = 0;
-
-int tux_cgi_uid = -1;
-int tux_cgi_gid = -1;
-unsigned int tux_clientport = 8080;
-unsigned int tux_logging = 0;
-unsigned int tux_threads = 2;
-unsigned int tux_max_connect = 10000;
-unsigned int tux_max_keepalives = 10000;
-unsigned int tux_max_backlog = 2048;
-unsigned int tux_keepalive_timeout = 0;
-unsigned int tux_max_output_bandwidth = 0;
-unsigned int tux_defer_accept = 1;
-unsigned int tux_mode_forbidden = 0 /*S_IXUGO*/; /* do not allow executable (CGI) files */
-unsigned int tux_mode_allowed = S_IROTH; /* allow access if read-other is set */
-unsigned int tux_virtual_server = 0;
-unsigned int tux_ftp_virtual_server = 0;
-unsigned int mass_hosting_hash = 0;
-unsigned int strip_host_tail = 0;
-unsigned int tux_max_object_size = 0;
-unsigned int log_cpu_mask = ~0;
-unsigned int tux_compression = 0;
-unsigned int tux_noid = 0;
-unsigned int tux_cgi_inherit_cpu = 0;
-unsigned int tux_cgi_cpu_mask = ~0;
-unsigned int tux_zerocopy_header = 1;
-unsigned int tux_max_free_requests = 1000;
-unsigned int tux_ignore_query = 0;
-unsigned int tux_all_userspace = 0;
-unsigned int tux_redirect_logging = 1;
-unsigned int tux_max_header_len = 3000;
-unsigned int tux_referer_logging = 0;
-unsigned int tux_generate_etags = 1;
-unsigned int tux_generate_last_mod = 1;
-unsigned int tux_generate_cache_control = 1;
-unsigned int tux_ip_logging = 1;
-unsigned int tux_ftp_wait_close = 1;
-unsigned int tux_ftp_log_retr_only = 0;
-unsigned int tux_hide_unreadable = 1;
-unsigned int tux_http_dir_indexing = 0;
-unsigned int tux_log_incomplete = 0;
-unsigned int tux_cpu_offset = 0;
-unsigned int tux_ftp_login_message = 0;
-
-static struct ctl_table_header *tux_table_header;
-
-static ctl_table tux_table[] = {
- { NET_TUX_DOCROOT,
- "documentroot",
- &tux_common_docroot,
- sizeof(tux_common_docroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DOCROOT,
- "http_subdocroot",
- &tux_http_subdocroot,
- sizeof(tux_http_subdocroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DOCROOT,
- "ftp_subdocroot",
- &tux_ftp_subdocroot,
- sizeof(tux_ftp_subdocroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGFILE,
- "logfile",
- &tux_logfile,
- sizeof(tux_logfile),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_THREADS,
- "threads",
- &tux_threads,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_KEEPALIVE_TIMEOUT,
- "keepalive_timeout",
- &tux_keepalive_timeout,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_KEEPALIVE_BW,
- "max_output_bandwidth",
- &tux_max_output_bandwidth,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_DEFER_ACCEPT,
- "defer_accept",
- &tux_defer_accept,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_BACKLOG,
- "max_backlog",
- &tux_max_backlog,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_CONNECT,
- "max_connect",
- &tux_max_connect,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_KEEPALIVES,
- "max_keepalives",
- &tux_max_keepalives,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MODE_FORBIDDEN,
- "mode_forbidden",
- &tux_mode_forbidden,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MODE_ALLOWED,
- "mode_allowed",
- &tux_mode_allowed,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_UID,
- "cgi_uid",
- &tux_cgi_uid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_GID,
- "cgi_gid",
- &tux_cgi_gid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGIROOT,
- "cgiroot",
- &tux_cgiroot,
- sizeof(tux_cgiroot),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "404_page",
- &tux_404_page,
- sizeof(tux_404_page),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "default_vhost",
- &tux_default_vhost,
- sizeof(tux_default_vhost),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_404_PAGE,
- "extra_html_header",
- &tux_extra_html_header,
- sizeof(tux_extra_html_header),
- 0644,
- NULL,
- proc_dostring,
- &sysctl_string,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "extra_html_header_size",
- &tux_extra_html_header_size,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "clientport",
- &tux_clientport,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_etags",
- &tux_generate_etags,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_last_mod",
- &tux_generate_last_mod,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "generate_cache_control",
- &tux_generate_cache_control,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ip_logging",
- &tux_ip_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ftp_wait_close",
- &tux_ftp_wait_close,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "ftp_log_retr_only",
- &tux_ftp_log_retr_only,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "http_dir_indexing",
- &tux_http_dir_indexing,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "hide_unreadable",
- &tux_hide_unreadable,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CLIENTPORT,
- "log_incomplete",
- &tux_log_incomplete,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGGING,
- "TDprintk",
- &tux_TDprintk,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGGING,
- "Dprintk",
- &tux_Dprintk,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
-#if TUX_DPRINTK
-#endif
- { NET_TUX_LOGGING,
- "logging",
- &tux_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_LOGENTRY_ALIGN_ORDER,
- "logentry_align_order",
- &tux_logentry_align_order,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ACK_PINGPONG,
- "ack_pingpong",
- &tux_ack_pingpong,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_PUSH_ALL,
- "push_all",
- &tux_push_all,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_PARSE,
- "zerocopy_parse",
- &tux_zerocopy_parse,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "virtual_server",
- &tux_virtual_server,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "mass_hosting_hash",
- &mass_hosting_hash,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "strip_host_tail",
- &strip_host_tail,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_VIRTUAL_SERVER,
- "ftp_virtual_server",
- &tux_ftp_virtual_server,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_OBJECT_SIZE,
- "max_object_size",
- &tux_max_object_size,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_COMPRESSION,
- "compression",
- &tux_compression,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_NOID,
- "noid",
- &tux_noid,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_INHERIT_CPU,
- "cgi_inherit_cpu",
- &tux_cgi_inherit_cpu,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_CGI_CPU_MASK,
- "cgi_cpu_mask",
- &tux_cgi_cpu_mask,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_HEADER,
- "zerocopy_header",
- &tux_zerocopy_header,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ZEROCOPY_SENDFILE,
- "zerocopy_sendfile",
- &tux_zerocopy_sendfile,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_FREE_REQUESTS,
- "max_free_requests",
- &tux_max_free_requests,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_ALL_USERSPACE,
- "all_userspace",
- &tux_all_userspace,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REDIRECT_LOGGING,
- "redirect_logging",
- &tux_redirect_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_IGNORE_QUERY,
- "ignore_query",
- &tux_ignore_query,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "referer_logging",
- &tux_referer_logging,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "cpu_offset",
- &tux_cpu_offset,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_REFERER_LOGGING,
- "ftp_login_message",
- &tux_ftp_login_message,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- { NET_TUX_MAX_HEADER_LEN,
- "max_header_len",
- &tux_max_header_len,
- sizeof(int),
- 0644,
- NULL,
- proc_dointvec,
- &sysctl_intvec,
- NULL,
- NULL,
- NULL
- },
- {0,0,0,0,0,0,0,0,0,0,0} };
-
-
-static ctl_table tux_dir_table[] = {
- {NET_TUX, "tux", NULL, 0, 0555, tux_table,0,0,0,0,0},
- {0,0,0,0,0,0,0,0,0,0,0}
-};
-
-static ctl_table tux_root_table[] = {
- {CTL_NET, "net", NULL, 0, 0555, tux_dir_table,0,0,0,0,0},
- {0,0,0,0,0,0,0,0,0,0,0}
-};
-
-
-static struct proc_dir_entry * root_tux_dir;
-static struct proc_dir_entry * log_cpu_mask_entry;
-static struct proc_dir_entry * stat_entry;
-static struct proc_dir_entry * tux_dir [CONFIG_TUX_NUMTHREADS];
-static struct proc_dir_entry * listen_dir [CONFIG_TUX_NUMTHREADS];
-
-tux_socket_t tux_listen [CONFIG_TUX_NUMTHREADS][CONFIG_TUX_NUMSOCKETS] =
- { [0 ... CONFIG_TUX_NUMTHREADS-1] = { {&tux_proto_http, 0, 80, NULL}, } };
-
-#define HEX_DIGITS 8
-
-static int hex_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- if (count < HEX_DIGITS+1)
- return -EINVAL;
- return sprintf (page, "%08x\n", *(unsigned int *)data);
-}
-
-static int hex_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char hexnum [HEX_DIGITS];
- unsigned int new_value;
- unsigned int i, full_count = count;
-
- if (!count)
- return -EINVAL;
- if (count > HEX_DIGITS)
- count = HEX_DIGITS;
- if (copy_from_user(hexnum, buffer, count))
- return -EFAULT;
-
- /*
- * Parse the first 8 characters as a hex string, any non-hex char
- * is end-of-string. '00e1', 'e1', '00E1', 'E1' are the same.
- */
- new_value = 0;
-
- for (i = 0; i < count; i++) {
- unsigned int c = hexnum[i];
-
- switch (c) {
- case '0' ... '9': c -= '0'; break;
- case 'a' ... 'f': c -= 'a'-10; break;
- case 'A' ... 'F': c -= 'A'-10; break;
- default:
- goto out;
- }
- new_value = (new_value << 4) | c;
- }
-out:
- *(int *)data = new_value;
-
- return full_count;
-}
-
-#define LINE_SIZE 1024
-#define LINE_MASK (LINE_SIZE-1)
-
-static int print_request_stats (threadinfo_t *ti, char *page, unsigned int skip_count, unsigned int max_count)
-{
- struct list_head *head, *curr;
- tux_req_t *req;
- unsigned int count = 0, size, line_off, len;
- char stat_line [LINE_SIZE];
-
- if (!max_count)
- BUG();
-
- head = &ti->all_requests;
- curr = head->next;
-
- while (curr != head) {
- req = list_entry(curr, tux_req_t, all);
- curr = curr->next;
- count++;
- if (count <= skip_count)
- continue;
- line_off = 0;
-#define SP(x...) \
- line_off += sprintf(stat_line + line_off, x)
-
- if (req->proto == &tux_proto_http)
- SP("0 ");
- else
- SP("1 ");
-
- SP("%p ", req);
- SP("%d ", req->atom_idx);
- if (req->atom_idx >= 1)
- SP("%p ", req->atoms[0]);
- else
- SP("........ ");
- if (req->atom_idx >= 2)
- SP("%p ", req->atoms[1]);
- else
- SP("........ ");
- if (!list_empty(&req->work)) SP("W"); else SP(".");
- if (!list_empty(&req->free)) SP("F"); else SP(".");
- if (!list_empty(&req->lru)) SP("L"); else SP(".");
- if (req->keep_alive) SP("K"); else SP(".");
- if (req->idle_input) SP("I"); else SP(".");
- if (timer_pending(&req->keepalive_timer))
- SP("T(%lu/%lu)",jiffies,req->keepalive_timer.expires); else SP(".");
- if (req->wait_output_space) SP("O"); else SP(".");
- if (timer_pending(&req->output_timer))
- SP("T"); else SP(".");
- SP(" %d ", req->error);
- SP(" %d ", req->status);
-
-#define SP_HOST(ip,port) \
- SP("%d.%d.%d.%d:%d ",NIPQUAD(ip),port)
-
- if (req->sock) {
- if (req->sock->sk)
- SP("%d:", req->sock->sk->sk_state);
- else
- SP("-2:");
- } else
- SP("-1:");
- SP_HOST(req->client_addr, req->client_port);
-
- SP("%Ld ", req->total_file_len);
- SP("%Ld ", req->in_file.f_pos);
- if (req->proto == &tux_proto_http) {
- SP("%d ", req->method);
- SP("%d ", req->version);
- }
- if (req->proto == &tux_proto_ftp) {
- SP("%d ", req->ftp_command);
- if (req->data_sock) {
- if (req->data_sock->sk)
- SP("%d:",req->data_sock->sk->sk_state);
- else
- SP("-2:");
- if (req->data_sock->sk)
- SP_HOST(inet_sk(req->data_sock->sk)->daddr,
- inet_sk(req->data_sock->sk)->dport);
- else
- SP("-1:-1 ");
- } else
- SP("-1 ");
- }
- SP("%p/%p %p/%p ", req->sock, req->sock ? req->sock->sk : (void *)-1, req->data_sock, req->data_sock ? req->data_sock->sk : (void *)-1);
-
- SP("%d\n", req->parsed_len);
- len = req->headers_len;
- if (len > 500)
- len = 500;
- SP("\n%d\n", len);
- memcpy(stat_line + line_off, req->headers, len);
- line_off += len;
- len = req->objectname_len;
- if (len > 100)
- len = 100;
- SP("\n%d\n", len);
- memcpy(stat_line + line_off, req->objectname, len);
- line_off += len;
- SP("\n\n<END>");
- if (line_off >= LINE_SIZE)
- BUG();
- Dprintk("printing req %p, count %d, page %p: {%s}.\n", req, count, page, stat_line);
- size = sprintf(page, "%-*s\n", LINE_SIZE-1, stat_line);
- if (size != LINE_SIZE)
- BUG();
- page += LINE_SIZE;
- if (count-skip_count >= max_count)
- break;
- }
-
- Dprintk("count: %d.\n", count-skip_count);
- return count - skip_count;
-}
-
-static int stat_read_proc (char *page, char **start, off_t off,
- int max_size, int *eof, void *data)
-{
- unsigned int i, nr_total = 0, nr, nr_off, nr_skip, size = 0, nr_wanted;
-
- Dprintk("START, page: %p, max_size: %d, off: %ld.\n", page, max_size, off);
- *eof = 1;
- if (max_size & LINE_MASK)
- return 0;
- if (off & LINE_MASK)
- return 0;
- if (!max_size)
- return 0;
-
- nr_off = off/LINE_SIZE;
-
- for (i = 0; i < nr_tux_threads; i++) {
- threadinfo_t *ti = threadinfo + i;
- spin_lock_irq(&ti->work_lock);
- nr = ti->nr_requests;
- Dprintk("ti: %p, nr: %d, nr_total: %d, nr_off: %d.\n", ti, nr, nr_total, nr_off);
- nr_total += nr;
- if (nr_off >= nr_total) {
- spin_unlock_irq(&ti->work_lock);
- continue;
- }
- nr_skip = nr_off - (nr_total - nr);
- nr_wanted = (max_size-size) / LINE_SIZE;
- Dprintk("nr_skip: %d, nr_wanted: %d.\n", nr_skip, nr_wanted);
- nr = print_request_stats(ti, page + size, nr_skip, nr_wanted);
- spin_unlock_irq(&ti->work_lock);
- nr_off += nr;
- size += nr * LINE_SIZE;
- Dprintk("ret: %d requests, size: %d.\n", nr, size);
- if (size > max_size)
- BUG();
- if (size == max_size)
- break;
- }
- Dprintk("DONE: size: %d.\n", size);
-
- *start = page;
-
- if (size)
- *eof = 0;
- return size;
-}
-
-static int stat_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- return -EINVAL;
-}
-
-#define MAX_STRING "http://255.255.255.255:65535"
-#define MAX_STRINGLEN (sizeof(MAX_STRING))
-
-#define INACTIVE_1 "[inactive]\n"
-#define INACTIVE_2 "0\n"
-
-static int listen_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- tux_socket_t *listen = data;
-
- if (count < MAX_STRINGLEN)
- return -EINVAL;
-
- if (!listen->proto)
- return sprintf(page, INACTIVE_1);
-
- return sprintf (page, "%s://%u.%u.%u.%u:%hu\n", listen->proto->name,
- HIPQUAD(listen->ip), listen->port);
-}
-
-static int listen_write_proc (struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- char string [MAX_STRINGLEN];
- unsigned int d1, d2, d3, d4;
- unsigned short port;
- tux_socket_t *listen = data;
-
- if (!count)
- return -EINVAL;
- if (count > MAX_STRINGLEN)
- count = MAX_STRINGLEN;
- if (copy_from_user(string, buffer, count))
- return -EFAULT;
- string[count] = 0;
-
- if (!strcmp(string, INACTIVE_1) || !strcmp(string, INACTIVE_2)) {
- listen->proto = NULL;
- listen->ip = 0;
- listen->port = 0;
- return count;
- }
-
-#define MK_IP(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | d)
-
- if (sscanf(string, "http://%u.%u.%u.%u:%hu\n",
- &d1, &d2, &d3, &d4, &port) == 5) {
- listen->ip = MK_IP(d1,d2,d3,d4);
- listen->port = port;
- listen->proto = &tux_proto_http;
- return count;
- }
-
- if (sscanf(string, "ftp://%u.%u.%u.%u:%hu\n",
- &d1, &d2, &d3, &d4, &port) == 5) {
- listen->ip = MK_IP(d1,d2,d3,d4);
- listen->port = port;
- listen->proto = &tux_proto_ftp;
- return count;
- }
- printk(KERN_ERR "tux: invalid listen-socket parameters: %s\n", string);
- return -EINVAL;
-}
-
-#define MAX_NAMELEN 10
-
-static void register_tux_proc (unsigned int nr)
-{
- struct proc_dir_entry *entry;
- char name [MAX_NAMELEN];
- int i;
-
- if (!root_tux_dir)
- TUX_BUG();
-
- sprintf(name, "%d", nr);
-
- /* create /proc/net/tux/1234/ */
- tux_dir[nr] = proc_mkdir(name, root_tux_dir);
-
- /* create /proc/net/tux/1234/listen/ */
- listen_dir[nr] = proc_mkdir("listen", tux_dir[nr]);
-
- /* create /proc/net/tux/1234/listen/ */
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- sprintf(name, "%d", i);
- entry = create_proc_entry(name, 0700, listen_dir[nr]);
-
- entry->nlink = 1;
- entry->data = (void *)(tux_listen[nr] + i);
- entry->read_proc = listen_read_proc;
- entry->write_proc = listen_write_proc;
- tux_listen[nr][i].entry = entry;
- }
-}
-
-static void unregister_tux_proc (unsigned int nr)
-{
- int i;
-
- for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
- remove_proc_entry(tux_listen[nr][i].entry->name,listen_dir[nr]);
- tux_listen[nr][i].entry = NULL;
- }
-
- remove_proc_entry(listen_dir[nr]->name, tux_dir[nr]);
-
- remove_proc_entry(tux_dir[nr]->name, root_tux_dir);
-}
-
-static void cleanup_tux_proc (void)
-{
- int i;
-
- Dprintk("cleaning up /proc/net/tux/\n");
-
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++)
- unregister_tux_proc(i);
- remove_proc_entry(stat_entry->name, root_tux_dir);
- remove_proc_entry(log_cpu_mask_entry->name, root_tux_dir);
- remove_proc_entry(root_tux_dir->name, proc_net);
-}
-
-static void init_tux_proc (void)
-{
- struct proc_dir_entry *entry;
- int i;
-
- if (root_tux_dir)
- return;
-
- /* create /proc/net/tux */
- root_tux_dir = proc_mkdir("tux", proc_net);
-
- entry = create_proc_entry("log_cpu_mask", 0700, root_tux_dir);
-
- entry->nlink = 1;
- entry->data = (void *)&log_cpu_mask;
- entry->read_proc = hex_read_proc;
- entry->write_proc = hex_write_proc;
-
- log_cpu_mask_entry = entry;
-
- entry = create_proc_entry("stat", 0700, root_tux_dir);
-
- entry->nlink = 1;
- entry->data = NULL;
- entry->read_proc = stat_read_proc;
- entry->write_proc = stat_write_proc;
-
- stat_entry = entry;
-
- /*
- * Create entries for all existing threads.
- */
- for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++)
- register_tux_proc(i);
-}
-
-void start_sysctl(void)
-{
- init_tux_proc();
- tux_table_header = register_sysctl_table(tux_root_table,1);
-}
-
-void end_sysctl(void)
-{
- cleanup_tux_proc();
- unregister_sysctl_table(tux_table_header);
-}
-
-#if CONFIG_SMP
-void mask_to_cpumask(unsigned int mask, cpumask_t *cpu_mask)
-{
-
- unsigned int bit_mask, i;
-
- bit_mask = 1 << 31;
-
- for (i=NR_CPUS-1; i--; i >= 0) {
- if(mask & bit_mask)
- cpu_set(i, *cpu_mask);
- else
- cpu_clear(i, *cpu_mask);
- mask <<= 1;
- }
-
-}
-#endif
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * ftp_proto.c: FTP application protocol support
- */
-
-#define __KERNEL_SYSCALLS__
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#define HELLO "220 Linux/TUX 3.0 FTP server welcomes you!\r\n"
-#define WRITE_DONE "226 Transfer complete.\r\n"
-#define BAD_FILENAME "550 No such file or directory.\r\n"
-#define GOOD_DIR "250 CWD command successful.\r\n"
-#define LIST_ERR "503 LIST without PORT! Closing connection.\r\n"
-#define LIST_ERR_MEM "503 LIST could not allocate memory! Closing connection.\r\n"
-#define WRITE_FILE "150 Opening BINARY mode data connection.\r\n"
-#define WRITE_LIST "150 Opening ASCII mode data connection.\r\n"
-#define RETR_ERR "503 RETR without PORT! Closing connection.\r\n"
-#define PORT_OK "200 PORT command successful.\r\n"
-#define LOGIN_OK "230-There are currently %d users logged in, out of %d maximum.\r\n230-Bandwidth served by TUX currently: %d KB/sec\r\n230 TUX Guest login ok.\r\n"
-#define LOGIN_OK_ONE "230-There is currently 1 user logged in, out of %d maximum.\r\n230-Bandwidth served by TUX currently: %d KB/sec\r\n230 TUX Guest login ok.\r\n"
-#define LOGIN_OK_PASS "230 TUX Guest login ok.\r\n"
-#define LOGIN_FORBIDDEN "530 Sorry, Login Denied!\r\n"
-#define TYPE_OK "200 Type set to I.\r\n"
-#define BYE "221 Thank You for using TUX!\r\n"
-#define NOT_IMPLEMENTED "502 Command not implemented.\r\n"
-#define CLOSE_2 "221 Cannot handle request, closing connection!\r\n"
-#define CLOSE "500 Unknown command.\r\n"
-#define CLOSE_TIMEOUT "421 Timeout, closing connection!\r\n"
-#define LINUX_SYST "215 UNIX Type: L8, Linux/TUX/3.0\r\n"
-#define COMMAND_OK "200 Command OK.\r\n"
-#define REST_OK "350 Restart offset OK.\r\n"
-#define WRITE_ABORTED "426 Transfer aborted, data connection closed.\r\n"
-#define SITE "214 No SITE commands are recognized.\r\n"
-
-#define INTERVAL 10
-
-unsigned long last_measurement;
-unsigned int ftp_bytes_sent;
-unsigned int ftp_bandwidth;
-
-static void __update_bandwidth (tux_req_t *req, unsigned int bytes)
-{
- /*
- * Bandwidth measurement. Not completely accurate,
- * but it's good enough and lightweight enough.
- */
- if (jiffies >= last_measurement + INTERVAL*HZ) {
- ftp_bandwidth = (ftp_bytes_sent + 1023)/INTERVAL/1024;
- ftp_bytes_sent = 0;
- last_measurement = jiffies;
- }
- if (bytes)
- atomic_add(bytes, (atomic_t *)&ftp_bytes_sent);
- Dprintk("update_bandwidth(%p,%d), bytes_sent: %d, bandwidth: %d.\n",
- req, bytes, ftp_bytes_sent, ftp_bandwidth);
-}
-
-#define update_bandwidth(req,bytes) \
- do { \
- if (unlikely(tux_ftp_login_message)) \
- __update_bandwidth(req, bytes); \
- } while (0)
-
-/*
- * Send an FTP control-channel reply asynchronously, accounting the bytes
- * toward the bandwidth estimate. @size is the message length; the trailing
- * '1' asks __send_async_message() to push the data out immediately.
- */
-static inline void __ftp_send_async_message (tux_req_t *req,
-		const char *message, int status, unsigned int size)
-{
-	update_bandwidth(req, size);
-	__send_async_message(req, message, status, size, 1);
-}
-
-/* Convenience wrapper for string literals: length is computed at compile
- * time (sizeof(str)-1 drops the NUL terminator). */
-#define ftp_send_async_message(req,str,status)	\
-		__ftp_send_async_message(req,str,status,sizeof(str)-1)
-
-
-/* Atom: flush any pending TCP output on the control socket, then requeue
- * the request so the next atom runs. */
-static void ftp_flush_req (tux_req_t *req, int cachemiss)
-{
-	tux_push_pending(req->sock->sk);
-	add_req_to_workqueue(req);
-}
-
-static void ftp_execute_command (tux_req_t *req, int cachemiss);
-
-/*
- * Atom: resolve the virtual-host document root for this connection.
- * The vhost directory is named after the server's local IP address
- * (dotted-quad), looked up under the main docroot. On -EWOULDBLOCKIO the
- * lookup is retried from cachemiss context; any other failure aborts the
- * request. On success, installs docroot_dentry/docroot_mnt (references
- * held) and chains to ftp_execute_command.
- */
-static void ftp_lookup_vhost (tux_req_t *req, int cachemiss)
-{
-	struct dentry *dentry;
-	struct nameidata base;
-	struct vfsmount *mnt = NULL;
-	unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-	/* Buffer sized for "255.255.255.255" plus slack. */
-	char ip[3+1+3+1+3+1+3 + 2];
-
-	sprintf(ip, "%d.%d.%d.%d", NIPQUAD(inet_sk(req->sock->sk)->rcv_saddr));
-	Dprintk("ftp_lookup_vhost(%p, %d, virtual: %d, host: %s.)\n",
-		req, flag, req->virtual, ip);
-
-	base.flags = LOOKUP_FOLLOW|flag;
-	base.last_type = LAST_ROOT;
-	base.dentry = dget(req->proto->main_docroot.dentry);
-	base.mnt = mntget(req->proto->main_docroot.mnt);
-
-	dentry = __tux_lookup(req, ip, &base, &mnt);
-
-	Dprintk("looked up dentry %p.\n", dentry);
-	/* A positive lookup result must have an inode attached. */
-	if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
-		TUX_BUG();
-
-	if (!dentry || IS_ERR(dentry)) {
-		if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
-			/* Atomic lookup would block: retry from cachemiss context. */
-			add_tux_atom(req, ftp_lookup_vhost);
-			queue_cachemiss(req);
-			return;
-		}
-		goto abort;
-	}
-
-	req->docroot_dentry = dentry;
-	req->docroot_mnt = mnt;
-
-	add_tux_atom(req, ftp_execute_command);
-	add_req_to_workqueue(req);
-	return;
-abort:
-	if (dentry) {
-		if (!IS_ERR(dentry))
-			dput(dentry);
-		dentry = NULL;
-	}
-	if (mnt) {
-		if (!IS_ERR(mnt))
-			mntput(mnt);
-		mnt = NULL;
-	}
-	req_err(req);
-	add_req_to_workqueue(req);
-}
-
-/* Entry point for a new FTP control connection: queue the request parser
- * and a flush, then greet the client with the 220 banner. */
-static void ftp_got_request (tux_req_t *req)
-{
-	add_tux_atom(req, parse_request);
-	add_tux_atom(req, ftp_flush_req);
-	ftp_send_async_message(req, HELLO, 220);
-}
-
-/* Log a protocol violation with its source location, then jump to the
- * enclosing function's error label. */
-#define GOTO_ERR { TDprintk("FTP protocol error at: %s:%d\n", \
-		__FILE__, __LINE__); goto error; }
-
-/* Tear down the FTP data connection (if any): unlink it from the request
- * and release the socket. Safe to call when no data socket exists. */
-static void zap_data_socket (tux_req_t *req)
-{
-	if (!req->data_sock)
-		return;
-	Dprintk("zapping req %p's data socket %p.\n", req, req->data_sock);
-
-	unlink_tux_data_socket(req);
-	sock_release(req->data_sock);
-	req->data_sock = NULL;
-}
-
-/*
- * Parse one CRLF-terminated FTP command from req->headers (total_len bytes).
- *
- * Returns: the number of bytes consumed (newline included) on success,
- * 0 if the message is incomplete (no CRLF yet), or -1 on a protocol
- * violation (via GOTO_ERR), in which case keepalive is cleared and the
- * request is marked errored.
- *
- * Recognized commands are matched by packing the first four uppercased
- * bytes into a 32-bit value (STRING_VAL) and comparing; argument-carrying
- * commands additionally copy their argument into the given field. On
- * success the next atom is set up: ftp_lookup_vhost when virtual serving
- * is enabled, otherwise ftp_execute_command directly.
- */
-static int parse_ftp_message (tux_req_t *req, const int total_len)
-{
-	int comm, comm1 = 0, comm2 = 0, comm3 = 0, comm4 = 0;
-	int newline_pos, i;
-	const char *mess, *curr;
-
-	curr = mess = req->headers;
-
-	Dprintk("FTP parser got %d bytes: --->{%s}<---\n", total_len, curr);
-
-	/* Scan for the CRLF terminator; embedded NULs are a protocol error. */
-	newline_pos = -1;
-	for (i = 0; i < total_len; i++, curr++) {
-		if (!*curr)
-			GOTO_ERR;
-		/* NOTE(review): *(curr+1) reads one byte past total_len when
-		 * the last byte is '\r' — presumably the header buffer is
-		 * NUL-terminated, which the !*curr check relies on; verify. */
-		if (!(*curr == '\r') || !(*(curr+1) == '\n'))
-			continue;
-		newline_pos = i;
-		break;
-	}
-	Dprintk("Newline pos: %d\n", newline_pos);
-	if (newline_pos == -1) {
-		Dprintk("incomplete mess on req %p!\n", req);
-		return 0;
-	}
-	if (newline_pos < 3)
-		GOTO_ERR;
-
-/* ASCII uppercase, for case-insensitive command matching. */
-#define toup(c)	((((c) >= 'a') && ((c) <= 'z')) ? ((c) + 'A' - 'a') : (c))
-
-/* Pack 4 uppercased characters into one little-endian 32-bit word. */
-#define STRING_VAL(c1,c2,c3,c4)						\
-	(toup(c1) + (toup(c2) << 8) + (toup(c3) << 16) + (toup(c4) << 24))
-
-#define STRING_VAL_STR(str)						\
-		STRING_VAL(str[0], str[1], str[2], str[3])
-
-	Dprintk("string val (%c%c%c%c): %08x\n",
-		mess[0], mess[1], mess[2], mess[3],
-		STRING_VAL_STR(mess));
-
-/* Argument-less 4-char commands: each match sets one of comm1..comm4 so the
- * results can be merged with a single OR below. */
-#define PARSE_FTP_COMM(c1,c2,c3,c4,name,num)				\
-	if (STRING_VAL_STR(mess) == STRING_VAL(c1,c2,c3,c4))		\
-	{								\
-		Dprintk("parsed "#name".\n");				\
-		comm##num = FTP_COMM_##name;				\
-	}
-
-	PARSE_FTP_COMM('A','C','C','T', ACCT,2);
-	PARSE_FTP_COMM('C','D','U','P', CDUP,3);
-	PARSE_FTP_COMM('S','M','N','T', SMNT,4);
-	PARSE_FTP_COMM('Q','U','I','T', QUIT,1);
-	PARSE_FTP_COMM('R','E','I','N', REIN,2);
-	PARSE_FTP_COMM('P','A','S','V', PASV,3);
-	PARSE_FTP_COMM('S','T','R','U', STRU,4);
-	PARSE_FTP_COMM('S','T','O','R', STOR,2);
-	PARSE_FTP_COMM('S','T','O','U', STOU,3);
-	PARSE_FTP_COMM('A','P','P','E', APPE,4);
-	PARSE_FTP_COMM('A','L','L','O', ALLO,1);
-	PARSE_FTP_COMM('R','N','F','R', RNFR,2);
-	PARSE_FTP_COMM('R','N','T','O', RNTO,3);
-	PARSE_FTP_COMM('A','B','O','R', ABOR,4);
-	PARSE_FTP_COMM('D','E','L','E', DELE,1);
-	PARSE_FTP_COMM('R','M','D',' ', RMD, 2);
-	PARSE_FTP_COMM('M','K','D',' ', MKD, 3);
-	PARSE_FTP_COMM('P','W','D',' ', PWD, 4);
-	PARSE_FTP_COMM('S','Y','S','T', SYST,2);
-	PARSE_FTP_COMM('N','O','O','P', NOOP,3);
-	PARSE_FTP_COMM('F','E','A','T', FEAT,4);
-
-	comm = comm1 | comm2 | comm3 | comm4;
-
-	if (comm) {
-		/* Argument-less command must be exactly "XXXX\r\n". */
-		if (newline_pos != 4)
-			GOTO_ERR;
-		req->ftp_command = comm;
-		goto out;
-	}
-
-	switch (STRING_VAL(mess[0], mess[1], mess[2], mess[3])) {
-
-/* 3-char command with no argument ("PWD\r\n"). */
-#define PARSE_FTP_COMM_3CHAR(c1,c2,c3,name)				\
-		case STRING_VAL(c1,c2,c3,'\r'):				\
-		{							\
-			Dprintk("parsed "#name".\n");			\
-			req->ftp_command = FTP_COMM_##name;		\
-			if (newline_pos != 3)				\
-				GOTO_ERR;				\
-		}
-
-/* 3-char command whose argument (if any) is ignored. */
-#define PARSE_FTP_3CHAR_COMM_IGNORE(c1,c2,c3,name)			\
-		case STRING_VAL(c1,c2,c3,' '):				\
-		{							\
-			Dprintk("parsed "#name".\n");			\
-			req->ftp_command = FTP_COMM_##name;		\
-		}
-
-/* 4-char command whose argument (if any) is ignored. */
-#define PARSE_FTP_COMM_IGNORE(c1,c2,c3,c4,name)				\
-		case STRING_VAL(c1,c2,c3,c4):				\
-		{							\
-			Dprintk("parsed "#name".\n");			\
-			req->ftp_command = FTP_COMM_##name;		\
-		}
-
-/* 3-char command with one mandatory argument copied (NUL-terminated) into
- * @field; length stored via @field_len, bounded by @max. */
-#define PARSE_FTP_3CHAR_COMM_1_FIELD(c1,c2,c3,name,field,field_len,max)	\
-		case STRING_VAL(c1,c2,c3,' '):				\
-		{							\
-			Dprintk("parsed "#name".\n");			\
-			req->ftp_command = FTP_COMM_##name;		\
-			if (newline_pos == 4)				\
-				GOTO_ERR;				\
-			if (newline_pos >= 5) {				\
-				curr = mess + 3;			\
-				if (*curr++ != ' ')			\
-					GOTO_ERR;			\
-				*(field_len) = newline_pos-4;		\
-				if (*(field_len) >= max)		\
-					GOTO_ERR;			\
-				memcpy(field, curr, *(field_len));	\
-				(field)[*(field_len)] = 0;		\
-			}						\
-		}
-
-/* 4-char command with an optional argument, same copy/bound rules. */
-#define PARSE_FTP_COMM_1_FIELD(c1,c2,c3,c4,name,field,field_len,max)	\
-		case STRING_VAL(c1,c2,c3,c4):				\
-		{							\
-			Dprintk("parsed "#name".\n");			\
-			req->ftp_command = FTP_COMM_##name;		\
-			if (newline_pos < 4)				\
-				GOTO_ERR;				\
-			if (newline_pos == 4)				\
-				*(field_len) = 0;			\
-			else {						\
-				curr = mess + 4;			\
-				if (*curr++ != ' ')			\
-					GOTO_ERR;			\
-				*(field_len) = newline_pos-5;		\
-				if (*(field_len) >= max)		\
-					GOTO_ERR;			\
-				memcpy(field, curr, *(field_len));	\
-				(field)[*(field_len)] = 0;		\
-			}						\
-		}
-
-	PARSE_FTP_COMM_1_FIELD('U','S','E','R', USER,
-		req->username, &req->username_len,
-		MAX_USERNAME_LEN-1);
-	if (!req->username_len)
-		GOTO_ERR;
-	break;
-
-	{
-		/* PASS argument is accepted but discarded (guest login only). */
-		#define MAX_PASS_LEN 100
-		char pass[MAX_PASS_LEN];
-		unsigned int pass_len;
-		PARSE_FTP_COMM_1_FIELD('P','A','S','S', PASS,
-			pass, &pass_len,
-			MAX_PASS_LEN-1);
-		if (!pass_len)
-			GOTO_ERR;
-		break;
-	}
-
-	PARSE_FTP_3CHAR_COMM_1_FIELD('C','W','D', CWD,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	if (!req->objectname_len)
-		GOTO_ERR;
-	req->uri_str = req->objectname;
-	req->uri_len = req->objectname_len;
-	break;
-
-	PARSE_FTP_COMM_3CHAR('P','W','D', PWD); break;
-
-	{
-		char type[3];
-		unsigned int type_len;
-
-		/* Only Image and ASCII transfer types are accepted. */
-		PARSE_FTP_COMM_1_FIELD('T','Y','P','E', TYPE,
-			type, &type_len, 2);
-		if (!type_len)
-			GOTO_ERR;
-		if ((type[0] != 'I') && (type[0] != 'A'))
-			GOTO_ERR;
-	}
-	break;
-
-	PARSE_FTP_COMM_1_FIELD('R','E','T','R', RETR,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	if (!req->objectname_len) {
-		/* RETR with no filename: drop the data connection, ignore. */
-		zap_data_socket(req);
-		req->ftp_command = FTP_COMM_NONE;
-	}
-	req->uri_str = req->objectname;
-	req->uri_len = req->objectname_len;
-	break;
-
-	PARSE_FTP_COMM_1_FIELD('S','I','Z','E', SIZE,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	if (!req->objectname_len)
-		req->ftp_command = FTP_COMM_NONE;
-	req->uri_str = req->objectname;
-	req->uri_len = req->objectname_len;
-	break;
-
-	PARSE_FTP_COMM_1_FIELD('M','D','T','M', MDTM,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	if (!req->objectname_len)
-		req->ftp_command = FTP_COMM_NONE;
-	req->uri_str = req->objectname;
-	req->uri_len = req->objectname_len;
-	break;
-
-	PARSE_FTP_COMM_IGNORE('M','O','D','E', MODE);
-	break;
-
-	PARSE_FTP_COMM_IGNORE('S','T','A','T', STAT);
-	break;
-
-	PARSE_FTP_COMM_IGNORE('S','I','T','E', SITE);
-	break;
-
-	PARSE_FTP_COMM_1_FIELD('L','I','S','T', LIST,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	/* Option flags ("LIST -la") are silently dropped. */
-	if (req->objectname[0] == '-') {
-		req->objectname_len = 0;
-		req->objectname[0] = 0;
-	}
-	if (req->objectname_len) {
-		req->uri_str = req->objectname;
-		req->uri_len = req->objectname_len;
-	}
-	break;
-
-	PARSE_FTP_COMM_1_FIELD('N','L','S','T', NLST,
-		req->objectname, &req->objectname_len,
-		MAX_OBJECTNAME_LEN-1);
-	if (req->objectname[0] == '-') {
-		req->objectname_len = 0;
-		req->objectname[0] = 0;
-	}
-	if (req->objectname_len) {
-		req->uri_str = req->objectname;
-		req->uri_len = req->objectname_len;
-	}
-	break;
-
-	PARSE_FTP_COMM_IGNORE('H','E','L','P', HELP);
-	break;
-
-	PARSE_FTP_COMM_IGNORE('C','L','N','T', CLNT);
-	break;
-
-#define IS_NUM(n) (((n) >= '0') && ((n) <= '9'))
-
-/* Consume one digit into n; deliberately leaves an open brace so that the
- * caller closes the chain with matching "}}}..." after the last digit. */
-#define GET_DIGIT(curr,n)					\
-	n += (*curr) - '0';					\
-	curr++;						\
-	if (IS_NUM(*curr)) {					\
-		n *= 10;
-
-/* Parse a 1-3 digit decimal (0..255) PORT component at curr. */
-#define PARSE_PORTNUM(curr,n)					\
-do {								\
-	Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\
-	if (!IS_NUM(*curr))					\
-		GOTO_ERR;					\
-	n = 0;							\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	}}}							\
-	if (n > 255)						\
-		GOTO_ERR;					\
-	Dprintk("PORT NUM parser:--->{%s}<---\n", curr);\
-	Dprintk("PORT NUM parser parsed %d.\n", n);		\
-} while (0)
-
-/* Parse an up-to-10-digit decimal (e.g. REST offset) at curr. */
-#define PARSE_NUM(curr,n)					\
-do {								\
-	Dprintk("NUM parser:--->{%s}<---\n", curr);		\
-	if (!IS_NUM(*curr))					\
-		GOTO_ERR;					\
-	n = 0;							\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	GET_DIGIT(curr,n);					\
-	}}}}}}}}}}						\
-	Dprintk("NUM parser:--->{%s}<---\n", curr);		\
-	Dprintk("NUM parser parsed %d.\n", n);			\
-} while (0)
-
-	case STRING_VAL('P','O','R','T'):
-	{
-		unsigned int h1, h2, h3, h4, p1, p2;
-		if (req->data_sock)
-			zap_data_socket(req);
-		/*
-		 * Minimum size: "PORT 0,0,0,0,0,0", 16 bytes.
-		 */
-		if (newline_pos < 16)
-			GOTO_ERR;
-		Dprintk("parsed PORT.\n");
-		if (req->data_sock)
-			GOTO_ERR;
-		curr = mess + 4;
-		if (*curr++ != ' ')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,h1);
-		if (*curr++ != ',')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,h2);
-		if (*curr++ != ',')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,h3);
-		if (*curr++ != ',')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,h4);
-		if (*curr++ != ',')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,p1);
-		if (*curr++ != ',')
-			GOTO_ERR;
-		PARSE_PORTNUM(curr,p2);
-		if (curr-mess != newline_pos)
-			GOTO_ERR;
-		req->ftp_command = FTP_COMM_PORT;
-		req->ftp_user_addr = (h1<<24) + (h2<<16) + (h3<<8) + h4;
-		req->ftp_user_port = (p1<<8) + p2;
-		Dprintk("FTP PORT got: %d.%d.%d.%d:%d.\n",
-			h1, h2, h3, h4, req->ftp_user_port);
-		Dprintk("FTP user-addr: %08x (htonl: %08x), socket: %08x.\n",
-			req->ftp_user_addr, htonl(req->ftp_user_addr),
-			inet_sk(req->sock->sk)->daddr);
-		/*
-		 * Do not allow redirection of connections, and do
-		 * not allow reserved ports to be accessed.
-		 */
-		if (inet_sk(req->sock->sk)->daddr != htonl(req->ftp_user_addr))
-			GOTO_ERR;
-		if (req->ftp_user_port < 1024)
-			GOTO_ERR;
-		break;
-	}
-	case STRING_VAL('R','E','S','T'):
-	{
-		unsigned int offset;
-
-		/*
-		 * Minimum size: "REST 0", 6 bytes.
-		 */
-		if (newline_pos < 6)
-			GOTO_ERR;
-		Dprintk("parsed REST.\n");
-		curr = mess + 4;
-		if (*curr++ != ' ')
-			GOTO_ERR;
-		PARSE_NUM(curr,offset);
-		if (curr-mess != newline_pos)
-			GOTO_ERR;
-		req->ftp_command = FTP_COMM_REST;
-		req->ftp_offset_start = offset;
-		Dprintk("FTP REST got: %d bytes offset.\n", offset);
-
-		break;
-	}
-	default:
-		req->ftp_command = FTP_COMM_NONE;
-		break;
-	}
-
-out:
-	/* +2 consumes the CRLF terminator as well. */
-	req->parsed_len = newline_pos + 2;
-
-	req->virtual = tux_ftp_virtual_server;
-	if (req->virtual)
-		add_tux_atom(req, ftp_lookup_vhost);
-	else {
-		req->docroot_dentry = dget(req->proto->main_docroot.dentry);
-		req->docroot_mnt = mntget(req->proto->main_docroot.mnt);
-		add_tux_atom(req, ftp_execute_command);
-	}
-
-	return req->parsed_len;
-error:
-	clear_keepalive(req);
-	TDprintk("rejecting FTP session!\n");
-	TDprintk("mess     :--->{%s}<---\n", mess);
-	TDprintk("mess left:--->{%s}<---\n", curr);
-	req_err(req);
-	return -1;
-}
-
-static void ftp_wait_close (tux_req_t *req, int cachemiss);
-static void ftp_wait_syn (tux_req_t *req, int cachemiss);
-
-/*
- * Check both the control and (if present) data socket for errors, closed
- * state, or urgent data (the FTP ABOR out-of-band signal).
- *
- * Returns 0 when the transfer may continue. Returns 1 when it was
- * interrupted: urgent data on a live connection aborts the transfer with
- * a 426 reply, while a dead/errored connection zaps the request outright.
- */
-static int ftp_check_req_err (tux_req_t *req, int cachemiss)
-{
-	int state = req->sock->sk->sk_state;
-	int err = req->sock->sk->sk_err | req->error;
-	int urg = tcp_sk(req->sock->sk)->urg_data;
-
-	if (req->data_sock) {
-		urg |= tcp_sk(req->data_sock->sk)->urg_data;
-		state |= req->data_sock->sk->sk_state;
-		err |= req->data_sock->sk->sk_err;
-	}
-
-	if ((state <= TCP_SYN_RECV) && !err) {
-		if (!urg)
-			return 0;
-		/* Client sent urgent data (ABOR): cancel the transfer. */
-		req->in_file.f_pos = 0;
-		add_tux_atom(req, flush_request);
-		zap_data_socket(req);
-		ftp_send_async_message(req, WRITE_ABORTED, 426);
-		return 1;
-	}
-#if CONFIG_TUX_DEBUG
-	req->bytes_expected = 0;
-	if (tux_TDprintk)
-		dump_stack();
-#endif
-	req->in_file.f_pos = 0;
-	TDprintk("zapping, data sock state: %d (err: %d, urg: %d)\n",
-		state, err, urg);
-	/*
-	 * We are in the middle of a file transfer,
-	 * zap it immediately:
-	 */
-	req->error = TUX_ERROR_CONN_CLOSE;
-	zap_request(req, cachemiss);
-	return 1;
-}
-
-/*
- * Atom: stream the looked-up file over the data socket, updating bandwidth
- * accounting. generic_send_file()'s negative return codes select the retry
- * path: -5 output timeout, -4 wait for socket write space, -3 cachemiss
- * requeue, -1 fatal/handled elsewhere. Any other value means the transfer
- * finished (or errored), after which the data channel is shut down and the
- * final 200 reply is sent on the control channel.
- */
-void ftp_send_file (tux_req_t *req, int cachemiss)
-{
-	int ret;
-
-	SET_TIMESTAMP(req->output_timestamp);
-repeat:
-	ret = generic_send_file(req, req->data_sock, cachemiss);
-	update_bandwidth(req, req->in_file.f_pos - req->prev_pos);
-	req->prev_pos = req->in_file.f_pos;
-
-	switch (ret) {
-		case -5:
-			add_tux_atom(req, ftp_send_file);
-			output_timeout(req);
-			break;
-		case -4:
-			add_tux_atom(req, ftp_send_file);
-			if (add_output_space_event(req, req->data_sock)) {
-				/* Space became available meanwhile: retry now. */
-				del_tux_atom(req);
-				goto repeat;
-			}
-			break;
-		case -3:
-			add_tux_atom(req, ftp_send_file);
-			queue_cachemiss(req);
-			break;
-		case -1:
-			break;
-		default:
-			req->in_file.f_pos = 0;
-
-			if (tux_ftp_wait_close) {
-				/* Half-close and wait for the client's FIN. */
-				req->data_sock->ops->shutdown(req->data_sock, SEND_SHUTDOWN);
-				add_tux_atom(req, ftp_wait_close);
-				add_req_to_workqueue(req);
-				return;
-			}
-			Dprintk("FTP send file req %p finished!\n", req);
-			zap_data_socket(req);
-			add_tux_atom(req, ftp_flush_req);
-			if (req->error)
-				ftp_send_async_message(req, BAD_FILENAME, 200);
-			else
-				ftp_send_async_message(req, WRITE_DONE, 200);
-			break;
-	}
-}
-
-/* True while the socket handshake is still in progress and error-free. */
-#define sk_syn(sk) \
-	(!(sk)->sk_err && ((1 << (sk)->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)))
-#define req_syn(req) \
-	(!(req)->error && sk_syn((req)->data_sock->sk))
-
-/*
- * Atom: idle the request until the data-connection TCP handshake completes.
- * The flag is set under work_lock with a keepalive timer armed, then the
- * condition is re-checked to close the race with the state change arriving
- * in between.
- */
-static void ftp_wait_syn (tux_req_t *req, int cachemiss)
-{
-	Dprintk("ftp_wait_syn in: data socket state %d.\n", req->data_sock->state);
-	if (req_syn(req)) {
-		spin_lock_irq(&req->ti->work_lock);
-		add_keepalive_timer(req);
-		if (test_and_set_bit(0, &req->idle_input))
-			TUX_BUG();
-		spin_unlock_irq(&req->ti->work_lock);
-		/* Re-check: the SYN may have completed while we armed the timer. */
-		if (req_syn(req)) {
-			add_tux_atom(req, ftp_wait_syn);
-			return;
-		}
-		unidle_req(req);
-	}
-	Dprintk("ftp_wait_syn out: data socket state %d.\n", req->data_sock->state);
-	add_req_to_workqueue(req);
-}
-
-/*
- * Atom: after a half-close, idle until the client finishes closing the data
- * connection (state past TCP_FIN_WAIT1 or an error), then release the data
- * socket and send the final control-channel reply. Uses the same armed-
- * timer-plus-recheck pattern as ftp_wait_syn().
- */
-static void ftp_wait_close (tux_req_t *req, int cachemiss)
-{
-	struct sock *sk = req->data_sock->sk;
-
-	Dprintk("ftp_wait_close: data socket state %d.\n", sk->sk_state);
-
-	if (!req->error && (sk->sk_state <= TCP_FIN_WAIT1) && !sk->sk_err) {
-		spin_lock_irq(&req->ti->work_lock);
-		add_keepalive_timer(req);
-		if (test_and_set_bit(0, &req->idle_input))
-			TUX_BUG();
-		spin_unlock_irq(&req->ti->work_lock);
-		if (!req->error && (sk->sk_state <= TCP_FIN_WAIT1) && !sk->sk_err) {
-			add_tux_atom(req, ftp_wait_close);
-			return;
-		}
-		unidle_req(req);
-	}
-	zap_data_socket(req);
-	add_tux_atom(req, ftp_flush_req);
-	if (req->error)
-		ftp_send_async_message(req, BAD_FILENAME, 200);
-	else
-		ftp_send_async_message(req, WRITE_DONE, 200);
-}
-
-/*
- * Atom: handle SIZE — look up the object (retrying via cachemiss if the
- * atomic lookup would block) and reply "213 <size>" with the inode size,
- * or BAD_FILENAME if the lookup fails.
- */
-void ftp_get_size (tux_req_t *req, int cachemiss)
-{
-	char file_size[200];
-	int missed, len;
-
-	if (!req->dentry) {
-		missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
-		if (!missed && !req->dentry) {
-			ftp_send_async_message(req, BAD_FILENAME, 200);
-			return;
-		}
-		if (missed) {
-			if (cachemiss)
-				TUX_BUG();
-			add_tux_atom(req, ftp_get_size);
-			queue_cachemiss(req);
-			return;
-		}
-	}
-	req->in_file.f_pos = 0;
-	len = sprintf(file_size, "213 %Li\r\n", req->dentry->d_inode->i_size);
-	__ftp_send_async_message(req, file_size, 200, len);
-}
-
-/*
- * Atom: handle MDTM — look up the object, check read permission, and reply
- * with the file's mtime formatted by mdtm_time(). Replies 550 BAD_FILENAME
- * on lookup or permission failure; retries via cachemiss on -EWOULDBLOCKIO.
- */
-void ftp_get_mdtm (tux_req_t *req, int cachemiss)
-{
-	unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-	struct dentry *dentry;
-	struct vfsmount *mnt = NULL;
-	char file_mdtm[200];
-	unsigned int len;
-	int err;
-
-	dentry = tux_lookup(req, req->objectname, flag, &mnt);
-	if (!dentry || IS_ERR(dentry)) {
-		if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
-			if (cachemiss)
-				TUX_BUG();
-			add_tux_atom(req, ftp_get_mdtm);
-			queue_cachemiss(req);
-			return;
-		}
-		goto out_err;
-	}
-	err = permission(dentry->d_inode, MAY_READ, NULL);
-	if (err)
-		goto out_err_put;
-
-	req->in_file.f_pos = 0;
-	len = mdtm_time (file_mdtm, dentry->d_inode->i_mtime.tv_sec);
-	dput(dentry);
-	mntput(mnt);
-	__ftp_send_async_message(req, file_mdtm, 200, len);
-	return;
-
-out_err_put:
-	dput(dentry);
-	mntput(mnt);
-out_err:
-	ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-/*
- * Atom: handle RETR — look up the target file (cachemiss retry as usual),
- * apply any pending REST restart offset, then chain the transfer atoms:
- * flush reply, wait for data-connection SYN, then ftp_send_file.
- */
-static void ftp_get_file (tux_req_t *req, int cachemiss)
-{
-	int missed;
-
-	if (!req->dentry) {
-		missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
-		if (!missed && !req->dentry) {
-			ftp_send_async_message(req, BAD_FILENAME, 200);
-			return;
-		}
-		if (missed) {
-			if (cachemiss)
-				TUX_BUG();
-			add_tux_atom(req, ftp_get_file);
-			queue_cachemiss(req);
-			return;
-		}
-	}
-	Dprintk("ftp_send_file %p, ftp_offset: %Ld, total_len: %Ld.\n", req, req->ftp_offset_start, req->total_file_len);
-	req->in_file.f_pos = 0;
-	if (req->ftp_offset_start) {
-		/* One-shot REST offset: apply if within the file, then clear. */
-		if (req->ftp_offset_start <= req->total_file_len) {
-			req->offset_start = req->ftp_offset_start;
-			req->in_file.f_pos = req->offset_start;
-		}
-		req->ftp_offset_start = 0;
-	}
-	req->output_len = req->total_file_len - req->offset_start;
-	req->prev_pos = req->in_file.f_pos;
-	Dprintk("ftp_send_file %p, f_pos: %Ld (out_len: %Ld).\n", req, req->in_file.f_pos, req->output_len);
-	/* Atoms run LIFO: flush reply -> wait for SYN -> send the file. */
-	add_tux_atom(req, ftp_send_file);
-	add_tux_atom(req, ftp_wait_syn);
-	add_tux_atom(req, ftp_flush_req);
-	ftp_send_async_message(req, WRITE_FILE, 200);
-}
-
-/* Swap the control and data sockets on the request, so that generic output
- * code (e.g. list_directory) writes to the data channel; reset f_pos. */
-static void __exchange_sockets (tux_req_t *req)
-{
-	struct socket *tmp;
-
-	tmp = req->data_sock;
-	req->data_sock = req->sock;
-	req->sock = tmp;
-
-	req->in_file.f_pos = 0;
-}
-
-/* Atom: begin a directory listing — route output to the data socket and
- * continue from cachemiss context (listing may need to block). */
-static void ftp_do_ls_start (tux_req_t *req, int cachemiss)
-{
-	Dprintk("ftp_do_ls_start(%p, %d).\n", req, cachemiss);
-	if (!req->cwd_dentry)
-		TUX_BUG();
-	__exchange_sockets(req);
-	queue_cachemiss(req);
-}
-
-/* Atom: finish a directory listing — restore socket roles, then either
- * wait for the client's close (tux_ftp_wait_close) or drop the data
- * connection and send the final control-channel reply. */
-static void ftp_do_ls_end (tux_req_t *req, int cachemiss)
-{
-	Dprintk("ftp_do_ls_end(%p, %d).\n", req, cachemiss);
-	__exchange_sockets(req);
-	if (tux_ftp_wait_close) {
-		req->data_sock->ops->shutdown(req->data_sock, SEND_SHUTDOWN);
-		add_tux_atom(req, ftp_wait_close);
-		add_req_to_workqueue(req);
-		return;
-	}
-	zap_data_socket(req);
-	add_tux_atom(req, ftp_flush_req);
-	if (req->error)
-		ftp_send_async_message(req, BAD_FILENAME, 200);
-	else
-		ftp_send_async_message(req, WRITE_DONE, 200);
-}
-
-/*
- * Atom: handle CWD/CDUP — look up the target directory, require MAY_EXEC
- * (traverse) permission, and install it as the request's cwd. Replies
- * GOOD_DIR on success, 550 BAD_FILENAME on failure; cachemiss retry on
- * -EWOULDBLOCKIO.
- */
-static void ftp_chdir (tux_req_t *req, int cachemiss)
-{
-	unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-	struct dentry *dentry;
-	struct vfsmount *mnt = NULL;
-	int err;
-
-	Dprintk("ftp_chdir(%p, %d, {%s})\n", req, cachemiss, req->objectname);
-	dentry = tux_lookup(req, req->objectname, flag, &mnt);
-	if (!dentry || IS_ERR(dentry)) {
-		if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
-			if (cachemiss)
-				TUX_BUG();
-			add_tux_atom(req, ftp_chdir);
-			queue_cachemiss(req);
-			return;
-		}
-		goto out_err;
-	}
-	err = permission(dentry->d_inode, MAY_EXEC, NULL);
-	if (err)
-		goto out_err_put;
-	req->cwd_dentry = dentry;
-	req->cwd_mnt = mnt;
-	ftp_send_async_message(req, GOOD_DIR, 200);
-	return;
-
-out_err_put:
-	dput(dentry);
-	mntput(mnt);
-out_err:
-	ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-/*
- * Atom: accept the client's incoming connection on the passive-mode listen
- * socket. Idles (with keepalive timer and recheck) until the accept queue
- * is non-empty, accepts non-blockingly, rejects connections from any IP
- * other than the control connection's peer (looping back to accept again),
- * then replaces the listen socket with the accepted data socket.
- */
-void ftp_accept_pasv (tux_req_t *req, int cachemiss)
-{
-	struct socket *sock, *new_sock = NULL;
-	struct tcp_opt *tp1, *tp2;
-	int err;
-
-	tp1 = tcp_sk(req->data_sock->sk);
-
-	Dprintk("PASV accept on req %p, accept_queue: %p.\n",
-		req, tp1->accept_queue);
-	if (req->error || (req->data_sock->sk->sk_state != TCP_LISTEN))
-		goto error;
-new_socket:
-	if (!tp1->accept_queue) {
-		spin_lock_irq(&req->ti->work_lock);
-		add_keepalive_timer(req);
-		if (test_and_set_bit(0, &req->idle_input))
-			TUX_BUG();
-		spin_unlock_irq(&req->ti->work_lock);
-		if (!tp1->accept_queue) {
-			add_tux_atom(req, ftp_accept_pasv);
-			return;
-		}
-		unidle_req(req);
-	}
-	new_sock = sock_alloc();
-	if (!new_sock)
-		goto error;
-	sock = req->data_sock;
-	new_sock->type = sock->type;
-	new_sock->ops = sock->ops;
-
-	err = sock->ops->accept(sock, new_sock, O_NONBLOCK);
-	Dprintk("PASV accept() returned %d (state %d).\n", err, new_sock->sk->sk_state);
-	if (err < 0)
-		goto error;
-	if (new_sock->sk->sk_state != TCP_ESTABLISHED)
-		goto error;
-	/*
-	 * Do not allow other clients to steal the FTP connection!
-	 */
-	if (inet_sk(new_sock->sk)->daddr != inet_sk(req->sock->sk)->daddr) {
-		Dprintk("PASV: ugh, unauthorized connect?\n");
-		sock_release(new_sock);
-		new_sock = NULL;
-		goto new_socket;
-	}
-	/*
-	 * Zap the listen socket:
-	 */
-	zap_data_socket(req);
-
-	/* Tune the accepted data socket: no Nagle, TUX ack policy,
-	 * inline urgent data (for ABOR), no lingering close. */
-	tp2 = tcp_sk(new_sock->sk);
-	tp2->nonagle = 2;
-	tp2->ack.pingpong = tux_ack_pingpong;
-	new_sock->sk->sk_reuse = 1;
-	sock_set_flag(new_sock->sk, SOCK_URGINLINE);
-	sock_reset_flag(new_sock->sk, SOCK_LINGER);
-
-	link_tux_data_socket(req, new_sock);
-	add_req_to_workqueue(req);
-	return;
-
-error:
-	if (new_sock)
-		sock_release(new_sock);
-	req_err(req);
-	zap_data_socket(req);
-	ftp_send_async_message(req, CLOSE, 500);
-}
-
-/*
- * Format one directory entry into @tmp in `ls -l`-like form (or bare name
- * for NLST), CRLF-terminated. Returns the advanced write pointer, or NULL
- * when the entry should be hidden (tux_hide_unreadable set and the entry
- * is of a "special" type). Symlink targets are resolved via ->readlink.
- */
-static char * ftp_print_dir_line (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode)
-{
-	char *string0 = tmp;
-	unsigned int size;
-
-	if (req->ftp_command == FTP_COMM_NLST) {
-		/* NLST: name only, no metadata. */
-		memcpy(tmp, d_name, d_len);
-		tmp += d_len;
-		*tmp++ = '\r';
-		*tmp++ = '\n';
-		*tmp = 0;
-		return tmp;
-	}
-	/* File-type character, hiding special files when configured. */
-	switch (d_type) {
-		default:
-		case DT_UNKNOWN:
-		case DT_WHT:
-			if (tux_hide_unreadable)
-				goto out_dput;
-			*tmp++ = '?';
-			break;
-
-		case DT_FIFO:
-			if (tux_hide_unreadable)
-				goto out_dput;
-			*tmp++ = 'p';
-			break;
-
-		case DT_CHR:
-			if (tux_hide_unreadable)
-				goto out_dput;
-			*tmp++ = 'c';
-			break;
-
-		case DT_DIR:
-			*tmp++ = 'd';
-			break;
-
-		case DT_BLK:
-			if (tux_hide_unreadable)
-				goto out_dput;
-			*tmp++ = 'b';
-			break;
-
-		case DT_REG:
-			*tmp++ = '-';
-			break;
-
-		case DT_LNK:
-			*tmp++ = 'l';
-			break;
-
-		case DT_SOCK:
-			if (tux_hide_unreadable)
-				goto out_dput;
-			*tmp++ = 's';
-			break;
-	}
-
-	/* rwxrwxrwx permission bits. */
-	if (inode->i_mode & S_IRUSR) *tmp++ = 'r'; else *tmp++ = '-';
-	if (inode->i_mode & S_IWUSR) *tmp++ = 'w'; else *tmp++ = '-';
-	if (inode->i_mode & S_IXUSR) *tmp++ = 'x'; else *tmp++ = '-';
-	if (inode->i_mode & S_IRGRP) *tmp++ = 'r'; else *tmp++ = '-';
-	if (inode->i_mode & S_IWGRP) *tmp++ = 'w'; else *tmp++ = '-';
-	if (inode->i_mode & S_IXGRP) *tmp++ = 'x'; else *tmp++ = '-';
-	if (inode->i_mode & S_IROTH) *tmp++ = 'r'; else *tmp++ = '-';
-	if (inode->i_mode & S_IWOTH) *tmp++ = 'w'; else *tmp++ = '-';
-	if (inode->i_mode & S_IXOTH) *tmp++ = 'x'; else *tmp++ = '-';
-
-	*tmp++ = ' ';
-
-	/* Link count + uid, space-padded to a fixed-ish column. */
-	size = sprintf(tmp, "%4i %d", inode->i_nlink, inode->i_uid);
-	tmp += size;
-
-	size = 14 - size;
-	if (size <= 0)
-		size = 1;
-	memset(tmp, ' ', size);
-	tmp += size;
-
-	size = sprintf(tmp, "%d", inode->i_gid);
-	tmp += size;
-
-	size = 9 - size;
-	if (size <= 0)
-		size = 1;
-	memset(tmp, ' ', size);
-	tmp += size;
-
-	tmp += sprintf(tmp, "%8Li", inode->i_size);
-	*tmp++ = ' ';
-
-	tmp += time_unix2ls(inode->i_mtime.tv_sec, tmp);
-	*tmp++ = ' ';
-
-	memcpy(tmp, d_name, d_len);
-	tmp += d_len;
-
-	if (d_type == DT_LNK) {
-		int len = 0, max_len;
-		#define ARROW " -> "
-
-		memcpy(tmp, ARROW, sizeof(ARROW)-1);
-		tmp += sizeof(ARROW)-1;
-		max_len = MAX_OBJECTNAME_LEN-(tmp-string0);
-		if (inode->i_op && inode->i_op->readlink) {
-			mm_segment_t oldmm;
-
-			/* NOTE(review): set_fs(KERNEL_DS) is called twice in a
-			 * row here — the second call is redundant. */
-			oldmm = get_fs(); set_fs(KERNEL_DS);
-			set_fs(KERNEL_DS);
-			len = inode->i_op->readlink(dentry, tmp, max_len);
-			set_fs(oldmm);
-		}
-		if (len > 0)
-			tmp += len;
-		else
-			Dprintk("hm, readlink() returned %d.\n", len);
-	}
-	*tmp++ = '\r';
-	*tmp++ = '\n';
-	*tmp = 0;
-
-	return tmp;
-out_dput:
-	return NULL;
-}
-
-/* Atom: LIST of a single (non-directory) object — format one ls-style line
- * for req->dentry and send it on the data channel. */
-static void ftp_do_ls_onefile (tux_req_t *req, int cachemiss)
-{
-	char string0[MAX_OBJECTNAME_LEN+200], *tmp;
-
-	tmp = ftp_print_dir_line(req, string0, req->objectname, req->objectname_len,
-DT_REG, req->dentry, req->dentry->d_inode);
-	if (!tmp) {
-		req_err(req);
-		add_req_to_workqueue(req);
-		return;
-	}
-	/* Sanity: the formatted line must fit the on-stack buffer. */
-	if (tmp - string0 >= MAX_OBJECTNAME_LEN+200)
-		BUG();
-	__ftp_send_async_message(req, string0, 200, tmp - string0);
-}
-
-/*
- * Atom: LIST/NLST with an explicit argument — look up the named object.
- * Directories (after a MAY_EXEC check) get the full list_directory atom
- * chain; regular files get ftp_do_ls_onefile. Either way the sequence is:
- * flush the 200 reply, wait for the data SYN, swap sockets, list, swap
- * back and finish. Replies 550 on lookup/permission failure.
- */
-static void ftp_lookup_listfile (tux_req_t *req, int cachemiss)
-{
-	unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-	struct dentry *dentry;
-	struct vfsmount *mnt = NULL;
-	int err;
-
-	Dprintk("ftp_lookup_listfile(%p, %d, {%s})\n", req, cachemiss, req->objectname);
-	dentry = tux_lookup(req, req->objectname, flag, &mnt);
-	if (!dentry || IS_ERR(dentry)) {
-		if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
-			if (cachemiss)
-				TUX_BUG();
-			add_tux_atom(req, ftp_lookup_listfile);
-			queue_cachemiss(req);
-			return;
-		}
-		goto out_err;
-	}
-
-	if (S_ISDIR(dentry->d_inode->i_mode)) {
-		err = permission(dentry->d_inode, MAY_EXEC, NULL);
-		if (err) {
-			Dprintk("Directory permission error: %d.\n", err);
-			goto out_err_put;
-		}
-		install_req_dentry(req, dentry, mnt);
-
-		add_tux_atom(req, ftp_do_ls_end);
-		if (!req->cwd_dentry)
-			TUX_BUG();
-		add_tux_atom(req, list_directory);
-	} else {
-		install_req_dentry(req, dentry, mnt);
-
-		add_tux_atom(req, ftp_do_ls_end);
-		add_tux_atom(req, ftp_do_ls_onefile);
-	}
-
-	add_tux_atom(req, ftp_do_ls_start);
-	add_tux_atom(req, ftp_wait_syn);
-	add_tux_atom(req, ftp_flush_req);
-	ftp_send_async_message(req, WRITE_LIST, 200);
-	return;
-
-out_err_put:
-	dput(dentry);
-	mntput(mnt);
-out_err:
-	ftp_send_async_message(req, BAD_FILENAME, 550);
-}
-
-static void ftp_execute_command (tux_req_t *req, int cachemiss)
-{
- if (!req->parsed_len)
- TUX_BUG();
- trunc_headers(req);
- req->keep_alive = 1;
-
- switch (req->ftp_command) {
-
-#define ABORTED \
- "226 Abort successful.\r\n"
-
- case FTP_COMM_ABOR:
- {
- zap_data_socket(req);
- ftp_send_async_message(req, ABORTED, 226);
- break;
- }
-
- case FTP_COMM_PWD:
- {
- unsigned int str_len;
- char *buf, *path;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf) {
- req_err(req);
- ftp_send_async_message(req, LIST_ERR_MEM, 200);
- GOTO_ERR;
- }
-
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
-
-// "257 "/" is current directory.\r\n"
-
-#define PART_1 "257 \""
-#define PART_1_LEN (sizeof(PART_1)-1)
-
-#define PART_3 "\" is current directory.\r\n"
-#define PART_3_LEN sizeof(PART_3)
-
- path = tux_print_path(req, req->cwd_dentry, req->cwd_mnt,
- buf+PART_1_LEN, PAGE_SIZE - PART_3_LEN - PART_1_LEN);
-
- if (path < buf + PART_1_LEN)
- BUG();
-
- memcpy(path - PART_1_LEN, PART_1, PART_1_LEN);
- memcpy(buf + PAGE_SIZE-PART_3_LEN-1, PART_3, PART_3_LEN);
- str_len = buf + PAGE_SIZE-1 - (path - PART_1_LEN) - 1;
-
- __ftp_send_async_message(req, path - PART_1_LEN, 226, str_len);
- free_page((unsigned long)buf);
- break;
- }
-
- case FTP_COMM_CDUP:
- {
- memcpy(req->objectname, "..", 3);
- req->objectname_len = 2;
- req->uri_str = req->objectname;
- req->uri_len = req->objectname_len;
-
- // fall through to CWD:
- }
- case FTP_COMM_CWD:
- {
- ftp_chdir(req, cachemiss);
- break;
- }
-
- case FTP_COMM_NLST:
- case FTP_COMM_LIST:
- {
- if (!req->data_sock) {
- req_err(req);
- ftp_send_async_message(req, LIST_ERR, 200);
- GOTO_ERR;
- }
- if (req->dentry)
- TUX_BUG();
- if (!req->cwd_dentry) {
- req->cwd_dentry = dget(req->docroot_dentry);
- req->cwd_mnt = mntget(req->docroot_mnt);
- }
- if (req->objectname_len)
- ftp_lookup_listfile(req, cachemiss);
- else {
- dget(req->cwd_dentry);
- mntget(req->cwd_mnt);
- install_req_dentry(req, req->cwd_dentry, req->cwd_mnt);
- if (!req->dentry)
- TUX_BUG();
- add_tux_atom(req, ftp_do_ls_end);
- if (!req->cwd_dentry)
- TUX_BUG();
- add_tux_atom(req, list_directory);
- add_tux_atom(req, ftp_do_ls_start);
- add_tux_atom(req, ftp_wait_syn);
- add_tux_atom(req, ftp_flush_req);
- ftp_send_async_message(req, WRITE_LIST, 200);
- }
- break;
- }
-
- case FTP_COMM_RETR:
- {
- if (!req->data_sock) {
- req_err(req);
- ftp_send_async_message(req, RETR_ERR, 200);
- GOTO_ERR;
- }
- ftp_get_file(req, cachemiss);
- break;
- }
-
- case FTP_COMM_SIZE:
- {
- ftp_get_size(req, cachemiss);
- break;
- }
-
- case FTP_COMM_MDTM:
- {
- ftp_get_mdtm(req, cachemiss);
- break;
- }
-
- case FTP_COMM_PASV:
- {
- char buf [36 + 4*3 + 5 + 10];
- struct socket *data_sock;
- struct sockaddr_in addr;
- unsigned int str_len;
- struct tcp_opt *tp;
- u32 local_addr;
- int err;
-
- if (req->data_sock)
- zap_data_socket(req);
- /*
- * Create FTP data connection to client:
- */
- err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock);
- if (err < 0) {
- Dprintk("sock create err: %d\n", err);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- local_addr = inet_sk(req->sock->sk)->rcv_saddr;
- addr.sin_family = AF_INET;
- addr.sin_port = 0;
- addr.sin_addr.s_addr = local_addr;
- Dprintk("client address: (%d,%d,%d,%d).\n",
- NIPQUAD(inet_sk(req->sock->sk)->daddr));
-
- data_sock->sk->sk_reuse = 1;
- sock_set_flag(data_sock->sk, SOCK_URGINLINE);
- sock_reset_flag(data_sock->sk, SOCK_LINGER);
-
- err = data_sock->ops->bind(data_sock,
- (struct sockaddr*)&addr, sizeof(addr));
- tp = tcp_sk(data_sock->sk);
- tp->nonagle = 2;
- Dprintk("PASV bind() ret: %d.\n", err);
- if (err < 0) {
- req_err(req);
- sock_release(data_sock);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- tp->ack.pingpong = tux_ack_pingpong;
-
- if (!tux_keepalive_timeout)
- tp->linger2 = 0;
- else
- tp->linger2 = tux_keepalive_timeout * HZ;
-
- err = data_sock->ops->listen(data_sock, 1);
- Dprintk("PASV listen() ret: %d\n", err);
- if (err) {
- req_err(req);
- sock_release(data_sock);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- link_tux_data_socket(req, data_sock);
-
- Dprintk("FTP PASV listen sock state: %d, sk state: %d\n",
- data_sock->state, data_sock->sk->sk_state);
-
- str_len = sprintf(buf,
- "227 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n",
- NIPQUAD(local_addr),
- ntohs(inet_sk(data_sock->sk)->sport) / 256,
- ntohs(inet_sk(data_sock->sk)->sport) & 255 );
- Dprintk("PASV mess: {%s}\n", buf);
-
- add_tux_atom(req, ftp_accept_pasv);
- add_tux_atom(req, ftp_flush_req);
- __ftp_send_async_message(req, buf, 227, str_len);
- break;
- }
-
- case FTP_COMM_PORT:
- {
- struct socket *data_sock;
- struct sockaddr_in addr;
- kernel_cap_t saved_cap;
- u32 local_addr;
- int err;
-
- /*
- * Create FTP data connection to client:
- */
- err = sock_create(AF_INET, SOCK_STREAM, IPPROTO_IP, &data_sock);
- if (err < 0) {
- Dprintk("sock create err: %d\n", err);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
-
- local_addr = inet_sk(req->sock->sk)->rcv_saddr;
- addr.sin_family = AF_INET;
- addr.sin_port = htons(20);
- addr.sin_addr.s_addr = local_addr;
-
- Dprintk("data socket address: (%d,%d,%d,%d).\n",
- NIPQUAD(local_addr));
-
- data_sock->sk->sk_reuse = 1;
- sock_set_flag(data_sock->sk, SOCK_URGINLINE);
- sock_reset_flag(data_sock->sk, SOCK_LINGER);
-
- saved_cap = current->cap_effective;
- cap_raise (current->cap_effective, CAP_NET_BIND_SERVICE);
- err = data_sock->ops->bind(data_sock,
- (struct sockaddr*)&addr, sizeof(addr));
- current->cap_effective = saved_cap;
-
- Dprintk("ACTIVE bind() ret: %d.\n", err);
- if (err) {
- sock_release(data_sock);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- tcp_sk(data_sock->sk)->nonagle = 2;
-
- link_tux_data_socket(req, data_sock);
-
- addr.sin_family = AF_INET;
- addr.sin_port = htons(req->ftp_user_port);
- addr.sin_addr.s_addr = htonl(req->ftp_user_addr);
-
- err = data_sock->ops->connect(data_sock, (struct sockaddr *) &addr, sizeof(addr), O_RDWR|O_NONBLOCK);
- if (err && (err != -EINPROGRESS)) {
- Dprintk("connect error: %d\n", err);
- zap_data_socket(req);
- req_err(req);
- ftp_send_async_message(req, CLOSE, 500);
- GOTO_ERR;
- }
- Dprintk("FTP data sock state: %d, sk state: %d\n", data_sock->state, data_sock->sk->sk_state);
- ftp_send_async_message(req, PORT_OK, 200);
- break;
- }
-
- case FTP_COMM_USER:
- {
- if (!strcmp(req->username, "ftp")
- || !strcmp(req->username, "FTP")
- || !strcmp(req->username, "anonymous")
- || !strcmp(req->username, "ANONYMOUS")) {
- unsigned int str_len;
- char login_ok [200];
-
- if (!tux_ftp_login_message) {
- ftp_send_async_message(req, LOGIN_OK_PASS, 230);
- break;
- }
- update_bandwidth(req, 0); /* get current bandwidth */
- if (nr_requests_used() == 1)
- str_len = sprintf(login_ok, LOGIN_OK_ONE,
- tux_max_connect, ftp_bandwidth);
- else
- str_len = sprintf(login_ok, LOGIN_OK,
- nr_requests_used(), tux_max_connect, ftp_bandwidth);
- __ftp_send_async_message(req, login_ok, 200, str_len);
- } else {
- clear_keepalive(req);
- ftp_send_async_message(req, LOGIN_FORBIDDEN, 530);
- }
- break;
- }
- case FTP_COMM_PASS:
- {
- ftp_send_async_message(req, LOGIN_OK_PASS, 230);
- break;
- }
- case FTP_COMM_SITE:
- {
- ftp_send_async_message(req, SITE, 214);
- break;
- }
- case FTP_COMM_SYST:
- {
- ftp_send_async_message(req, LINUX_SYST, 200);
- break;
- }
- case FTP_COMM_TYPE:
- {
- ftp_send_async_message(req, TYPE_OK, 200);
- break;
- }
-#define EXTRA_FEATURES "211-Extensions supported:\r\n SIZE\r\n MDTM\r\n211 End\r\n"
-
- case FTP_COMM_FEAT:
- {
- ftp_send_async_message(req, EXTRA_FEATURES, 211);
- break;
- }
- case FTP_COMM_HELP:
- case FTP_COMM_CLNT:
- case FTP_COMM_NOOP:
- {
- ftp_send_async_message(req, COMMAND_OK, 200);
- break;
- }
- case FTP_COMM_REST:
- {
- ftp_send_async_message(req, REST_OK, 200);
- break;
- }
- case FTP_COMM_QUIT:
- {
- clear_keepalive(req);
- ftp_send_async_message(req, BYE, 200);
- break;
- }
-
- default:
- {
- req->keep_alive = 1;
- ftp_send_async_message(req, CLOSE, 500);
- break;
- }
- }
- return;
-error:
- Dprintk("rejecting FTP session!\n");
- return;
-}
-
-
-static void ftp_timeout (tux_req_t *req, int cachemiss)
-{
- Dprintk("called ftp_timeout(%p)\n", req);
- if (req->error != TUX_ERROR_CONN_TIMEOUT)
- TUX_BUG();
- ftp_send_async_message(req, CLOSE_TIMEOUT, 421);
-}
-
-static void ftp_close (tux_req_t *req, int cachemiss)
-{
- Dprintk("called ftp_close(%p)\n", req);
- ftp_send_async_message(req, CLOSE, 500);
-}
-
-static void ftp_pre_log (tux_req_t *req)
-{
- if (tux_ftp_log_retr_only && (req->ftp_command != FTP_COMM_RETR))
- req->status = 0;
- else
- req->status = req->ftp_command;
-}
-
-tux_proto_t tux_proto_ftp = {
- defer_accept: 0,
- can_redirect: 0,
- got_request: ftp_got_request,
- parse_message: parse_ftp_message,
- illegal_request: ftp_close,
- request_timeout: ftp_timeout,
- pre_log: ftp_pre_log,
- check_req_err: ftp_check_req_err,
- print_dir_line: ftp_print_dir_line,
- name: "ftp",
-};
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * proto_http.c: HTTP application protocol support
- *
- * Right now we detect simple GET headers, anything more
- * subtle gets redirected to secondary server port.
- */
-
-#include <net/tux.h>
-#include "parser.h"
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-/*
- * Parse the HTTP message and put results into the request structure.
- * CISAPI extensions do not see the actual message buffer.
- *
- * Any perceived irregularity is honored with a redirect to the
- * secondary server - which in most cases should be Apache. So
- * if TUX gets confused by some strange request we fall back
- * to Apache to be RFC-correct.
- *
- * The parser is 'optimistic', ie. it's optimized for the case where
- * the whole message is available and correct. The parser is also
- * supposed to be 'robust', ie. it can be called multiple times with
- * an incomplete message, as new packets arrive.
- */
-
-static inline int TOHEX (char c)
-{
- switch (c) {
- case '0' ... '9': c -= '0'; break;
- case 'a' ... 'f': c -= 'a'-10; break;
- case 'A' ... 'F': c -= 'A'-10; break;
- default:
- c = -1;
- }
- return c;
-}
-
-/*
- * This function determines whether the client supports
- * gzip-type content-encoding.
- */
-static int may_gzip (const char *str, int len)
-{
- const char *tmp, *curr;
- int i;
-
- if (len <= 4)
- return 0;
- tmp = str;
- for (i = 0; i <= len-6; i++) {
- Dprintk("gzip-checking: {%s}\n", tmp);
- if (memcmp(tmp, " gzip", 5)) {
- tmp++;
- continue;
- }
- curr = tmp + 5;
-
- if (*curr == ',' || *curr == '\r')
- return 1;
- if (memcmp(curr, ";q=", 3))
- return 0;
- curr += 3;
- /*
- * Every qvalue except explicitly zero is accepted.
- * Zero values are "q=0.0", "q=0.00", "q=0.000".
- * Parsing is optimized.
- */
- if (*curr == '0') {
- curr += 2;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' || *curr == '\r')
- return 0;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' || *curr == '\r')
- return 0;
- if (*curr == '0') {
- curr++;
- if (*curr == ' ' ||
- *curr == '\r')
- return 0;
- }
- }
- }
- }
- return 1;
- }
- return 0;
-}
-
-/*
- * This function strips off 'strip_host_tail' number of hostname
- * components from the tail of the hostname.
- *
- * Eg. with a value of '1', the "somesite.hosting.com" hostname gets
- * transformed into the "somesite" string.
- */
-static void strip_hostname(tux_req_t *req)
-{
- int strip = strip_host_tail;
- int left = req->host_len;
- int component = 0;
-
- if (!strip || !left)
- return;
-
- while (--left) {
- if (req->host[left] != '.')
- continue;
- if (++component == strip)
- break;
- }
- if (!left)
- return;
- req->host[left] = 0;
- req->host_len = left;
-}
-
-static void http_lookup_vhost (tux_req_t *req, int cachemiss);
-static void http_process_message (tux_req_t *req, int cachemiss);
-
-int parse_http_message (tux_req_t *req, const int total_len)
-{
- int hexhex = 0, hex_val_0 = 0, hex_val_1 = 0;
- const char *curr, *uri, *message;
- unsigned int objectname_len, left;
- unsigned int have_r = 0;
- char c;
-
- left = total_len;
- message = req->headers;
- Dprintk("parsing request:\n---\n%s\n---\n", message);
-/*
- * RFC 2616, 5.1:
- *
- * Request-Line = Method SP Request-URI SP HTTP-Version CRLF
- */
-
- if (!total_len)
- TUX_BUG();
-
- curr = message;
-
-#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete_message; } while (0)
-#define GOTO_REDIR do { TDprintk("redirect secondary at %s:%d.\n", __FILE__, __LINE__); goto error; } while (0)
-
-#define PRINT_MESSAGE_LEFT \
- Dprintk("message left (%d) at %s:%d:\n--->{%s}<---\n", left, __FILE__, __LINE__, curr)
-
- switch (*curr) {
- case 'G':
- if (PARSE_METHOD(req,curr,GET,left))
- break;
- GOTO_REDIR;
-
- case 'H':
- if (PARSE_METHOD(req,curr,HEAD,left))
- break;
- GOTO_REDIR;
-
- case 'P':
- if (PARSE_METHOD(req,curr,POST,left))
- break;
- if (PARSE_METHOD(req,curr,PUT,left))
- break;
- GOTO_REDIR;
-
- default:
- GOTO_REDIR;
- }
-
- req->method_str = message;
- req->method_len = curr-message-1;
-
- Dprintk("got method %d\n", req->method);
-
- PRINT_MESSAGE_LEFT;
-
- /*
- * Ok, we got one of the methods we can handle, parse
- * the URI:
- */
-
- {
- // Do not allow leading "../" and intermediate "/../"
- int dotdot = 1;
- char *tmp = req->objectname;
- int slashcheck = 1;
-
- req->uri_str = uri = curr;
-
- for (;;) {
- c = get_c(curr,left);
- if (slashcheck) {
- if (c == '/')
- continue;
- slashcheck = 0;
- }
-
- PRINT_MESSAGE_LEFT;
- if (c == ' ' || ((c == '?') && (tux_ignore_query != 1)) || c == '\r' || c == '\n')
- break;
- if (c == '#')
- GOTO_REDIR;
-
- Dprintk("hexhex: %d.\n", hexhex);
- /*
- * First handle HEX HEX encoding
- */
- switch (hexhex) {
- case 0:
- if (c == '%') {
- hexhex = 1;
- goto continue_parsing;
- }
- break;
- case 1:
- hex_val_0 = TOHEX(c);
- if (hex_val_0 < 0)
- GOTO_REDIR;
- hexhex = 2;
- goto continue_parsing;
- case 2:
- hex_val_1 = TOHEX(c);
- if (hex_val_1 < 0)
- GOTO_REDIR;
- c = (hex_val_0 << 4) | hex_val_1;
- if (!c)
- GOTO_REDIR;
- hexhex = 0;
- break;
- default:
- TUX_BUG();
- }
- if (hexhex)
- TUX_BUG();
-
- switch (dotdot) {
- case 0:
- break;
- case 1:
- if (c == '.')
- dotdot = 2;
- else
- dotdot = 0;
- break;
- case 2:
- if (c == '.')
- dotdot = 3;
- else
- dotdot = 0;
- break;
- case 3:
- if (c == '/')
- GOTO_REDIR;
- else
- dotdot = 0;
- break;
- default:
- TUX_BUG();
- }
- if (!dotdot && (c == '/'))
- dotdot = 1;
-
- *(tmp++) = c;
-continue_parsing:
- if (curr - uri >= MAX_OBJECTNAME_LEN)
- GOTO_REDIR;
- }
- PRINT_MESSAGE_LEFT;
- *tmp = 0;
-
- // handle trailing "/.."
- if (dotdot == 3)
- GOTO_REDIR;
-
- objectname_len = tmp - req->objectname;
- req->objectname_len = objectname_len;
- }
- Dprintk("got filename %s (%d)\n", req->objectname, req->objectname_len);
-
- PRINT_MESSAGE_LEFT;
-
- /*
- * Parse optional query string. Copy until end-of-string or space.
- */
- if (c == '?') {
- int query_len;
- const char *query;
-
- req->query_str = query = curr;
-
- for (;;) {
- c = get_c(curr,left);
- if (c == ' ')
- break;
- if (c == '#')
- GOTO_REDIR;
- }
- if (unlikely(tux_ignore_query == 2))
- req->query_str = NULL;
- else {
- query_len = curr-query-1;
- req->query_len = query_len;
- }
- }
- if (req->query_len)
- Dprintk("got query string %s (%d)\n", req->query_str, req->query_len);
- req->uri_len = curr-uri-1;
- if (!req->uri_len)
- GOTO_REDIR;
- Dprintk("got URI %s (%d)\n", req->uri_str, req->uri_len);
-
- PRINT_MESSAGE_LEFT;
- /*
- * Parse the HTTP version field:
- */
- req->version_str = curr;
- if (!PARSE_TOKEN(curr,"HTTP/1.",left))
- GOTO_REDIR;
-
- switch (get_c(curr,left)) {
- case '0':
- req->version = HTTP_1_0;
- break;
- case '1':
- req->version = HTTP_1_1;
- break;
- default:
- GOTO_REDIR;
- }
- /*
- * We default to keepalive in the HTTP/1.1 case and default
- * to non-keepalive in the HTTP/1.0 case. If max_keepalives
- * is 0 then we do no keepalives.
- */
- clear_keepalive(req);
- if (tux_max_keepalives && (req->version == HTTP_1_1))
- req->keep_alive = 1;
- req->version_len = curr - req->version_str;
-
- if (get_c(curr,left) != '\r')
- GOTO_REDIR;
- if (get_c(curr,left) != '\n')
- GOTO_REDIR;
-
- Dprintk("got version %d [%d]\n", req->version, req->version_len);
- PRINT_MESSAGE_LEFT;
-
- /*
- * Now parse (optional) request header fields:
- */
- for (;;) {
- char c;
-
- c = get_c(curr,left);
- switch (c) {
- case '\r':
- if (have_r)
- GOTO_REDIR;
- have_r = 1;
- continue;
- case '\n':
- if (!have_r)
- GOTO_REDIR;
- goto out;
- default:
- if (have_r)
- GOTO_REDIR;
- }
-
-#define PARSE_STR_FIELD(char,field,str,len) \
- if (PARSE_TOKEN(curr,field,left)) { \
- req->str = curr; \
- SKIP_LINE(curr,left); \
- req->len = curr - req->str - 2; \
- Dprintk(char field "field: %s.\n", req->str); \
- break; \
- }
-
-#define ALLOW_UNKNOWN_FIELDS 1
-#ifdef ALLOW_UNKNOWN_FIELDS
-# define UNKNOWN_FIELD { SKIP_LINE(curr,left); break; }
-#else
-# define UNKNOWN_FIELD GOTO_REDIR
-#endif
-
- switch (c) {
- case 'A':
- PARSE_STR_FIELD("A","ccept: ",
- accept_str,accept_len);
- if (PARSE_TOKEN(curr,"ccept-Encoding: ",left)) {
- const char *str = curr-1;
-
- req->accept_encoding_str = curr;
- SKIP_LINE(curr,left);
- req->accept_encoding_len = curr - req->accept_encoding_str - 2;
- Dprintk("Accept-Encoding field: {%s}.\n", str);
-
- if (tux_compression && may_gzip(str,curr-str)) {
- Dprintk("client accepts gzip!.\n");
- req->may_send_gzip = 1;
- }
- break;
- }
- PARSE_STR_FIELD("A","ccept-Charset: ",
- accept_charset_str,accept_charset_len);
- PARSE_STR_FIELD("A","ccept-Language: ",
- accept_language_str,accept_language_len);
- UNKNOWN_FIELD;
-
- case 'C':
- if (PARSE_TOKEN(curr,"onnection: ",left)) {
-next_token:
- switch (get_c(curr,left)) {
- case 'K':
- if (!PARSE_TOKEN(curr,"eep-Alive",left))
- GOTO_REDIR;
- if (tux_max_keepalives)
- req->keep_alive = 1;
- break;
-
- case 'C':
- case 'c':
- if (!PARSE_TOKEN(curr,"lose",left))
- GOTO_REDIR;
- clear_keepalive(req);
- break;
-
- case 'k':
- if (!PARSE_TOKEN(curr,"eep-alive",left))
- GOTO_REDIR;
- if (tux_max_keepalives)
- req->keep_alive = 1;
- break;
- case 'T':
- if (PARSE_TOKEN(curr,"E",left))
- break;
- if (PARSE_TOKEN(curr,"railers",left))
- break;
- if (PARSE_TOKEN(curr,"ransfer-Encoding",left))
- break;
- GOTO_REDIR;
- case 'P':
- if (PARSE_TOKEN(curr,"roxy-Authenticate",left))
- break;
- if (PARSE_TOKEN(curr,"roxy-Authorization",left))
- break;
- GOTO_REDIR;
- case 'U':
- if (!PARSE_TOKEN(curr,"pgrade",left))
- GOTO_REDIR;
- break;
- case ' ':
- PRINT_MESSAGE_LEFT;
- goto next_token;
- case ',':
- PRINT_MESSAGE_LEFT;
- goto next_token;
- default:
- GOTO_REDIR;
- }
- PRINT_MESSAGE_LEFT;
- if (*curr != '\r')
- goto next_token;
- // allow other tokens.
- SKIP_LINE(curr,left);
- break;
- }
-
- PARSE_STR_FIELD("C","ookie: ",
- cookies_str,cookies_len);
- PARSE_STR_FIELD("C","ontent-Type: ",
- content_type_str,content_type_len);
-
- if (PARSE_TOKEN(curr,"ontent-Length: ",left) ||
- PARSE_TOKEN(curr,"ontent-length: ",left)) {
- const char *tmp;
- req->contentlen_str = curr;
- SKIP_LINE(curr,left);
- req->contentlen_len = curr - req->contentlen_str - 2;
- if (req->contentlen_len) {
- tmp = req->contentlen_str;
- req->content_len = simple_strtoul(tmp, NULL, 10);
- }
- Dprintk("Content-Length field: %s [%d].\n", req->contentlen_str, req->contentlen_len);
- Dprintk("Content-Length value: %d.\n", req->content_len);
- break;
- }
- PARSE_STR_FIELD("C","ache-Control: ",
- cache_control_str,cache_control_len);
- UNKNOWN_FIELD;
-
- case 'H':
- if (PARSE_TOKEN(curr,"ost: ",left)) {
- const char *tmp = curr;
- char *tmp2 = req->host;
-
- /*
- * canonize the hostname:
- *
- * 1) strip off preceding 'www.' variants,
- * 2) transform it to lowercase.
- * 3) strip trailing dots
- * 4) potentially strip off tail
- */
-
-#define is_w(n) ((curr[n] == 'w') || (curr[n] == 'W'))
-
- if ((left > 4) && is_w(0) && is_w(1) &&
- is_w(2) && curr[3] == '.') {
- curr += 4;
- left -= 4;
- tmp = curr;
- }
-
- COPY_LINE_TOLOWER(curr, tmp2, left, req->host+MAX_HOST_LEN-2);
- req->host_len = curr - tmp - 2;
- while (req->host[req->host_len] == '.') {
- if (!req->host_len)
- break;
- req->host_len--;
- }
- req->host[req->host_len] = 0;
- if (strip_host_tail)
- strip_hostname(req);
- Dprintk("Host field: %s [%d].\n", req->host, req->host_len);
- break;
- }
- UNKNOWN_FIELD;
-
- case 'I':
- PARSE_STR_FIELD("I","f-None-Match: ",
- if_none_match_str,if_none_match_len);
- PARSE_STR_FIELD("I","f-Modified-Since: ",
- if_modified_since_str,if_modified_since_len);
- PARSE_STR_FIELD("I","f-Range: ",
- if_range_str,if_range_len);
- UNKNOWN_FIELD;
-
- case 'N':
- PARSE_STR_FIELD("N","egotiate: ",
- negotiate_str,negotiate_len);
- UNKNOWN_FIELD;
-
- case 'P':
- PARSE_STR_FIELD("P","ragma: ",
- pragma_str,pragma_len);
- UNKNOWN_FIELD;
-
- case 'R':
-
- PARSE_STR_FIELD("R","eferer: ",
- referer_str,referer_len);
- if (!PARSE_TOKEN(curr,"ange: bytes=",left))
- UNKNOWN_FIELD;
- {
- const char *tmp = curr;
- char *tmp2 = (char *)curr;
- unsigned int offset_start = 0, offset_end = 0;
-
- if (*tmp2 != '-')
- offset_start = simple_strtoul(tmp2, &tmp2, 10);
- if (*tmp2 == '-') {
- tmp2++;
- if (*tmp2 != '\r')
- offset_end = simple_strtoul(tmp2, &tmp2, 10) +1;
- }
- curr = tmp2;
- left -= tmp2-tmp;
-
- req->offset_start = offset_start;
- req->offset_end = offset_end;
-
- SKIP_LINE(curr,left);
- Dprintk("Range field: %s [%d] (%d-%d).\n", tmp, curr-tmp, offset_start, offset_end);
- break;
- }
-
- case 'U':
- PARSE_STR_FIELD("U","ser-Agent: ",
- user_agent_str,user_agent_len);
- UNKNOWN_FIELD;
-
- default:
- UNKNOWN_FIELD;
- }
- PRINT_MESSAGE_LEFT;
- }
-out:
- /*
- * POST data.
- */
- if ((req->method == METHOD_POST) && req->content_len) {
- PRINT_MESSAGE_LEFT;
- if (curr + req->content_len > message + total_len)
- GOTO_INCOMPLETE;
- req->post_data_str = curr;
- req->post_data_len = req->content_len;
- curr += req->content_len;
- left -= req->content_len;
- Dprintk("POST-ed data: {%s}\n", req->post_data_str);
- }
-
- switch (req->method) {
- default:
- GOTO_REDIR;
- case METHOD_GET:
- case METHOD_HEAD:
- case METHOD_POST:
- case METHOD_PUT:
- ;
- }
-
-#define TUX_SCHEME "http://"
-#define TUX_SCHEME_LEN (sizeof(TUX_SCHEME)-1)
-
- if (!memcmp(req->objectname, TUX_SCHEME, TUX_SCHEME_LEN)) {
-
- /* http://user:password@host:port/object */
-
- const char *head, *tail, *end, *host, *port;
- int host_len, objectname_len;
-
- head = req->objectname + TUX_SCHEME_LEN;
- end = req->objectname + req->objectname_len;
-
- tail = memchr(head, '/', end - head);
- if (!tail)
- GOTO_REDIR;
- host = memchr(head, '@', tail - head);
- if (!host)
- host = head;
- else
- host++;
- if (!*host)
- GOTO_REDIR;
- port = memchr(host, ':', tail - host);
- if (port)
- host_len = port - host;
- else
- host_len = tail - host;
- if (host_len >= MAX_HOST_LEN)
- GOTO_REDIR;
- memcpy(req->host, host, host_len);
- req->host_len = host_len;
- req->host[host_len] = 0;
-
- if (*tail != '/')
- TUX_BUG();
-
- req->uri_str = tail;
- req->uri_len = end - tail;
-
- tail++;
- while (*tail == '/')
- tail++;
-
- objectname_len = end - tail;
- memcpy(req->objectname, tail, objectname_len);
- req->objectname_len = objectname_len;
- req->objectname[objectname_len] = 0;
- } else
- if (req->uri_str[0] != '/')
- GOTO_REDIR;
-
- if ((req->version == HTTP_1_1) && !req->host_len)
- GOTO_REDIR;
- if (req->objectname[0] == '/')
- GOTO_REDIR;
- /*
- * Lets make sure nobody plays games with the host
- * header in a virtual hosting environment:
- */
- if (req->virtual && req->host_len) {
- if (memchr(req->host, '/', req->host_len))
- GOTO_REDIR;
- if (req->host[0] == '.') {
- if (req->host_len == 1)
- GOTO_REDIR;
- if ((req->host_len == 2) && (req->host[0] == '.'))
- GOTO_REDIR;
- }
- }
- /*
- * From this point on the request is for the main TUX engine:
- */
- Dprintk("ok, request accepted.\n");
-
- if (req->keep_alive) {
- req->nr_keepalives++;
- if (req->nr_keepalives == -1)
- req->nr_keepalives--;
- INC_STAT(nr_keepalive_reqs);
- } else
- INC_STAT(nr_nonkeepalive_reqs);
- INC_STAT(keepalive_hist[req->nr_keepalives]);
-
- PRINT_MESSAGE_LEFT;
- req->parsed_len = curr-message;
- if (req->dentry)
- TUX_BUG();
- req->virtual = tux_virtual_server;
- if (req->virtual)
- add_tux_atom(req, http_lookup_vhost);
- else {
- req->docroot_dentry = dget(req->proto->main_docroot.dentry);
- req->docroot_mnt = mntget(req->proto->main_docroot.mnt);
- add_tux_atom(req, http_process_message);
- }
-
- return req->parsed_len;
-
-incomplete_message:
- Dprintk("incomplete message!\n");
- PRINT_MESSAGE_LEFT;
-
- return 0;
-
-error:
- if (total_len > 0)
- req->parsed_len = total_len;
- else
- req->parsed_len = 0;
- PRINT_MESSAGE_LEFT;
- if (tux_TDprintk) {
- TDprintk("redirecting message to secondary server.\n");
- print_req(req);
- }
- return -1;
-}
-
-static int lookup_url (tux_req_t *req, const unsigned int flag)
-{
- /*
- * -1 : no previous checks made
- * 0 : previous check failed, do not check farther,
- * 1 : previous check successed, check farther
- */
- int not_modified = -1;
- int perm = 0, i;
- struct dentry *dentry = NULL;
- struct vfsmount *mnt = NULL;
- struct inode *inode;
- const char *filename;
-
- /*
- * Do not do any etag or last_modified header checking
- * if both unset.
- */
- if (!tux_generate_etags && !tux_generate_last_mod)
- not_modified = 0;
-
-repeat_lookup:
- if (req->dentry)
- TUX_BUG();
-
- filename = req->objectname;
- Dprintk("will look up {%s} (%d)\n", filename, req->objectname_len);
- Dprintk("current->fsuid: %d, current->fsgid: %d, ngroups: %d\n",
- current->fsuid, current->fsgid, current->group_info->ngroups);
- for (i = 0; i < current->group_info->ngroups; i++)
- Dprintk(".. group #%d: %d.\n", i, current->groups[i]);
-
- dentry = tux_lookup(req, filename, flag, &mnt);
-
-#define INDEX "/index.html"
-
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- goto cachemiss;
-
- if (tux_http_dir_indexing && (req->lookup_dir == 1)) {
- // undo the index.html appending:
- req->objectname_len -= sizeof(INDEX)-1;
- req->objectname[req->objectname_len] = 0;
- req->lookup_dir = 2;
- goto repeat_lookup;
- }
- if (!req->lookup_404) {
- int len = strlen(tux_404_page);
- memcpy(req->objectname, tux_404_page, len);
- req->objectname[len] = 0;
- req->objectname_len = len;
- req->lookup_404 = 1;
- req->status = 404;
- goto repeat_lookup;
- }
- TDprintk("abort - lookup error.\n");
- goto abort;
- }
-
- Dprintk("SUCCESS, looked up {%s} == dentry %p (inode %p, count %d.)\n", filename, dentry, dentry->d_inode, atomic_read(&dentry->d_count));
- inode = dentry->d_inode;
-
- /*
- * At this point we have a real, non-negative dentry.
- */
- perm = tux_permission(inode);
-
- if ((perm < 0) || (!S_ISDIR(dentry->d_inode->i_mode)
- && !S_ISREG(dentry->d_inode->i_mode))) {
- Dprintk("FAILED trusted dentry %p permission %d.\n", dentry, perm);
- req->status = 403;
- goto abort;
- }
- if ((req->lookup_dir != 2) && S_ISDIR(dentry->d_inode->i_mode)) {
- if (req->lookup_dir || (req->objectname_len +
- sizeof(INDEX) >= MAX_OBJECTNAME_LEN)) {
- req->status = 403;
- goto abort;
- }
- if (req->objectname_len && (req->objectname[req->objectname_len-1] != '/')) {
- dput(dentry);
- mntput(mnt);
- req->lookup_dir = 0;
- return 2;
- }
- memcpy(req->objectname + req->objectname_len,
- INDEX, sizeof(INDEX));
- req->objectname_len += sizeof(INDEX)-1;
- req->lookup_dir = 1;
- dput(dentry);
- mntput(mnt);
- mnt = NULL;
- dentry = NULL;
- goto repeat_lookup;
- }
- if (tux_max_object_size && (inode->i_size > tux_max_object_size)) {
- TDprintk("too big object, %Ld bytes.\n", inode->i_size);
- req->status = 403;
- goto abort;
- }
- req->total_file_len = inode->i_size;
- req->mtime = inode->i_mtime.tv_sec;
-
- {
- loff_t num = req->total_file_len;
- int nr_digits = 0;
- unsigned long modulo;
- char * etag_p = req->etag;
- char digits [30];
-
- do {
- modulo = do_div(num, 10);
- digits[nr_digits++] = '0' + modulo;
- } while (num);
-
- req->lendigits = nr_digits;
- req->etaglen = nr_digits;
-
- while (nr_digits)
- *etag_p++ = digits[--nr_digits];
-
- *etag_p++ = '-';
- num = req->mtime;
- nr_digits = 0;
-
- do {
- digits[nr_digits++] = 'a' + num % 16;
- num /= 16;
- } while (num);
- req->etaglen += nr_digits+1;
- while (nr_digits)
- *etag_p++ = digits[--nr_digits];
- *etag_p = 0;
- }
-
- if ((req->if_none_match_len >= req->etaglen) && (abs(not_modified) == 1)) {
-
- char * etag_p = req->etag;
- const char * match_p = req->if_none_match_str;
- int pos = req->etaglen - 1;
- int matchpos = req->etaglen - 1;
-
- do {
- while (etag_p[matchpos--] == match_p[pos--])
- if (matchpos < 0)
- break;
- if (matchpos < 0)
- pos = req->if_none_match_len;
- else {
- if (match_p[pos+1] == ',')
- pos += req->etaglen + 2;
- else
- pos += req->etaglen-matchpos;
- matchpos = req->etaglen - 1;
- }
- } while (pos < req->if_none_match_len);
-
- if (matchpos < 0) {
- not_modified = 1;
- TDprintk("Etag matched.\n");
- } else
- not_modified = 0;
- }
-
- if ((req->if_modified_since_len >= 24) && (abs(not_modified) == 1)) {
- if (parse_time(req->if_modified_since_str, req->if_modified_since_len) >= req->mtime ) {
- not_modified = 1;
- Dprintk("Last-Modified matched.\n");
- } else
- not_modified = 0;
- }
-
- if (not_modified == 1) {
- req->status = 304;
- goto abort;
- }
-
- Dprintk("looked up cached dentry %p, (count %d.)\n", dentry, dentry ? atomic_read(&dentry->d_count) : -1 );
-
- url_hist_hit(req->total_file_len);
-out:
- install_req_dentry(req, dentry, mnt);
- req->lookup_dir = 0;
- return 0;
-
-cachemiss:
- return 1;
-
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
-#if CONFIG_TUX_DEBUG
- if (!not_modified) {
- TDprintk("req %p has lookup errors!\n", req);
- if (tux_TDprintk)
- print_req(req);
- }
-#endif
- req_err(req);
- goto out;
-}
-
-int handle_gzip_req (tux_req_t *req, unsigned int flags)
-{
- char *curr = req->objectname + req->objectname_len;
- struct dentry *dentry;
- struct vfsmount *mnt = NULL;
- struct inode *inode, *orig_inode;
- loff_t size, orig_size;
-
- *curr++ = '.';
- *curr++ = 'g';
- *curr++ = 'z';
- *curr++ = 0;
- req->objectname_len += 3;
-
- dentry = tux_lookup(req, req->objectname, flags, &mnt);
-
- req->objectname_len -= 3;
- req->objectname[req->objectname_len] = 0;
-
- if (!dentry)
- return 0;
- if (IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- release_req_dentry(req);
- return 1;
- }
- return 0;
- }
-
- inode = dentry->d_inode;
- size = inode->i_size;
- orig_inode = req->dentry->d_inode;
- orig_size = orig_inode->i_size;
-
- if (!tux_permission(inode)
- && (size < orig_size)
- && (inode->i_mtime.tv_sec >= orig_inode->i_mtime.tv_sec)) {
-
- release_req_dentry(req);
- install_req_dentry(req, dentry, mnt);
- req->total_file_len = req->output_len = size;
- Dprintk("content WILL be gzipped!\n");
- req->content_gzipped = 1;
- } else {
- dput(dentry);
- mntput(mnt);
- }
-
- return 0;
-}
-
-static spinlock_t mimetypes_lock = SPIN_LOCK_UNLOCKED;
-
-static LIST_HEAD(mimetypes_head);
-
-static mimetype_t default_mimetype = { type: "text/plain", type_len: 10, expire_str: "", expire_str_len: 0 };
-
-#define MAX_MIMETYPE_LEN 128
-#define MAX_CACHE_CONTROL_AGE_LEN 30
-
-void add_mimetype (char *new_ext, char *new_type, char *new_expire)
-{
- int type_len = strlen(new_type);
- int ext_len = strlen(new_ext);
- int expire_len = strlen(new_expire);
- mimetype_t *mime;
- char *ext, *type, *expire;
-
- if (type_len > MAX_MIMETYPE_LEN)
- type_len = MAX_MIMETYPE_LEN;
- if (ext_len > MAX_URI_LEN)
- ext_len = MAX_URI_LEN;
- if (expire_len > MAX_CACHE_CONTROL_AGE_LEN)
- expire_len = MAX_CACHE_CONTROL_AGE_LEN;
-
- mime = tux_kmalloc(sizeof(*mime));
- memset(mime, 0, sizeof(*mime));
- ext = tux_kmalloc(ext_len + 1);
- type = tux_kmalloc(type_len + 1);
- expire = tux_kmalloc(expire_len + 1);
-
- strncpy(ext, new_ext, ext_len);
- strncpy(type, new_type, type_len);
- strncpy(expire, new_expire, expire_len);
-
- // in case one of the above parameters was too long :
-
- ext[ext_len] = '\0';
- type[type_len] = '\0';
- expire[expire_len] = '\0';
-
- mime->ext = ext;
- mime->ext_len = ext_len;
-
- mime->type = type;
- mime->type_len = type_len;
-
- mime->expire_str = expire;
- mime->expire_str_len = expire_len;
-
- mime->special = NORMAL_MIME_TYPE;
- if (!strcmp(type, "TUX/redirect"))
- mime->special = MIME_TYPE_REDIRECT;
- if (!strcmp(type, "TUX/CGI"))
- mime->special = MIME_TYPE_CGI;
- if (!strcmp(type, "TUX/module"))
- mime->special = MIME_TYPE_MODULE;
-
- spin_lock(&mimetypes_lock);
- list_add(&mime->list, &mimetypes_head);
- spin_unlock(&mimetypes_lock);
-}
-
-static inline int ext_matches (char *file, int len, char *ext, int extlen)
-{
- int i;
- char *tmp = file + len-1;
- char *tmp2 = ext + extlen-1;
-
- if (len < extlen)
- return 0;
-
- for (i = 0; i < extlen; i++) {
- if (*tmp != *tmp2)
- return 0;
- tmp--;
- tmp2--;
- }
- return 1;
-}
-
-/*
- * Overhead is not a problem, we cache the MIME type
- * in the dentry.
- */
-static mimetype_t * lookup_mimetype (tux_req_t *req)
-{
- char *objectname = req->objectname;
- int len = req->objectname_len;
- mimetype_t *mime = NULL;
- struct list_head *head, *tmp, *tmp1, *tmp2, *tmp3;
-
- if (!memchr(objectname, '.', len))
- goto out;
-
- spin_lock(&mimetypes_lock);
- head = &mimetypes_head;
- tmp = head->next;
-
- while (tmp != head) {
- mime = list_entry(tmp, mimetype_t, list);
- if (ext_matches(objectname, len, mime->ext, mime->ext_len)) {
- /*
- * Percolate often-used mimetypes up:
- */
- if (tmp->prev != &mimetypes_head) {
- tmp1 = tmp;
- tmp2 = tmp->prev;
- tmp3 = tmp->prev->prev;
- list_del(tmp1);
- list_del(tmp2);
- list_add(tmp, tmp3);
- list_add(tmp2, tmp);
- }
- break;
- } else
- mime = NULL;
- tmp = tmp->next;
- }
- spin_unlock(&mimetypes_lock);
-
-out:
- if (!mime)
- mime = &default_mimetype;
- return mime;
-}
-
-void free_mimetypes (void)
-{
- struct list_head *head, *tmp, *next;
- mimetype_t *mime;
-
- spin_lock(&mimetypes_lock);
- head = &mimetypes_head;
- tmp = head->next;
-
- while (tmp != head) {
- next = tmp->next;
- mime = list_entry(tmp, mimetype_t, list);
- list_del(tmp);
-
- kfree(mime->ext);
- mime->ext = NULL;
- kfree(mime->type);
- mime->type = NULL;
- kfree(mime);
-
- tmp = next;
- }
- spin_unlock(&mimetypes_lock);
-}
-
-/*
- * Various constant HTTP responses:
- */
-
-static const char forbidden[] =
- "HTTP/1.1 403 Forbidden\r\n"
- "Connection: Keep-Alive\r\n" \
- "Content-Length: 24\r\n\r\n"
- "<HTML> Forbidden </HTML>";
-
-static const char not_found[] =
- "HTTP/1.1 404 Not Found\r\n"
- "Connection: Keep-Alive\r\n" \
- "Content-Length: 29\r\n\r\n"
- "<HTML> Page Not Found </HTML>";
-
-#define NOTMODIFIED_1 \
- "HTTP/1.1 304 Not Modified\r\n" \
- "Connection: Keep-Alive\r\n" \
- "Date: "
-
-#define NOTMODIFIED_1_LEN (sizeof(NOTMODIFIED_1) - 1)
-
-#define NOTMODIFIED_2 \
- "\r\nETag: \""
-
-#define NOTMODIFIED_2_LEN (sizeof(NOTMODIFIED_2) - 1)
-
-#define NOTMODIFIED_3 \
- "\"\r\n\r\n"
-
-#define NOTMODIFIED_3_LEN (sizeof(NOTMODIFIED_3) - 1)
-
-#define REDIRECT_1 \
- "HTTP/1.1 301 Moved Permanently\r\n" \
- "Location: http://"
-
-#define REDIRECT_1_LEN (sizeof(REDIRECT_1) - 1)
-
-#define REDIRECT_2 \
- "/\r\nContent-Length: 36\r\n" \
- "Connection: Keep-Alive\r\n" \
- "Content-Type: text/html\r\n\r\n" \
- "<HTML> 301 Moved Permanently </HTML>"
-
-#define REDIRECT_2_LEN (sizeof(REDIRECT_2) - 1)
-
-void send_async_err_forbidden (tux_req_t *req)
-{
- send_async_message(req, forbidden, 403, 1);
-}
-
-void send_async_err_not_found (tux_req_t *req)
-{
- send_async_message(req, not_found, 404, 1);
-}
-
-static void send_ret_notmodified (tux_req_t *req)
-{
- char *buf;
- int size;
-
- size = NOTMODIFIED_1_LEN + DATE_LEN - 1 + NOTMODIFIED_2_LEN + req->etaglen + NOTMODIFIED_3_LEN;
- buf = get_abuf(req, size);
- memcpy(buf, NOTMODIFIED_1, NOTMODIFIED_1_LEN);
- buf += NOTMODIFIED_1_LEN;
- memcpy(buf, tux_date, DATE_LEN-1);
- buf += DATE_LEN-1;
- memcpy(buf, NOTMODIFIED_2, NOTMODIFIED_2_LEN);
- buf += NOTMODIFIED_2_LEN;
- memcpy(buf, &req->etag, req->etaglen);
- buf += req->etaglen;
- memcpy(buf, NOTMODIFIED_3, NOTMODIFIED_3_LEN);
- buf += NOTMODIFIED_3_LEN;
-
- req->status = 304;
- send_abuf(req, size, MSG_DONTWAIT);
- add_req_to_workqueue(req);
-}
-
-static void send_ret_redirect (tux_req_t *req, int cachemiss)
-{
- char *buf;
- unsigned int size;
- unsigned int uts_len = 0;
-
- size = REDIRECT_1_LEN;
- if (req->host_len)
- size += req->host_len;
- else {
- down_read(&uts_sem);
- uts_len = strlen(system_utsname.nodename);
- size += uts_len;
- }
- if (req->objectname[0] != '/')
- size++;
- size += req->objectname_len;
- size += REDIRECT_2_LEN;
-
- if (size > PAGE_SIZE) {
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return;
- }
-
- buf = get_abuf(req, size);
-
- memcpy(buf, REDIRECT_1, REDIRECT_1_LEN);
- buf += REDIRECT_1_LEN;
-
- Dprintk("req %p, host: %s, host_len: %d.\n", req, req->host, req->host_len);
- if (req->host_len) {
- memcpy(buf, req->host, req->host_len);
- buf += req->host_len;
- } else {
- memcpy(buf, system_utsname.nodename, uts_len);
- up_read(&uts_sem);
- buf += uts_len;
- }
- if (req->objectname[0] != '/') {
- buf[0] = '/';
- buf++;
- }
-
- memcpy(buf, req->objectname, req->objectname_len);
- buf += req->objectname_len;
-
- memcpy(buf, REDIRECT_2, REDIRECT_2_LEN);
- buf += REDIRECT_2_LEN;
-
- req->status = 301;
- send_abuf(req, size, MSG_DONTWAIT);
- add_req_to_workqueue(req);
-}
-
-static void http_got_request (tux_req_t *req)
-{
- req->host[0] = 0;
- req->host_len = 0;
- add_tux_atom(req, parse_request);
- add_req_to_workqueue(req);
-}
-
-
-tux_attribute_t * lookup_tux_attribute (tux_req_t *req)
-{
- tux_attribute_t *attr;
- struct inode *inode;
- mimetype_t *mime;
-
- attr = tux_kmalloc(sizeof(*attr));
- memset(attr, 0, sizeof(*attr));
-
- mime = lookup_mimetype(req);
-
- inode = req->dentry->d_inode;
- if (!inode->i_uid && !inode->i_gid) {
- if (mime->special == MIME_TYPE_MODULE) {
- attr->tcapi = lookup_tuxmodule(req->objectname);
- if (!attr->tcapi) {
- req_err(req);
- mime = &default_mimetype;
- }
- }
- } else {
- if (mime->special && (mime->special != MIME_TYPE_REDIRECT))
- mime = &default_mimetype;
- }
- attr->mime = mime;
-
- return attr;
-}
-
-static void handle_range(tux_req_t *req)
-{
- if (req->if_range_len) {
- time_t range_time;
-
- range_time = parse_time(req->if_range_str, req->if_range_len);
-
- /*
- * If the file is newer then we send the whole file.
- */
- if (range_time < req->mtime )
- goto out_no_range;
- }
- /* if no offset_end was specified then default to 'end of file': */
- if (!req->offset_end)
- req->offset_end = req->total_file_len;
- /*
- * Sanity checks:
- *
- * - is the range between 0...file_len-1 ?
- * - is offset_end after offset_start?
- *
- * (note that offset_end is higher by 1)
- */
- if ((req->offset_end > req->total_file_len) ||
- (req->offset_start >= req->total_file_len) ||
- (req->offset_end <= req->offset_start))
- goto out_no_range;
- /*
- * If the range is 0...file_len-1 then send the whole file:
- */
- if (!req->offset_start && (req->offset_end == req->total_file_len))
- goto out_no_range;
-
- /* ok, the range is valid, use it: */
-
- req->output_len = req->offset_end - req->offset_start;
- req->in_file.f_pos = req->offset_start;
- return;
-
-out_no_range:
- req->offset_start = 0;
- req->offset_end = 0;
-}
-
-static void http_pre_header (tux_req_t *req, int push);
-static void http_post_header (tux_req_t *req, int cachemiss);
-static void http_send_body (tux_req_t *req, int cachemiss);
-
-#define DIRLIST_HEAD_1 "\
-<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\
-<HTML><HEAD><TITLE>Index of %s</TITLE></HEAD><BODY>\
-<H1>Index of %s </H1><PRE><HR>\n%s"
-
-#define DIRLIST_HEAD_2 "\
-<IMG SRC=\"/icons/back.gif\"ALT=\"[DIR]\"> <A HREF=\"../\">Parent Directory</A>\n"
-
-#define DIRLIST_HEAD_SIZE (sizeof(DIRLIST_HEAD_1) + sizeof(DIRLIST_HEAD_2))
-
-static void http_dirlist_head (tux_req_t *req, int cachemiss)
-{
- char *buf1, *buf2, *path;
- int len;
-
- buf1 = (char *)__get_free_page(GFP_KERNEL);
- buf2 = (char *)__get_free_page(GFP_KERNEL);
- if (!buf1 || !buf2)
- goto out;
- path = tux_print_path(req, req->dentry, req->mnt, buf1, PAGE_SIZE);
- if (path[0] == '/' && path[1] == '/' && !path[3])
- path = "/";
- if (2*strlen(path) + DIRLIST_HEAD_SIZE >= PAGE_SIZE)
- goto out;
- len = sprintf(buf2, DIRLIST_HEAD_1, path, path, req->dentry == req->docroot_dentry ? "" : DIRLIST_HEAD_2);
- __send_async_message(req, buf2, 200, len, 0);
-
-out:
- if (buf1)
- free_page((unsigned long)buf1);
- if (buf2)
- free_page((unsigned long)buf2);
-}
-
-#define DIRLIST_TAIL "\
-</PRE><HR><ADDRESS><IMG SRC=\"/icons/tuxlogo.gif\"ALIGN=\"MIDDLE\"ALT=\"[TUX]\">Powered by Linux/TUX 3.0</ADDRESS>\n</BODY></HTML>"
-
-static void http_dirlist_tail (tux_req_t *req, int cachemiss)
-{
- __send_async_message(req, DIRLIST_TAIL, 200, sizeof(DIRLIST_TAIL)-1, 1);
-}
-
-static void http_dirlist (tux_req_t *req, int cachemiss)
-{
- int head = (req->method == METHOD_HEAD);
-
- req->lookup_dir = 3;
- clear_keepalive(req);
- if (!head) {
- add_tux_atom(req, http_dirlist_tail);
- add_tux_atom(req, list_directory);
- add_tux_atom(req, http_dirlist_head);
- }
- http_pre_header(req, head);
- add_req_to_workqueue(req);
-}
-
-static char *host_path_hash(tux_req_t *req, char *tmp)
-{
- if (req->host_len < 2)
- return NULL;
-
- switch (mass_hosting_hash) {
- default:
- case 0:
- return req->host;
- case 1:
-
- // www.ABCDEFG.com => A/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- memcpy(tmp + 2, req->host, req->host_len);
- tmp[req->host_len + 2] = 0;
-
- return tmp;
- case 2:
- // www.ABCDEFG.com => A/AB/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- tmp[2] = req->host[0];
- tmp[3] = req->host[1];
- tmp[4] = '/';
- memcpy(tmp + 5, req->host, req->host_len);
- tmp[req->host_len + 5] = 0;
-
- return tmp;
- case 3:
- // www.ABCDEFG.com => A/AB/ABC/ABCDEFG.com
-
- tmp[0] = req->host[0];
- tmp[1] = '/';
- tmp[2] = req->host[0];
- tmp[3] = req->host[1];
- tmp[4] = '/';
- tmp[5] = req->host[0];
- tmp[6] = req->host[1];
- tmp[7] = req->host[2];
- tmp[8] = '/';
- memcpy(tmp + 9, req->host, req->host_len);
- tmp[req->host_len + 9] = 0;
-
- return tmp;
- }
-}
-
-static struct dentry * vhost_lookup (tux_req_t *req, struct nameidata* base, struct vfsmount **mnt)
-{
- struct dentry *dentry = NULL;
- // 255.255.255.255
- char ip [3+1+3+1+3+1+3 + 2];
-
- if (req->virtual >= TUX_VHOST_IP) {
- sprintf(ip, "%d.%d.%d.%d",
- NIPQUAD(inet_sk(req->sock->sk)->rcv_saddr));
- dentry = __tux_lookup (req, ip, base, mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- goto lookup_default;
- }
- if (req->virtual == TUX_VHOST_IP)
- goto done;
-
- // fall through in mixed mode:
- }
-
- if (!req->host_len) {
-lookup_default:
- *mnt = NULL;
- dentry = __tux_lookup (req, tux_default_vhost, base, mnt);
- } else {
- char tmp [MAX_HOST_LEN*2];
- char *host_path;
-
- host_path = host_path_hash(req, tmp);
- Dprintk("host path hash returned: {%s}\n", host_path);
-
- dentry = NULL;
- if (host_path) {
- *mnt = NULL;
- dentry = __tux_lookup (req, host_path, base, mnt);
- }
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- if (req->virtual >= TUX_VHOST_IP) {
- *mnt = NULL;
- dentry = __tux_lookup (req, ip, base, mnt);
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
- return dentry;
- base->dentry = dget(req->proto->main_docroot.dentry);
- base->mnt = mntget(req->proto->main_docroot.mnt);
- }
- }
- goto lookup_default;
- }
- }
-done:
- return dentry;
-}
-
-static void http_lookup_vhost (tux_req_t *req, int cachemiss)
-{
- struct dentry *dentry;
- struct nameidata base;
- struct vfsmount *mnt = NULL;
- unsigned int flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-
- Dprintk("http_lookup_vhost(%p, %d, virtual: %d, host: %s (%d).)\n", req, flag, req->virtual, req->host, req->host_len);
-
- base.flags = LOOKUP_FOLLOW|flag;
- base.last_type = LAST_ROOT;
- base.dentry = dget(req->proto->main_docroot.dentry);
- base.mnt = mntget(req->proto->main_docroot.mnt);
-
- dentry = vhost_lookup(req, &base, &mnt);
-
- Dprintk("looked up dentry %p.\n", dentry);
-
- if (dentry && !IS_ERR(dentry) && !dentry->d_inode)
- TUX_BUG();
-
- if (!dentry || IS_ERR(dentry)) {
- if (PTR_ERR(dentry) == -EWOULDBLOCKIO) {
- add_tux_atom(req, http_lookup_vhost);
- queue_cachemiss(req);
- return;
- }
- goto abort;
- }
-
- req->docroot_dentry = dentry;
- req->docroot_mnt = mnt;
-
- add_tux_atom(req, http_process_message);
- add_req_to_workqueue(req);
- return;
-abort:
- if (dentry) {
- if (!IS_ERR(dentry))
- dput(dentry);
- dentry = NULL;
- }
- if (mnt) {
- if (!IS_ERR(mnt))
- mntput(mnt);
- mnt = NULL;
- }
- req_err(req);
- add_req_to_workqueue(req);
-}
-
-static void http_process_message (tux_req_t *req, int cachemiss)
-{
- tux_attribute_t *attr;
- int missed;
- unsigned int lookup_flag = cachemiss ? 0 : LOOKUP_ATOMIC;
-
- Dprintk("handling req %p, cachemiss: %d.\n", req, cachemiss);
-
- /*
- * URL redirection support - redirect all valid requests
- * to the first userspace module.
- */
- if (tux_all_userspace) {
- tcapi_template_t *tcapi = get_first_usermodule();
- if (tcapi) {
- req->usermode = 1;
- req->usermodule_idx = tcapi->userspace_id;
- goto usermode;
- }
- }
- missed = lookup_url(req, lookup_flag);
- if (missed == 2) {
- if (req->query_str) {
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- send_ret_redirect(req, cachemiss);
- return;
- }
- if (req->error)
- goto error;
- if (missed) {
-cachemiss:
- if (cachemiss)
- TUX_BUG();
- Dprintk("uncached request.\n");
- INC_STAT(static_lookup_cachemisses);
- if (req->dentry)
- TUX_BUG();
- add_tux_atom(req, http_process_message);
- queue_cachemiss(req);
- return;
- }
- /*
- * HTML directory indexing.
- */
- if (S_ISDIR(req->dentry->d_inode->i_mode))
- return http_dirlist(req, cachemiss);
- if (!S_ISREG(req->dentry->d_inode->i_mode))
- TUX_BUG();
-
-
- attr = req->dentry->d_extra_attributes;
- if (!attr) {
- attr = lookup_tux_attribute(req);
- if (!attr)
- TUX_BUG();
- req->dentry->d_extra_attributes = attr;
- }
- if (attr->mime)
- Dprintk("using MIME type %s:%s, %d.\n", attr->mime->type, attr->mime->ext, attr->mime->special);
- if (attr->tcapi) {
- req->usermode = 1;
- req->usermodule_idx = attr->tcapi->userspace_id;
- if (req->module_dentry)
- TUX_BUG();
- req->module_dentry = dget(req->dentry);
- release_req_dentry(req);
- goto usermode;
- }
-
- switch (attr->mime->special) {
- case MIME_TYPE_MODULE:
- req->usermode = 1;
- goto usermode;
-
- case MIME_TYPE_REDIRECT:
- req->error = TUX_ERROR_REDIRECT;
- goto error;
-
- case MIME_TYPE_CGI:
-#if CONFIG_TUX_EXTCGI
- Dprintk("CGI request %p.\n", req);
- query_extcgi(req);
- return;
-#endif
-
- default:
- if (req->query_str) {
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- }
- req->attr = attr;
- switch (req->method) {
- case METHOD_GET:
- case METHOD_HEAD:
- break;
- default:
- req->error = TUX_ERROR_REDIRECT;
- goto error;
- }
- if (req->usermode)
- TUX_BUG();
-
- req->output_len = req->total_file_len;
- /*
- * Do range calculations.
- */
- if (req->offset_end || req->offset_start)
- handle_range(req);
-
- if (req->may_send_gzip && !req->offset_start && !req->offset_end) {
- if (handle_gzip_req(req, lookup_flag))
- goto cachemiss;
- if ((tux_compression >= 2) && !req->content_gzipped)
- req->content_gzipped = 2;
- }
- if (req->parsed_len)
- trunc_headers(req);
-
- if (req->error)
- goto error;
-
- add_tux_atom(req, http_send_body);
- add_tux_atom(req, http_post_header);
-
- http_pre_header(req, req->method == METHOD_HEAD);
-
- add_req_to_workqueue(req);
- return;
-
-error:
- if (req->error)
- zap_request(req, cachemiss);
- return;
-
-usermode:
- add_req_to_workqueue(req);
-}
-
-static void http_post_header (tux_req_t *req, int cachemiss)
-{
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = req->output_len;
-#endif
- req->bytes_sent = 0; // data comes now.
-
- add_req_to_workqueue(req);
-}
-
-static void http_send_body (tux_req_t *req, int cachemiss)
-{
- int ret;
-
- Dprintk("SEND req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n", req, __builtin_return_address(0), req->sock, req->sock->sk, req->keep_alive, req->status);
-
- SET_TIMESTAMP(req->output_timestamp);
-
- if (req->error) {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- /*
- * We are in the middle of a file transfer,
- * zap it immediately:
- */
- TDprintk("req->error = TUX_ERROR_CONN_CLOSE.\n");
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
- return;
- }
-
-repeat:
- ret = 0;
- if (!req->status)
- req->status = 200;
- if (req->method != METHOD_HEAD) {
- ret = generic_send_file(req, req->sock, cachemiss);
- Dprintk("body send-file returned: %d.\n", ret);
- } else {
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- }
-
- switch (ret) {
- case -5:
- add_tux_atom(req, http_send_body);
- output_timeout(req);
- break;
- case -4:
- add_tux_atom(req, http_send_body);
- if (add_output_space_event(req, req->sock)) {
- del_tux_atom(req);
- goto repeat;
- }
- break;
- case -3:
- INC_STAT(static_sendfile_cachemisses);
- add_tux_atom(req, http_send_body);
- queue_cachemiss(req);
- break;
- case -1:
- break;
- default:
- req->in_file.f_pos = 0;
- add_req_to_workqueue(req);
- break;
- }
-}
-
-#define DEFAULT_DATE "Wed, 01 Jan 1970 00:00:01 GMT"
-
-char tux_date [DATE_LEN] = DEFAULT_DATE;
-
-/*
- * HTTP header
- */
-
-#define HEADER_PART1A \
- "HTTP/1.1 200 OK\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1B \
- "HTTP/1.1 200 OK"
-
-#define HEADER_PART1AP \
- "HTTP/1.1 206 Partial Content\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1BP \
- "HTTP/1.1 206 Partial Content"
-
-#define HEADER_PART1C \
- "HTTP/1.1 404 Page Not Found\r\n" \
- "Content-Type: "
-
-#define HEADER_PART1D \
- "HTTP/1.1 200 OK\r\n" \
- "Content-Type: text/html\r\n" \
- "Connection: close\r\n"
-
-#define HEADER_PART2_keepalive "\r\nConnection: Keep-Alive\r\nDate: "
-
-#define HEADER_PART2_close "\r\nConnection: close\r\nDate: "
-
-#define HEADER_PART2_none "\r\nDate: "
-
-// date "%s"
-
-#define HEADER_PART3A "\r\nContent-Encoding: gzip"
-#define HEADER_PART3BX "\r\nContent-Length: "
-
-/*
- * Please acknowledge our hard work by not changing this define, or
- * at least please acknowledge us by leaving "TUX/2.0 (Linux)" in
- * the ID string. Thanks! :-)
- */
-#define HEADER_PART3BY "\r\nServer: TUX/2.0 (Linux)\r\nContent-Length: "
-#define HEADER_PART3C "\r\nETag: \""
-#define HEADER_PART3ACC "\r\nAccept-Ranges: bytes"
-#define HEADER_PART3L "\r\nLast-Modified: "
-#define HEADER_PART3P "\r\nContent-Range: bytes "
-#define HEADER_PART3CA "\r\nCache-Control: max-age="
-#define HEADER_PART4 "\r\n\r\n"
-
-#define MAX_OUT_HEADER_LEN (sizeof(HEADER_PART1AP) + MAX_MIMETYPE_LEN + \
- sizeof(HEADER_PART2_keepalive) + DATE_LEN + \
- sizeof(HEADER_PART3A) + sizeof(HEADER_PART3BY) + \
- 12 + sizeof(HEADER_PART3C) + 21 + sizeof(HEADER_PART3L) + \
- sizeof(HEADER_PART3P) + 32 + \
- DATE_LEN + sizeof(HEADER_PART4) + sizeof(tux_extra_html_header) \
- + sizeof(HEADER_PART3CA) + MAX_CACHE_CONTROL_AGE_LEN)
-
-static void http_pre_header (tux_req_t *req, int head)
-{
- int partial = req->offset_start | req->offset_end;
- unsigned long flags;
- char *buf, *curr;
- mimetype_t *mime = NULL;
- int size;
-
-
- if (MAX_OUT_HEADER_LEN > PAGE_SIZE)
- TUX_BUG();
- if ((req->attr && req->attr->tcapi) || req->usermode)
- TUX_BUG();
-
-#define COPY_STATIC_PART(nr,curr) \
- do { \
- memcpy(curr, HEADER_PART##nr, sizeof(HEADER_PART##nr)-1); \
- curr += sizeof(HEADER_PART##nr)-1; \
- } while (0)
-
- buf = curr = get_abuf(req, MAX_OUT_HEADER_LEN);
-
- if (req->lookup_dir) {
- COPY_STATIC_PART(1D, curr);
- goto dir_next;
- }
- mime = req->attr->mime;
- if (!mime)
- TUX_BUG();
-
- if (req->status == 404) {
- COPY_STATIC_PART(1C, curr);
- memcpy(curr, mime->type, mime->type_len);
- curr += mime->type_len;
- } else {
- if (tux_noid && (mime == &default_mimetype)) {
- if (partial)
- COPY_STATIC_PART(1BP, curr);
- else
- COPY_STATIC_PART(1B, curr);
- } else {
- if (partial)
- COPY_STATIC_PART(1AP, curr);
- else
- COPY_STATIC_PART(1A, curr);
- memcpy(curr, mime->type, mime->type_len);
- curr += mime->type_len;
- }
- }
-
- if (tux_generate_cache_control && mime->expire_str_len) {
- COPY_STATIC_PART(3CA, curr);
- memcpy(curr, mime->expire_str, mime->expire_str_len);
- curr += mime->expire_str_len;
- }
-
- if (req->keep_alive /* && (req->version == HTTP_1_0) */)
- COPY_STATIC_PART(2_keepalive, curr);
- else if (!req->keep_alive && (req->version == HTTP_1_1))
- COPY_STATIC_PART(2_close, curr);
- else
- // HTTP/1.0 default means close
- COPY_STATIC_PART(2_none, curr);
-
-dir_next:
- memcpy(curr, tux_date, DATE_LEN-1);
- curr += DATE_LEN-1;
-
- if (req->content_gzipped)
- COPY_STATIC_PART(3A, curr);
-
- /*
- * Content-Length:
- */
- if (!req->lookup_dir) {
- if (tux_noid)
- COPY_STATIC_PART(3BX, curr);
- else
- COPY_STATIC_PART(3BY, curr);
-
- if (partial)
- curr += sprintf(curr, "%Ld", req->output_len);
- else {
- if (req->content_gzipped)
- curr += sprintf(curr, "%Ld",
- req->total_file_len);
- else {
- memcpy(curr, &req->etag, req->lendigits);
- curr += req->lendigits;
- }
- }
- if (tux_generate_etags && (req->status != 404)) {
- COPY_STATIC_PART(3C, curr);
- memcpy(curr, &req->etag, req->etaglen);
- curr += req->etaglen;
- curr[0] = '"';
- curr++;
- }
- if (tux_generate_last_mod || tux_generate_etags)
- COPY_STATIC_PART(3ACC, curr);
- }
- if (tux_generate_last_mod && (req->status != 404)) {
- COPY_STATIC_PART(3L, curr);
- last_mod_time(curr, req->mtime);
- curr += DATE_LEN-1;
- }
- if (partial) {
- COPY_STATIC_PART(3P, curr);
- curr += sprintf(curr, "%Ld-%Ld/%Ld", req->offset_start,
- req->offset_end-1, req->total_file_len);
- }
- COPY_STATIC_PART(4, curr);
- /*
- * Possibly add an extra HTML header:
- */
- if (tux_extra_html_header_size && mime && !strcmp(mime->type, "text/html")) {
- unsigned int len = tux_extra_html_header_size;
-
- memcpy(curr, tux_extra_html_header, len);
- curr += len;
- }
-
- size = curr-buf;
-
-#if CONFIG_TUX_DEBUG
- *curr = 0;
- Dprintk("{%s} [%d/%d]\n", buf, size, strlen(buf));
-#endif
-
- flags = MSG_DONTWAIT;
- if (!head)
- flags |= MSG_MORE;
- send_abuf(req, size, flags);
-}
-
-void http_illegal_request (tux_req_t *req, int cachemiss)
-{
- if (req->status == 304)
- send_ret_notmodified(req);
- else {
- if (req->status == 403)
- send_async_err_forbidden(req);
- else
- send_async_err_not_found(req);
- }
-}
-
-static int http_check_req_err (tux_req_t *req, int cachemiss)
-{
- if ((req->sock->sk->sk_state <= TCP_SYN_RECV) &&
- !tcp_sk(req->sock->sk)->urg_data)
- return 0;
- Dprintk("http_check_req_err(%p,%d): 1 (state: %d, urg: %d)\n",
- req, cachemiss, req->sock->sk->sk_state,
- tcp_sk(req->sock->sk)->urg_data);
-#if CONFIG_TUX_DEBUG
- req->bytes_expected = 0;
-#endif
- req->in_file.f_pos = 0;
- req->error = TUX_ERROR_CONN_CLOSE;
- zap_request(req, cachemiss);
-
- return 1;
-}
-
-#define COPY_STR(str) \
- do { memcpy(tmp, str, sizeof(str)-1); \
- tmp += sizeof(str)-1; } while (0)
-
-static char * http_print_dir_line (tux_req_t *req, char *tmp, char *d_name, int d_len, int d_type, struct dentry *dentry, struct inode *inode)
-{
- int len, spaces;
- loff_t size;
-
- switch (d_type) {
- case DT_DIR:
- COPY_STR("<IMG SRC=\"/icons/dir.gif\" ALT=\"[DIR]\">");
- break;
- case DT_REG:
- if ((d_len >= 3) &&
- (d_name[d_len-3] == '.') &&
- (d_name[d_len-2] == 'g') &&
- (d_name[d_len-1] == 'z'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 't') &&
- (d_name[d_len-2] == 'g') &&
- (d_name[d_len-1] == 'z'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 't') &&
- (d_name[d_len-2] == 'x') &&
- (d_name[d_len-1] == 't'))
- COPY_STR("<IMG SRC=\"/icons/text.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 'b') &&
- (d_name[d_len-2] == 'z') &&
- (d_name[d_len-1] == '2'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- if ((d_len >= 4) &&
- (d_name[d_len-4] == '.') &&
- (d_name[d_len-3] == 'z') &&
- (d_name[d_len-2] == 'i') &&
- (d_name[d_len-1] == 'p'))
- COPY_STR("<IMG SRC=\"/icons/compressed.gif\" ALT=\"[ ]\">");
- else
- COPY_STR("<IMG SRC=\"/icons/file.gif\" ALT=\"[ ]\">");
- break;
- case DT_LNK:
- COPY_STR("<IMG SRC=\"/icons/link.gif\" ALT=\"[LNK]\">");
- break;
- default:
- if (tux_hide_unreadable)
- goto out_dput;
- COPY_STR("<IMG SRC=\"/icons/unknown.gif\" ALT=\"[ ]\">");
- break;
- }
-
-#define LIST_1 " <A HREF=\""
-#define LIST_2 "\">"
-#define LIST_2_DIR "/\">"
-#define LIST_3 "</A> "
-
- COPY_STR(LIST_1);
- memcpy(tmp, d_name, d_len);
- tmp += d_len;
- if (d_type == DT_DIR)
- COPY_STR(LIST_2_DIR);
- else
- COPY_STR(LIST_2);
- spaces = 0;
- len = d_len;
-
- if (len > 25)
- len = 25;
- memcpy(tmp, d_name, len);
- tmp += len;
- if (len != d_len) {
- *tmp++ = '.';
- *tmp++ = '.';
- } else {
- if (d_type == DT_DIR)
- *tmp++ = '/';
- else
- spaces++;
- spaces++;
- }
- COPY_STR(LIST_3);
- while (spaces) {
- *tmp++ = ' ';
- spaces--;
- }
-#define FILL 25
- if (d_len < FILL) {
- memset(tmp, ' ', FILL-d_len);
- tmp += FILL-d_len;
- }
-
- tmp += time_unix2ls(inode->i_mtime.tv_sec, tmp);
- *tmp++ = ' ';
-
- if (d_type != DT_REG) {
- COPY_STR(" - ");
- goto out_size;
- }
- size = inode->i_size >> 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8Lik ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiM ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiG ", size);
- goto out_size;
- }
- size >>= 10;
- if (size < 1024) {
- tmp += sprintf(tmp, "%8LiT ", size);
- goto out_size;
- }
- size >>= 10;
- tmp += sprintf(tmp, "%8LiT ", size);
-
-out_size:
- *tmp++ = '\n';
- *tmp = 0;
-
- return tmp;
-out_dput:
- return NULL;
-}
-
-tux_proto_t tux_proto_http = {
- defer_accept: 1,
- can_redirect: 1,
- got_request: http_got_request,
- parse_message: parse_http_message,
- illegal_request: http_illegal_request,
- check_req_err: http_check_req_err,
- print_dir_line: http_print_dir_line,
- name: "http",
-};
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * redirect.c: redirect requests to other server sockets (such as Apache).
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-static void dummy_destructor(struct open_request *req)
-{
-}
-
-static struct or_calltable dummy =
-{
- 0,
- NULL,
- NULL,
- &dummy_destructor,
- NULL
-};
-
-static int redirect_sock (tux_req_t *req, const int port)
-{
- struct socket *sock = req->sock;
- struct open_request *tcpreq;
- struct sock *sk, *oldsk;
- int err = -1;
-
- /*
- * Look up (optional) listening user-space socket.
- */
- local_bh_disable();
- sk = tcp_v4_lookup_listener(INADDR_ANY, port, 0);
- /*
- * Look up localhost listeners as well.
- */
- if (!sk) {
- u32 daddr;
- ((unsigned char *)&daddr)[0] = 127;
- ((unsigned char *)&daddr)[1] = 0;
- ((unsigned char *)&daddr)[2] = 0;
- ((unsigned char *)&daddr)[3] = 1;
- sk = tcp_v4_lookup_listener(daddr, port, 0);
- }
- local_bh_enable();
-
- /* No secondary server found */
- if (!sk)
- goto out;
-
- /*
- * Requeue the 'old' socket as an accept-socket of
- * the listening socket. This way we can shuffle
- * a socket around. Since we've read the input data
- * via the non-destructive MSG_PEEK, the secondary
- * server can be used transparently.
- */
- oldsk = sock->sk;
- lock_sock(sk);
-
- if (sk->sk_state != TCP_LISTEN)
- goto out_unlock;
-
- tcpreq = tcp_openreq_alloc();
- if (!tcpreq)
- goto out_unlock;
-
- unlink_tux_socket(req);
-
- sock->sk = NULL;
- sock->state = SS_UNCONNECTED;
-
- tcpreq->class = &dummy;
- write_lock_irq(&oldsk->sk_callback_lock);
- oldsk->sk_socket = NULL;
- oldsk->sk_sleep = NULL;
- write_unlock_irq(&oldsk->sk_callback_lock);
-
- tcp_sk(oldsk)->nonagle = 0;
-
- tcp_acceptq_queue(sk, tcpreq, oldsk);
-
- sk->sk_data_ready(sk, 0);
-
- /*
- * It's now completely up to the secondary
- * server to handle this request.
- */
- sock_release(req->sock);
- req->sock = NULL;
- req->parsed_len = 0;
- err = 0;
- Dprintk("req %p redirected to secondary server!\n", req);
-
-out_unlock:
- release_sock(sk);
- sock_put(sk);
-out:
- if (err)
- Dprintk("NO secondary server for req %p!\n", req);
- return err;
-}
-
-void redirect_request (tux_req_t *req, int cachemiss)
-{
- if (tux_TDprintk && (req->status != 304)) {
- TDprintk("trying to redirect req %p, req->error: %d, req->status: %d.\n", req, req->error, req->status);
- print_req(req);
- }
-
- if (cachemiss)
- TUX_BUG();
- if (req->error == TUX_ERROR_CONN_CLOSE)
- goto out_flush;
- if (!req->sock)
- TUX_BUG();
-
- if (!req->status)
- req->status = -1;
- if (!req->proto->can_redirect || (req->status == 304) || redirect_sock(req, tux_clientport)) {
- if (req->parsed_len)
- trunc_headers(req);
- req->proto->illegal_request(req, cachemiss);
- return;
- } else {
- if (req->data_sock)
- BUG();
- }
-out_flush:
- clear_keepalive(req);
- if (!tux_redirect_logging)
- req->status = 0;
- flush_request(req, cachemiss);
-}
-
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * times.c: time conversion routines.
- *
- * Original time convserion code Copyright (C) 1999 by Arjan van de Ven
- */
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-
-
-#include "times.h"
-
-char *dayName[7] = {
- "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
-};
-
-static char *monthName[12] = {
- "Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
-};
-
-char itoa_h[60]={'0','0','0','0','0','0','0','0','0','0',
- '1','1','1','1','1','1','1','1','1','1',
- '2','2','2','2','2','2','2','2','2','2',
- '3','3','3','3','3','3','3','3','3','3',
- '4','4','4','4','4','4','4','4','4','4',
- '5','5','5','5','5','5','5','5','5','5'};
-
-char itoa_l[60]={'0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9',
- '0','1','2','3','4','5','6','7','8','9'};
-
-int time_unix2ls(time_t zulu, char *buf)
-{
- int Y=0,M=0,D=0;
- int H=0,Min=0,S=0,WD=0;
- int I,I2;
- time_t rest, delta;
-
- if (zulu > xtime.tv_sec)
- zulu = xtime.tv_sec;
-
- I=0;
- while (I<TUX_NUMYEARS) {
- if (TimeDays[I][0]>zulu)
- break;
- I++;
- }
-
- Y=--I;
- if (I<0) {
- Y=0;
- goto BuildYear;
- }
- I2=0;
- while (I2<=12) {
- if (TimeDays[I][I2]>zulu)
- break;
- I2++;
- }
-
- M=I2-1;
-
- rest=zulu - TimeDays[Y][M];
- WD=WeekDays[Y][M];
- D=rest/86400;
- rest=rest%86400;
- WD+=D;
- WD=WD%7;
- H=rest/3600;
- rest=rest%3600;
- Min=rest/60;
- rest=rest%60;
- S=rest;
-
-BuildYear:
- Y+=TUX_YEAROFFSET;
-
-
- /* Format: Day, 01 Mon 1999 01:01:01 GMT */
-
- delta = xtime.tv_sec - zulu;
- if (delta > 6*30*24*60)
- // "May 23 2000"
- return sprintf( buf, "%s %02i %04i", monthName[M], D+1, Y);
- else
- // "May 23 10:14"
- return sprintf( buf, "%s %02i %02i:%02i",
- monthName[M], D+1, H, Min);
-}
-
-static int MonthHash[32] =
- {0,0,7,0,0,0,0,0,0,0,0,3,0,0,0,2,6,0,5,0,9,8,4,0,0,11,1,10,0,0,0,0};
-
-#define is_digit(c) ((c) >= '0' && (c) <= '9')
-
-static inline int skip_atoi(char **s)
-{
- int i=0;
-
- while (is_digit(**s))
- i = i*10 + *((*s)++) - '0';
- return i;
-}
-
-time_t mimetime_to_unixtime(char *Q)
-{
- int Y,M,D,H,Min,S;
- unsigned int Hash;
- time_t Temp;
- char *s,**s2;
-
- s=Q;
- s2=&s;
-
- if (strlen(s)<30) return 0;
- if (s[3]!=',') return 0;
- if (s[19]!=':') return 0;
-
- s+=5; /* Skip day of week */
- D = skip_atoi(s2); /* Day of month */
- s++;
- Hash = (char)s[0]+(char)s[2];
- Hash = (Hash<<1) + (char)s[1];
- Hash = (Hash&63)>>1;
- M = MonthHash[Hash];
- s+=4;
- Y = skip_atoi(s2); /* Year */
- s++;
- H = skip_atoi(s2); /* Hour */
- s++;
- Min = skip_atoi(s2); /* Minutes */
- s++;
- S = skip_atoi(s2); /* Seconds */
- s++;
- if ((s[0]!='G')||(s[1]!='M')||(s[2]!='T'))
- {
- return 0; /* No GMT */
- }
-
- if (Y<TUX_YEAROFFSET) Y = TUX_YEAROFFSET;
- if (Y>TUX_YEAROFFSET+9) Y = TUX_YEAROFFSET+9;
-
- Temp = TimeDays[Y-TUX_YEAROFFSET][M];
- Temp += D*86400+H*3600+Min*60+S;
-
- return Temp;
-}
-
-// writes the full http date, corresponding to time_t received
-
-void last_mod_time(char * curr, const time_t t)
-{
- int day, tod, year, wday, mon, hour, min, sec;
-
- tod = t % 86400;
- day = t / 86400;
- if (tod < 0) {
- tod += 86400;
- --day;
- }
-
- hour = tod / 3600;
- tod %= 3600;
- min = tod / 60;
- sec = tod % 60;
-
- wday = (day + 4) % 7;
- if (wday < 0)
- wday += 7;
-
- day -= 11017;
- /* day 0 is march 1, 2000 */
- year = 5 + day / 146097;
- day = day % 146097;
- if (day < 0) {
- day += 146097;
- --year;
- }
- /* from now on, day is nonnegative */
- year *= 4;
- if (day == 146096) {
- year += 3;
- day = 36524;
- } else {
- year += day / 36524;
- day %= 36524;
- }
- year *= 25;
- year += day / 1461;
- day %= 1461;
- year *= 4;
- if (day == 1460) {
- year += 3;
- day = 365;
- } else {
- year += day / 365;
- day %= 365;
- }
-
- day *= 10;
- mon = (day + 5) / 306;
- day = day + 5 - 306 * mon;
- day /= 10;
- if (mon >= 10) {
- ++year;
- mon -= 10;
- } else
- mon += 2;
-
- sprintf(curr, "%s, %.2d %s %d %.2d:%.2d:%.2d GMT", dayName[wday],
- day+1, monthName[mon], year, hour, min, sec);
-}
-
-// writes the full date in ISO8601 format,
-// corresponding to time_t received
-// example: 20011126224910
-
-int mdtm_time(char * curr, const time_t t)
-{
- int day, tod, year, wday, mon, hour, min, sec;
-
- tod = t % 86400;
- day = t / 86400;
- if (tod < 0) {
- tod += 86400;
- --day;
- }
-
- hour = tod / 3600;
- tod %= 3600;
- min = tod / 60;
- sec = tod % 60;
-
- wday = (day + 4) % 7;
- if (wday < 0)
- wday += 7;
-
- day -= 11017;
- /* day 0 is march 1, 2000 */
- year = 5 + day / 146097;
- day = day % 146097;
- if (day < 0) {
- day += 146097;
- --year;
- }
- /* from now on, day is nonnegative */
- year *= 4;
- if (day == 146096) {
- year += 3;
- day = 36524;
- } else {
- year += day / 36524;
- day %= 36524;
- }
- year *= 25;
- year += day / 1461;
- day %= 1461;
- year *= 4;
- if (day == 1460) {
- year += 3;
- day = 365;
- } else {
- year += day / 365;
- day %= 365;
- }
-
- day *= 10;
- mon = (day + 5) / 306;
- day = day + 5 - 306 * mon;
- day /= 10;
- if (mon >= 10) {
- ++year;
- mon -= 10;
- } else
- mon += 2;
-
- return sprintf(curr, "213 %.4d%.2d%.2d%.2d%.2d%.2d\r\n",
- year, mon+1, day+1, hour, min, sec);
-}
-
-static inline int make_num(const char *s)
-{
- if (*s >= '0' && *s <= '9')
- return 10 * (*s - '0') + *(s + 1) - '0';
- else
- return *(s + 1) - '0';
-}
-
-static inline int make_month(const char *s)
-{
- int i;
-
- for (i = 0; i < 12; i++)
- if (!strncmp(monthName[i], s, 3))
- return i+1;
- return 0;
-}
-
-time_t parse_time(const char *str, const int str_len)
-{
- int hour;
- int min;
- int sec;
- int mday;
- int mon;
- int year;
-
- if (str[3] == ',') {
- /* Thu, 09 Jan 1993 01:29:59 GMT */
-
- if (str_len < 29)
- return -1;
-
- mday = make_num(str+5);
- mon = make_month(str + 8);
- year = 100 * make_num(str + 12) + make_num(str + 14);
- hour = make_num(str + 17);
- min = make_num(str + 20);
- sec = make_num(str + 23);
- }
- else {
- const char *s;
- s = strchr(str, ',');
- if (!s || (str_len - (s - str) < 24)) {
- /* Wed Jun 9 01:29:59 1993 */
-
- if (str_len < 24)
- return -1;
-
- mon = make_month(str+4);
- mday = make_num(str+8);
- hour = make_num(str+11);
- min = make_num(str+14);
- sec = make_num(str+17);
- year = make_num(str+20)*100 + make_num(str+22);
- }
- else {
- /* Thursday, 10-Jun-93 01:29:59 GMT */
-
- mday = make_num(s + 2);
- mon = make_month(s + 5);
- year = make_num(s + 9) + 1900;
- if (year < 1970)
- year += 100;
- hour = make_num(s + 12);
- min = make_num(s + 15);
- sec = make_num(s + 18);
- }
- }
-
- if (sec < 0 || sec > 59)
- return -1;
- if (min < 0 || min > 59)
- return -1;
- if (hour < 0 || hour > 23)
- return -1;
- if (mday < 1 || mday > 31)
- return -1;
- if (mon < 1 || mon > 12)
- return -1;
- if (year < 1970 || year > 2020)
- return -1;
-
- return mktime(year, mon, mday, hour, min, sec);
-}
+++ /dev/null
-static time_t TimeDays[10][13] = {
- { 852073200, 854751600, 857170800, 859849200, 862441200, 865119600, 867711600, 870390000, 873068400, 875660400, 878338800, 880930800, 883609200 } ,
- { 883609200, 886287600, 888706800, 891385200, 893977200, 896655600, 899247600, 901926000, 904604400, 907196400, 909874800, 912466800, 915145200 } ,
- { 915145200, 917823600, 920242800, 922921200, 925513200, 928191600, 930783600, 933462000, 936140400, 938732400, 941410800, 944002800, 946681200 } ,
- { 946681200, 949359600, 951865200, 954543600, 957135600, 959814000, 962406000, 965084400, 967762800, 970354800, 973033200, 975625200, 978303600 } ,
- { 978303600, 980982000, 983401200, 986079600, 988671600, 991350000, 993942000, 996620400, 999298800, 1001890800, 1004569200, 1007161200, 1009839600 } ,
- { 1009839600, 1012518000, 1014937200, 1017615600, 1020207600, 1022886000, 1025478000, 1028156400, 1030834800, 1033426800, 1036105200, 1038697200, 1041375600 } ,
- { 1041375600, 1044054000, 1046473200, 1049151600, 1051743600, 1054422000, 1057014000, 1059692400, 1062370800, 1064962800, 1067641200, 1070233200, 1072911600 } ,
- { 1072911600, 1075590000, 1078095600, 1080774000, 1083366000, 1086044400, 1088636400, 1091314800, 1093993200, 1096585200, 1099263600, 1101855600, 1104534000 } ,
- { 1104534000, 1107212400, 1109631600, 1112310000, 1114902000, 1117580400, 1120172400, 1122850800, 1125529200, 1128121200, 1130799600, 1133391600, 1136070000 } ,
- { 1136070000, 1138748400, 1141167600, 1143846000, 1146438000, 1149116400, 1151708400, 1154386800, 1157065200, 1159657200, 1162335600, 1164927600, 1167606000 }
-};
-static int WeekDays[10][13] = {
- { 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
- { 4, 0, 0, 3, 5, 1, 3, 6, 2, 4, 0, 2, 5 } ,
- { 5, 1, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
- { 6, 2, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 } ,
- { 1, 4, 4, 0, 2, 5, 0, 3, 6, 1, 4, 6, 2 } ,
- { 2, 5, 5, 1, 3, 6, 1, 4, 0, 2, 5, 0, 3 } ,
- { 3, 6, 6, 2, 4, 0, 2, 5, 1, 3, 6, 1, 4 } ,
- { 4, 0, 1, 4, 6, 2, 4, 0, 3, 5, 1, 3, 6 } ,
- { 6, 2, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4, 0 } ,
- { 0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5, 1 }
-};
-#define TUX_YEAROFFSET 1997
-#define TUX_NUMYEARS 10
+++ /dev/null
-/*
- * TUX - Integrated Application Protocols Layer and Object Cache
- *
- * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
- *
- * userspace.c: handle userspace-module requests
- */
-
-#include <net/tux.h>
-
-/****************************************************************
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************/
-
#include <linux/security.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
-#include <linux/vs_limit.h>
int sysctl_unix_max_dgram_qlen = 10;
mntput(mnt);
}
- vx_sock_dec(sk);
clr_vx_info(&sk->sk_vx_info);
clr_nx_info(&sk->sk_nx_info);
sock_put(sk);
sk_set_owner(sk, THIS_MODULE);
set_vx_info(&sk->sk_vx_info, current->vx_info);
- sk->sk_xid = vx_current_xid();
- vx_sock_inc(sk);
set_nx_info(&sk->sk_nx_info, current->nx_info);
+ sk->sk_xid = vx_current_xid();
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
EXPORT_SYMBOL(xfrm4_rcv);
EXPORT_SYMBOL(xfrm4_tunnel_register);
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
+EXPORT_SYMBOL(xfrm4_tunnel_check_size);
EXPORT_SYMBOL(xfrm_register_type);
EXPORT_SYMBOL(xfrm_unregister_type);
EXPORT_SYMBOL(xfrm_get_type);
return;
expired:
- read_unlock(&xp->lock);
km_policy_expired(xp, dir, 1);
xfrm_policy_delete(xp, dir);
xfrm_pol_put(xp);
write_lock_bh(&xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (pol) {
- if (dir < XFRM_POLICY_MAX)
- atomic_inc(&flow_cache_genid);
+ if (pol)
xfrm_policy_kill(pol);
- }
}
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
xfrm_put_type(x->type);
}
kfree(x);
+ wake_up(&km_waitq);
}
static void xfrm_state_gc_task(void *data)
x = list_entry(entry, struct xfrm_state, bydst);
xfrm_state_gc_destroy(x);
}
- wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
spin_lock_bh(&xfrm_state_lock);
x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
- if (x1) {
+ if (!x1) {
+ x1 = afinfo->find_acq(
+ x->props.mode, x->props.reqid, x->id.proto,
+ &x->id.daddr, &x->props.saddr, 0);
+ if (x1 && x1->id.spi != x->id.spi && x1->id.spi) {
+ xfrm_state_put(x1);
+ x1 = NULL;
+ }
+ }
+
+ if (x1 && x1->id.spi) {
xfrm_state_put(x1);
x1 = NULL;
err = -EEXIST;
goto out;
}
- x1 = afinfo->find_acq(
- x->props.mode, x->props.reqid, x->id.proto,
- &x->id.daddr, &x->props.saddr, 0);
-
__xfrm_state_insert(x);
err = 0;
for (h=0; h<maxspi-minspi+1; h++) {
spi = minspi + net_random()%(maxspi-minspi+1);
x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
- if (x0 == NULL) {
- x->id.spi = htonl(spi);
+ if (x0 == NULL)
break;
- }
xfrm_state_put(x0);
}
+ x->id.spi = htonl(spi);
}
if (x->id.spi) {
spin_lock_bh(&xfrm_state_lock);
if (err)
return err;
- xfrm_probe_algs();
-
x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
if (!x)
return err;
# docproc: Preprocess .tmpl file in order to generate .sgml docs
# conmakehash: Create arrays for initializing the kernel console tables
-host-progs := conmakehash kallsyms pnmtologo bin2c
-always := $(host-progs)
+host-progs := conmakehash kallsyms modpost mk_elfconfig pnmtologo bin2c
+always := $(host-progs) empty.o
+
+modpost-objs := modpost.o file2alias.o sumversion.o
subdir-$(CONFIG_MODVERSIONS) += genksyms
-subdir-y += mod
# Let clean descend into subdirs
subdir- += basic lxdialog kconfig package
+
+# dependencies on generated files need to be listed explicitly
+
+$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
+
+quiet_cmd_elfconfig = MKELF $@
+ cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
+
+$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
+ $(call if_changed,elfconfig)
+
+targets += elfconfig.h
# Step 2), invoke modpost
# Includes step 3,4
quiet_cmd_modpost = MODPOST
- cmd_modpost = scripts/mod/modpost \
+ cmd_modpost = scripts/modpost \
$(if $(KBUILD_EXTMOD),-i,-o) $(symverfile) \
$(filter-out FORCE,$^)
/* Big exception to the "don't include kernel headers into userspace, which
* even potentially has different endianness and word sizes, since
* we handle those differences explicitly below */
-#include "../../include/linux/mod_devicetable.h"
+#include "../include/linux/mod_devicetable.h"
#define ADD(str, sep, cond, field) \
do { \
71, 94, 92, 82, 0, 0, 62, 0, 63, 0,
62, 63, 0, 64, 0, 65, 0, 5, 0, 16,
0, 20, 0, 11, 0, 13, 0, 66, 0, 70,
- 0, 27, 46, 62, 47, 0, 21, 36, 0, 23,
+ 0, 27, 46, 65, 47, 0, 21, 36, 0, 23,
36, 0, 10, 36, 0, 21, 36, 84, 0, 23,
36, 84, 0, 10, 36, 31, 0, 10, 31, 0,
21, 84, 0, 23, 84, 0, 7, 0, 18, 0,
};
static const short yypact[] = {-32768,
- 15,-32768, 197,-32768, 23,-32768,-32768,-32768,-32768,-32768,
+ 19,-32768, 175,-32768, 32,-32768,-32768,-32768,-32768,-32768,
-18,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
--32768, -28,-32768, -25,-32768,-32768,-32768, -26, -22, -12,
--32768,-32768,-32768,-32768, 49, 493,-32768,-32768,-32768,-32768,
--32768,-32768,-32768,-32768,-32768,-32768,-32768, 27, -8, 101,
--32768, 493, -8,-32768, 493, 10,-32768,-32768, 11, 9,
- 18, 26,-32768, 49, -15, -13,-32768,-32768,-32768, 25,
- 24, 48, 149,-32768,-32768, 49,-32768, 414, 39, 40,
- 47,-32768, 9,-32768,-32768, 49,-32768,-32768,-32768, 66,
--32768, 241,-32768,-32768, 50,-32768, 5, 65, 42, 66,
- 17, 56, 55,-32768,-32768,-32768, 60,-32768, 75,-32768,
- 80,-32768,-32768,-32768,-32768,-32768, 81, 82, 370, 85,
- 98, 89,-32768,-32768, 88,-32768, 91,-32768,-32768,-32768,
--32768, 284,-32768, 24,-32768, 103,-32768,-32768,-32768,-32768,
--32768, 8, 43,-32768, 30,-32768,-32768, 457,-32768,-32768,
- 92, 93,-32768,-32768, 95,-32768, 96,-32768,-32768, 327,
--32768,-32768,-32768,-32768,-32768,-32768, 99, 104,-32768,-32768,
- 148,-32768
+-32768, -30,-32768, -26,-32768,-32768,-32768, -32, -10, -2,
+-32768,-32768,-32768,-32768, 2, 428,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768, 34, 12, 79,
+-32768, 428, 12,-32768, 455, 33,-32768,-32768, 15, 14,
+ 35, 29,-32768, 2, -14, -21,-32768,-32768,-32768, 67,
+ 31, 37, 127,-32768,-32768, 2,-32768, 54, 60, 66,
+ 69,-32768, 14,-32768,-32768, 2,-32768,-32768,-32768, 84,
+-32768, 219,-32768,-32768, 70,-32768, 20, 91, 72, 84,
+ -20, 74, 81,-32768,-32768,-32768, 86,-32768, 102,-32768,
+ 106,-32768,-32768,-32768,-32768,-32768, 109, 108, 348, 112,
+ 126, 117,-32768,-32768, 118,-32768, 122,-32768,-32768,-32768,
+-32768, 262,-32768, 31,-32768, 131,-32768,-32768,-32768,-32768,
+-32768, 7, 120,-32768, -9,-32768,-32768, 392,-32768,-32768,
+ 125, 130,-32768,-32768, 132,-32768, 159,-32768,-32768, 305,
+-32768,-32768,-32768,-32768,-32768,-32768, 160, 161,-32768,-32768,
+ 174,-32768
};
static const short yypgoto[] = {-32768,
- 152,-32768,-32768,-32768, 119,-32768,-32768, 94, 0, -55,
- -35,-32768,-32768,-32768, -69,-32768,-32768, -56, -30,-32768,
- -76,-32768, -122,-32768,-32768, 29, -62,-32768,-32768,-32768,
--32768, -17,-32768,-32768, 105,-32768,-32768, 52, 86, 83,
+ 208,-32768,-32768,-32768, 158,-32768,-32768, 128, 0, -90,
+ -36,-32768, 157,-32768, -70,-32768,-32768, -51, -31,-32768,
+ -40,-32768, -125,-32768,-32768, 65, -97,-32768,-32768,-32768,
+-32768, -19,-32768,-32768, 143,-32768,-32768, 83, 124, 141,
-32768,-32768,-32768
};
-#define YYLAST 533
-
-
-static const short yytable[] = { 78,
- 67, 99, 35, 84, 65, 125, 54, 49, 155, 152,
- 53, 80, 47, 88, 171, 89, 9, 48, 91, 55,
- 127, 50, 129, 56, 50, 18, 114, 99, 81, 99,
- 57, 69, 92, 87, 27, 77, 119, 168, 31, -89,
- 126, 50, 67, 140, 96, 79, 58, 156, 131, 143,
- 97, 76, 60, 142, -89, 60, 59, 68, 60, 95,
- 85, 159, 132, 96, 99, 45, 46, 93, 94, 97,
- 86, 60, 143, 143, 98, 160, 119, 126, 140, 157,
- 158, 96, 156, 67, 58, 111, 112, 97, 142, 60,
- 60, 106, 119, 113, 59, 116, 60, 128, 133, 134,
- 98, 70, 93, 88, 119, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 135, 24, 25, 26, 27, 28, 139, 136,
- 31, 146, 147, 148, 149, 154, -19, 150, 163, 164,
- 32, 165, 166, -19, -103, 169, -19, 172, -19, 107,
- 170, -19, 4, 6, 7, 8, 9, 10, 11, 12,
+#define YYLAST 495
+
+
+static const short yytable[] = { 67,
+ 99, 119, 35, 65, 54, 49, 152, 155, 84, 53,
+ 91, 131, 47, 55, 88, 80, 89, 48, 171, 50,
+ 125, 9, 159, 50, 92, 132, 99, 81, 99, 69,
+ 18, 114, 87, 77, 168, 56, 160, 58, -89, 27,
+ 57, 119, 140, 31, 157, 158, 156, 59, 143, 60,
+ 58, 76, 142, -89, 60, 126, 127, 119, 129, 96,
+ 59, 50, 60, 99, 68, 97, 95, 60, 79, 119,
+ 96, 143, 143, 86, 45, 46, 97, 85, 60, 70,
+ 106, 98, 67, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 75, 24, 25, 26, 27, 28, 162, 108, 31, 115,
- 124, 0, 130, 0, -19, 153, 0, 0, 32, 0,
- 0, -19, -104, 0, -19, 0, -19, 5, 0, -19,
- 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 0, 0, 0,
- 0, 0, -19, 0, 0, 0, 32, 0, 0, -19,
- 0, 118, -19, 0, -19, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 0, 24, 25, 26, 27, 28, 0, 0,
- 31, 0, 0, 0, 0, -82, 0, 0, 0, 0,
- 32, 0, 0, 0, 151, 0, 0, -82, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
- 28, 0, 0, 31, 0, 0, 0, 0, -82, 0,
- 0, 0, 0, 32, 0, 0, 0, 167, 0, 0,
- -82, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
- 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
- 0, -82, 0, 0, 0, 0, 32, 0, 0, 0,
- 0, 0, 0, -82, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 0, 24, 25, 26, 27, 28, 0, 0, 31,
- 0, 0, 0, 0, 0, 140, 0, 0, 0, 141,
- 0, 0, 0, 0, 0, 142, 0, 60, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
- 28, 0, 0, 31, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 32, 0, 0, 0, 0, 0, 0,
- 110, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
- 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
- 0, 161, 0, 0, 0, 0, 32, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 0, 24, 25, 26, 27, 28,
- 0, 0, 31, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 32
+ 110, 24, 25, 26, 27, 28, 111, 126, 31, 93,
+ 94, 96, 112, 116, -19, 113, 133, 97, 32, 60,
+ 98, -19, -103, 128, -19, 134, -19, 107, 93, -19,
+ 88, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 135, 24,
+ 25, 26, 27, 28, 139, 140, 31, 136, 146, 156,
+ 147, 148, -19, 154, 149, 142, 32, 60, 150, -19,
+ -104, 163, -19, 172, -19, 5, 164, -19, 165, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 166, 169, 170, 4, 75,
+ -19, 78, 162, 115, 32, 108, 153, -19, 124, 118,
+ -19, 0, -19, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 130, 24, 25, 26, 27, 28, 0, 0, 31, 0,
+ 0, 0, 0, -82, 0, 0, 0, 0, 32, 0,
+ 0, 0, 151, 0, 0, -82, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
+ 0, 31, 0, 0, 0, 0, -82, 0, 0, 0,
+ 0, 32, 0, 0, 0, 167, 0, 0, -82, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 0, 24, 25, 26,
+ 27, 28, 0, 0, 31, 0, 0, 0, 0, -82,
+ 0, 0, 0, 0, 32, 0, 0, 0, 0, 0,
+ 0, -82, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
+ 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
+ 0, 0, 0, 140, 0, 0, 0, 141, 0, 0,
+ 0, 0, 0, 142, 0, 60, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
+ 0, 31, 0, 0, 0, 0, 161, 0, 0, 0,
+ 0, 32, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
+ 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
+ 7, 8, 9, 10, 11, 0, 13, 32, 15, 16,
+ 0, 18, 19, 20, 0, 22, 0, 24, 25, 26,
+ 27, 28, 0, 0, 31, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 32
};
-static const short yycheck[] = { 55,
- 36, 71, 3, 60, 35, 1, 24, 36, 1, 132,
- 36, 1, 31, 29, 0, 31, 8, 36, 32, 46,
- 97, 50, 99, 46, 50, 17, 83, 97, 59, 99,
- 43, 49, 46, 64, 26, 53, 92, 160, 30, 32,
- 36, 50, 78, 36, 40, 36, 36, 40, 32, 119,
- 46, 52, 48, 46, 47, 48, 46, 31, 48, 36,
- 43, 32, 46, 40, 134, 43, 44, 43, 44, 46,
- 45, 48, 142, 143, 51, 46, 132, 36, 36, 142,
- 143, 40, 40, 119, 36, 47, 47, 46, 46, 48,
- 48, 44, 148, 47, 46, 30, 48, 33, 43, 45,
- 51, 1, 43, 29, 160, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 43, 23, 24, 25, 26, 27, 47, 49,
- 30, 47, 35, 45, 47, 33, 36, 47, 47, 47,
- 40, 47, 47, 43, 44, 47, 46, 0, 48, 1,
- 47, 51, 1, 5, 6, 7, 8, 9, 10, 11,
+static const short yycheck[] = { 36,
+ 71, 92, 3, 35, 24, 36, 132, 1, 60, 36,
+ 32, 32, 31, 46, 29, 1, 31, 36, 0, 50,
+ 1, 8, 32, 50, 46, 46, 97, 59, 99, 49,
+ 17, 83, 64, 53, 160, 46, 46, 36, 32, 26,
+ 43, 132, 36, 30, 142, 143, 40, 46, 119, 48,
+ 36, 52, 46, 47, 48, 36, 97, 148, 99, 40,
+ 46, 50, 48, 134, 31, 46, 36, 48, 36, 160,
+ 40, 142, 143, 45, 43, 44, 46, 43, 48, 1,
+ 44, 51, 119, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 52, 23, 24, 25, 26, 27, 148, 73, 30, 86,
- 95, -1, 100, -1, 36, 134, -1, -1, 40, -1,
- -1, 43, 44, -1, 46, -1, 48, 1, -1, 51,
- -1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, -1, -1, -1,
- -1, -1, 36, -1, -1, -1, 40, -1, -1, 43,
- -1, 1, 46, -1, 48, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, -1, 23, 24, 25, 26, 27, -1, -1,
- 30, -1, -1, -1, -1, 35, -1, -1, -1, -1,
- 40, -1, -1, -1, 1, -1, -1, 47, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
- 27, -1, -1, 30, -1, -1, -1, -1, 35, -1,
- -1, -1, -1, 40, -1, -1, -1, 1, -1, -1,
- 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
- 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
- -1, 35, -1, -1, -1, -1, 40, -1, -1, -1,
- -1, -1, -1, 47, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, -1, 23, 24, 25, 26, 27, -1, -1, 30,
- -1, -1, -1, -1, -1, 36, -1, -1, -1, 40,
- -1, -1, -1, -1, -1, 46, -1, 48, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
- 27, -1, -1, 30, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 40, -1, -1, -1, -1, -1, -1,
- 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
- 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
- -1, 35, -1, -1, -1, -1, 40, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, -1, 23, 24, 25, 26, 27,
- -1, -1, 30, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 40
+ 47, 23, 24, 25, 26, 27, 47, 36, 30, 43,
+ 44, 40, 47, 30, 36, 47, 43, 46, 40, 48,
+ 51, 43, 44, 33, 46, 45, 48, 1, 43, 51,
+ 29, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 43, 23,
+ 24, 25, 26, 27, 47, 36, 30, 49, 47, 40,
+ 35, 45, 36, 33, 47, 46, 40, 48, 47, 43,
+ 44, 47, 46, 0, 48, 1, 47, 51, 47, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 47, 47, 47, 1, 52,
+ 36, 55, 148, 86, 40, 73, 134, 43, 95, 1,
+ 46, -1, 48, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 100, 23, 24, 25, 26, 27, -1, -1, 30, -1,
+ -1, -1, -1, 35, -1, -1, -1, -1, 40, -1,
+ -1, -1, 1, -1, -1, 47, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
+ -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
+ -1, 40, -1, -1, -1, 1, -1, -1, 47, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, -1, 23, 24, 25,
+ 26, 27, -1, -1, 30, -1, -1, -1, -1, 35,
+ -1, -1, -1, -1, 40, -1, -1, -1, -1, -1,
+ -1, 47, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
+ -1, -1, -1, 36, -1, -1, -1, 40, -1, -1,
+ -1, -1, -1, 46, -1, 48, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
+ -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
+ -1, 40, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
+ 6, 7, 8, 9, 10, -1, 12, 40, 14, 15,
+ -1, 17, 18, 19, -1, 21, -1, 23, 24, 25,
+ 26, 27, -1, -1, 30, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 40
};
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/lib/bison.simple"
# that the kernel isn't the stock distribution kernel, for example by
# adding some text to the end of the version number.
#
-%define sublevel 8
+%define sublevel 7
%define kversion 2.6.%{sublevel}
%define rpmversion 2.6.%{sublevel}
%define rhbsys %([ -r /etc/beehive-root ] && echo || echo .`whoami`)
-%define release 1.521.2.6.planetlab%{?date:.%{date}}
+%define release 1.planetlab%{?date:.%{date}}
%define signmodules 0
%define KVERREL %{PACKAGE_VERSION}-%{PACKAGE_RELEASE}
#
%define kernel_prereq fileutils, module-init-tools, initscripts >= 5.83, mkinitrd >= 3.5.5
-Vendor: PlanetLab
-Packager: PlanetLab Central <support@planet-lab.org>
-Distribution: PlanetLab 3.0
-URL: http://cvs.planet-lab.org/cvs/linux-2.6
-
Name: kernel
Group: System Environment/Kernel
License: GPLv2
# List the packages used during the kernel build
#
BuildPreReq: module-init-tools, patch >= 2.5.4, bash >= 2.03, sh-utils, tar
-BuildPreReq: bzip2, findutils, gzip, m4, perl, make >= 3.78, gnupg
-#BuildPreReq: kernel-utils >= 1:2.4-12.1.142
+BuildPreReq: bzip2, findutils, gzip, m4, perl, make >= 3.78, gnupg, kernel-utils >= 2.4-12.1.139
+# temporary req since modutils changed output format
+#BuildPreReq: modutils >= 2.4.26-14
BuildRequires: gcc >= 2.96-98, binutils >= 2.12, redhat-rpm-config
BuildConflicts: rhbuildsys(DiskFree) < 500Mb
BuildArchitectures: i686
%description uml
This package includes a user mode version of the Linux kernel.
-%package vserver
-Summary: A placeholder RPM that provides kernel and kernel-drm
-
-Group: System Environment/Kernel
-Provides: kernel = %{version}
-Provides: kernel-drm = 4.3.0
-
-%description vserver
-VServers do not require and cannot use kernels, but some RPMs have
-implicit or explicit dependencies on the "kernel" package
-(e.g. tcpdump). This package installs no files but provides the
-necessary dependencies to make rpm and yum happy.
-
%prep
%setup -n linux-%{kversion}
grep "__crc_$i\$" System.map >> $RPM_BUILD_ROOT/boot/System.map-$KernelVer ||:
done
rm -f exported
-# install -m 644 init/kerntypes.o $RPM_BUILD_ROOT/boot/Kerntypes-$KernelVer
+ install -m 644 init/kerntypes.o $RPM_BUILD_ROOT/boot/Kerntypes-$KernelVer
install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer
rm -f System.map
cp arch/*/boot/bzImage $RPM_BUILD_ROOT/%{image_install_path}/vmlinuz-$KernelVer
# make some useful links
pushd /boot > /dev/null ; {
ln -sf System.map-%{KVERREL} System.map
-# ln -sf Kerntypes-%{KVERREL} Kerntypes
+ ln -sf Kerntypes-%{KVERREL} Kerntypes
ln -sf config-%{KVERREL} config
ln -sf initrd-%{KVERREL}.img initrd-boot
ln -sf vmlinuz-%{KVERREL} kernel-boot
%files
%defattr(-,root,root)
/%{image_install_path}/vmlinuz-%{KVERREL}
-#/boot/Kerntypes-%{KVERREL}
+/boot/Kerntypes-%{KVERREL}
/boot/System.map-%{KVERREL}
/boot/config-%{KVERREL}
%dir /lib/modules/%{KVERREL}
%files smp
%defattr(-,root,root)
/%{image_install_path}/vmlinuz-%{KVERREL}smp
-#/boot/Kerntypes-%{KVERREL}smp
+/boot/Kerntypes-%{KVERREL}smp
/boot/System.map-%{KVERREL}smp
/boot/config-%{KVERREL}smp
%dir /lib/modules/%{KVERREL}smp
/usr/share/doc/kernel-doc-%{kversion}/Documentation/*
%endif
-
-%files vserver
-%defattr(-,root,root)
-# no files
-
%changelog
-* Thu Sep 16 2004 Mark Huang <mlhuang@cs.princeton.edu>
-- merge to Fedora Core 2 2.6.8-1.521
-
-* Tue Aug 31 2004 Arjan van de Ven <arjanv@redhat.com>
-- fix execshield buglet with legacy binaries
-- 2.6.9-rc1-bk7
-
-* Mon Aug 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk6
-
-* Sat Aug 28 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk4, now with i915 DRM driver
-
-* Fri Aug 27 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.9-rc1-bk2
-
-* Mon Aug 23 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8.1-bk2
-
-* Sat Aug 21 2004 Arjan van de Ven <arjanv@redhat.com>
-- attempt to fix early-udev bug
-
-* Fri Aug 13 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc4-bk3
-- split execshield up some more
-
-* Fri Aug 13 2004 Dave Jones <davej@redhat.com>
-- Update SCSI whitelist again with some more card readers.
-
-* Mon Aug 9 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc3-bk3
-
* Thu Aug 5 2004 Mark Huang <mlhuang@cs.princeton.edu>
- adapt for Fedora Core 2 based PlanetLab 3.0 (remove Source and Patch
sections, most non-x86 sections, and GPG sections)
-* Wed Aug 4 2004 Arjan van de Ven <arjanv@redhat.com>
-- Add the flex-mmap bits for s390/s390x (Pete Zaitcev)
-- Add flex-mmap for x86-64 32 bit emulation
-- 2.6.8-rc3
-
-* Mon Aug 2 2004 Arjan van de Ven <arjanv@redhat.com>
-- Add Rik's token trashing control patch
-
-* Sun Aug 1 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk11
-
-* Fri Jul 30 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk8
-
-* Wed Jul 28 2004 Arjan van de Ven <arjanv@redhat.com>
-- 2.6.8-rc2-bk6
-- make a start at splitting up the execshield patchkit
-
* Fri Jul 16 2004 Arjan van de Ven <arjanv@redhat.com>
- ppc32 embedded updates
strncpy(menu_item, item, menu_width);
menu_item[menu_width] = 0;
- j = first_alpha(menu_item, "YyNnMmHh");
+ j = first_alpha(menu_item, "YyNnMm");
/* Clear 'residue' of last item */
wattrset (win, menubox_attr);
if (key < 256 && isalpha(key)) key = tolower(key);
- if (strchr("ynmh", key))
+ if (strchr("ynm", key))
i = max_choice;
else {
for (i = choice+1; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
if (i == max_choice)
for (i = 0; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
--- /dev/null
+#!/bin/sh
+#
+# Copyright (C) 2002 Khalid Aziz <khalid_aziz@hp.com>
+# Copyright (C) 2002 Randy Dunlap <rddunlap@osdl.org>
+# Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
+# Copyright (C) 2002 Hewlett-Packard Company
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+#
+# Rules to generate ikconfig.h from linux/.config:
+# - Retain lines that begin with "CONFIG_"
+# - Retain lines that begin with "# CONFIG_"
+# - lines that use double-quotes must \\-escape-quote them
+
+if [ $# -lt 2 ]
+then
+ echo "Usage: `basename $0` <configuration_file> <Makefile>"
+ exit 1
+fi
+
+config=$1
+makefile=$2
+
+cat << EOF
+#ifndef _IKCONFIG_H
+#define _IKCONFIG_H
+/*
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *
+ * This file is generated automatically by scripts/mkconfigs. Do not edit.
+ *
+ */
+static char const ikconfig_config[] __attribute__((unused)) =
+"CONFIG_BEGIN=n\\n\\
+$(sed < $config -n 's/"/\\"/g;/^#\? \?CONFIG_/s/.*/&\\n\\/p')
+CONFIG_END=n\\n";
+#endif /* _IKCONFIG_H */
+EOF
+++ /dev/null
-#!/bin/sh
-# Generates a small Makefile used in the root of the output
-# directory, to allow make to be started from there.
-# The Makefile also allow for more convinient build of external modules
-
-# Usage
-# $1 - Kernel src directory
-# $2 - Output directory
-# $3 - version
-# $4 - patchlevel
-
-
-cat << EOF
-# Automatically generated by $0: don't edit
-
-VERSION = $3
-PATCHLEVEL = $4
-
-KERNELSRC := $1
-KERNELOUTPUT := $2
-
-MAKEFLAGS += --no-print-directory
-
-all:
- \$(MAKE) -C \$(KERNELSRC) O=\$(KERNELOUTPUT)
-
-%::
- \$(MAKE) -C \$(KERNELSRC) O=\$(KERNELOUTPUT) \$@
-
-EOF
-
+++ /dev/null
-host-progs := modpost mk_elfconfig
-always := $(host-progs) empty.o
-
-modpost-objs := modpost.o file2alias.o sumversion.o
-
-# dependencies on generated files need to be listed explicitly
-
-$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
-
-quiet_cmd_elfconfig = MKELF $@
- cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
-
-$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
- $(call if_changed,elfconfig)
-
-targets += elfconfig.h
MKSPEC := $(srctree)/scripts/package/mkspec
PREV := set -e; cd ..;
-# rpm-pkg
.PHONY: rpm-pkg rpm
-$(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile
+$(objtree)/kernel.spec: $(MKSPEC)
$(CONFIG_SHELL) $(MKSPEC) > $@
rpm-pkg rpm: $(objtree)/kernel.spec
clean-rule += rm -f $(objtree)/kernel.spec
-# binrpm-pkg
-.PHONY: binrpm-pkg
-$(objtree)/binkernel.spec: $(MKSPEC) $(srctree)/Makefile
- $(CONFIG_SHELL) $(MKSPEC) prebuilt > $@
-
-binrpm-pkg: $(objtree)/binkernel.spec
- $(MAKE)
- set -e; \
- $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
- set -e; \
- mv -f $(objtree)/.tmp_version $(objtree)/.version
-
- $(RPM) --define "_builddir $(srctree)" --target $(UTS_MACHINE) -bb $<
-
-clean-rule += rm -f $(objtree)/binkernel.spec
-
# Deb target
# ---------------------------------------------------------------------------
#
# ---------------------------------------------------------------------------
help:
@echo ' rpm-pkg - Build the kernel as an RPM package'
- @echo ' binrpm-pkg - Build an rpm package containing the compiled kernel & modules'
@echo ' deb-pkg - Build the kernel as an deb package'
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
-# how we were called determines which rpms we build and how we build them
-if [ "$1" = "prebuilt" ]; then
- PREBUILT=true
-else
- PREBUILT=false
-fi
-
# starting to output the spec
if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
PROVIDES=kernel-drm
echo "Group: System Environment/Kernel"
echo "Vendor: The Linux Community"
echo "URL: http://www.kernel.org"
-
-if ! $PREBUILT; then
echo -n "Source: kernel-$VERSION.$PATCHLEVEL.$SUBLEVEL"
echo "$EXTRAVERSION.tar.gz" | sed -e "s/-//g"
-fi
-
echo "BuildRoot: /var/tmp/%{name}-%{PACKAGE_VERSION}-root"
echo "Provides: $PROVIDES"
echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :"
echo "%description"
echo "The Linux Kernel, the operating system core itself"
echo ""
-
-if ! $PREBUILT; then
echo "%prep"
echo "%setup -q"
echo ""
-fi
-
echo "%build"
-
-if ! $PREBUILT; then
echo "make clean && make"
echo ""
-fi
-
echo "%install"
echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib $RPM_BUILD_ROOT/lib/modules'
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/major.h>
-#include <linux/personality.h>
#include "avc.h"
#include "objsec.h"
if (rc)
return rc;
- /* Clear any possibly unsafe personality bits on exec: */
- current->personality &= ~PER_CLEAR_ON_SETID;
-
/* Set the security field to the new SID. */
bsec->sid = newsid;
}
endmenu
menu "Open Sound System"
- depends on SOUND!=n && (BROKEN || !SPARC64)
+ depends on SOUND!=n
config SOUND_PRIME
tristate "Open Sound System (DEPRECATED)"
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
- loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
- pos = *offset;
- if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
- return -EIO;
- if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
- return -EIO;
entry = data->entry;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->rbuffer;
if (buf == NULL)
return -EIO;
- if (pos >= buf->size)
+ if (file->f_pos >= (long)buf->size)
return 0;
- size = buf->size - pos;
+ size = buf->size - file->f_pos;
size = min(count, size);
- if (copy_to_user(buffer, buf->buffer + pos, size))
+ if (copy_to_user(buffer, buf->buffer + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->read)
- size = entry->c.ops->read(entry,
+ return entry->c.ops->read(entry,
data->file_private_data,
- file, buffer, count, pos);
+ file, buffer, count);
break;
}
- if ((ssize_t) size > 0)
- *offset = pos + size;
return size;
}
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
- loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
entry = data->entry;
- pos = *offset;
- if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
- return -EIO;
- if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
- return -EIO;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->wbuffer;
if (buf == NULL)
return -EIO;
- if (pos >= buf->len)
+ if (file->f_pos < 0)
+ return -EINVAL;
+ if (file->f_pos >= (long)buf->len)
return -ENOMEM;
- size = buf->len - pos;
+ size = buf->len - file->f_pos;
size = min(count, size);
- if (copy_from_user(buf->buffer + pos, buffer, size))
+ if (copy_from_user(buf->buffer + file->f_pos, buffer, size))
return -EFAULT;
- if ((long)buf->size < pos + size)
- buf->size = pos + size;
+ if ((long)buf->size < file->f_pos + size)
+ buf->size = file->f_pos + size;
+ file->f_pos += size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->write)
- size = entry->c.ops->write(entry,
+ return entry->c.ops->write(entry,
data->file_private_data,
- file, buffer, count, pos);
+ file, buffer, count);
break;
}
- if ((ssize_t) size > 0)
- *offset = pos + size;
return size;
}
else
printk("pcm_oss: read: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
}
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, 0);
if (ret < 0)
break;
}
else
printk("pcm_oss: readv: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
snd_pcm_oss_setup_t *psetup = NULL, *csetup = NULL;
int nonblock;
wait_queue_t wait;
- static char printed_comm[16];
-
- if (strncmp(printed_comm, current->comm, 16)) {
- printk("application %s uses obsolete OSS audio interface\n",
- current->comm);
- memcpy(printed_comm, current->comm, 16);
- }
+
+ printk("application %s uses obsolete OSS audio interface\n",current->comm);
snd_assert(cardnum >= 0 && cardnum < SNDRV_CARDS, return -ENXIO);
device = SNDRV_MINOR_OSS_DEVICE(minor) == SNDRV_MINOR_OSS_PCM1 ?
const char *snd_pcm_stream_name(snd_pcm_stream_t stream)
{
- snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return NULL);
+ snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return 0);
return snd_pcm_stream_names[stream];
}
const char *snd_pcm_access_name(snd_pcm_access_t access)
{
- snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return NULL);
+ snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return 0);
return snd_pcm_access_names[access];
}
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
- snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return NULL);
+ snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return 0);
return snd_pcm_format_names[format];
}
const char *snd_pcm_subformat_name(snd_pcm_subformat_t subformat)
{
- snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return NULL);
+ snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return 0);
return snd_pcm_subformat_names[subformat];
}
const char *snd_pcm_tstamp_mode_name(snd_pcm_tstamp_t mode)
{
- snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return NULL);
+ snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return 0);
return snd_pcm_tstamp_mode_names[mode];
}
const char *snd_pcm_state_name(snd_pcm_state_t state)
{
- snd_assert(state <= SNDRV_PCM_STATE_LAST, return NULL);
+ snd_assert(state <= SNDRV_PCM_STATE_LAST, return 0);
return snd_pcm_state_names[state];
}
}
static long snd_opl4_mem_proc_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *_buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *_buf, long count)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char* buf;
size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = entry->size - file->f_pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
return -ENOMEM;
- snd_opl4_read_memory(opl4, buf, pos, size);
+ snd_opl4_read_memory(opl4, buf, file->f_pos, size);
if (copy_to_user(_buf, buf, size)) {
vfree(buf);
return -EFAULT;
}
vfree(buf);
+ file->f_pos += size;
return size;
}
return 0;
}
static long snd_opl4_mem_proc_write(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, const char __user *_buf,
- unsigned long count, unsigned long pos)
+ struct file *file, const char __user *_buf, long count)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char *buf;
size = count;
- if (pos + size > entry->size)
- size = entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = entry->size - file->f_pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
vfree(buf);
return -EFAULT;
}
- snd_opl4_write_memory(opl4, buf, pos, size);
+ snd_opl4_write_memory(opl4, buf, file->f_pos, size);
vfree(buf);
+ file->f_pos += size;
return size;
}
return 0;
break;
if (snd_rawmidi_transmit(substream, &midi_byte, 1) != 1)
break;
-#ifdef SNDRV_SERIAL_MS124W_MB_NOCOMBO
+#if SNDRV_SERIAL_MS124W_MB_NOCOMBO
/* select exactly one of the four ports */
addr_byte = (1 << (substream->number + 4)) | 0x08;
#else
struct vx_rmh rmh;
int data_mode;
- *pipep = NULL;
+ *pipep = 0;
vx_init_rmh(&rmh, CMD_RES_PIPE);
vx_set_pipe_cmd_params(&rmh, capture, audioid, num_audio);
#if 0 // NYI
{
snd_pcm_runtime_t *runtime = subs->runtime;
vx_core_t *chip = snd_pcm_substream_chip(subs);
- vx_pipe_t *pipe = NULL;
+ vx_pipe_t *pipe = 0;
unsigned int audio;
int err;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
if (--pipe->references == 0) {
- chip->playback_pipes[pipe->number] = NULL;
+ chip->playback_pipes[pipe->number] = 0;
vx_free_pipe(chip, pipe);
}
if (! subs->runtime->private_data)
return -EINVAL;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
- chip->capture_pipes[pipe->number] = NULL;
+ chip->capture_pipes[pipe->number] = 0;
pipe_out_monitoring = pipe->monitoring_pipe;
if (pipe_out_monitoring) {
if (--pipe_out_monitoring->references == 0) {
vx_free_pipe(chip, pipe_out_monitoring);
- chip->playback_pipes[pipe->number] = NULL;
- pipe->monitoring_pipe = NULL;
+ chip->playback_pipes[pipe->number] = 0;
+ pipe->monitoring_pipe = 0;
}
}
chip->pcm[pcm->device] = NULL;
if (chip->playback_pipes) {
kfree(chip->playback_pipes);
- chip->playback_pipes = NULL;
+ chip->playback_pipes = 0;
}
if (chip->capture_pipes) {
kfree(chip->capture_pipes);
- chip->capture_pipes = NULL;
+ chip->capture_pipes = 0;
}
}
* Linux Video interface
*/
-static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long data)
+static int snd_tea575x_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
tea575x_t *tea = video_get_drvdata(dev);
- void __user *arg = (void __user *)data;
switch(cmd) {
case VIDIOCGCAP:
}
}
+static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(inode, file, cmd, arg, snd_tea575x_do_ioctl);
+}
+
/*
* initialize all the tea575x chips
*/
config SND_SB16_CSP
bool "Sound Blaster 16/AWE CSP support"
- depends on (SND_SB16 || SND_SBAWE) && (BROKEN || !PPC)
+ depends on SND_SB16 || SND_SBAWE
help
Say 'Y' to include support for CSP core. This special coprocessor
can do variable tasks like various compression and decompression
else
nblock->prev->next = nblock;
up(&alloc->memory_mutex);
- return NULL;
+ return 0;
}
pblock = pblock->next;
}
} gus_proc_private_t;
static long snd_gf1_mem_proc_dump(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
gus_proc_private_t *priv = snd_magic_cast(gus_proc_private_t, entry->private_data, return -ENXIO);
int err;
size = count;
- if (pos + size > priv->size)
- size = (long)priv->size - pos;
+ if (file->f_pos + size > priv->size)
+ size = (long)priv->size - file->f_pos;
if (size > 0) {
- if ((err = snd_gus_dram_read(gus, buf, pos, size, priv->rom)) < 0)
+ if ((err = snd_gus_dram_read(gus, buf, file->f_pos, size, priv->rom)) < 0)
return err;
+ file->f_pos += size;
return size;
}
return 0;
emu8k_pcm_t *rec = subs->runtime->private_data;
if (rec)
kfree(rec);
- subs->runtime->private_data = NULL;
+ subs->runtime->private_data = 0;
return 0;
}
runtime->hw.rate_max = 44100;
runtime->hw.channels_max = 2;
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_sb8_hw_constraint_rate_channels, NULL,
+ snd_sb8_hw_constraint_rate_channels, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_RATE, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_sb8_hw_constraint_channels_rate, NULL,
+ snd_sb8_hw_constraint_channels_rate, 0,
SNDRV_PCM_HW_PARAM_RATE, -1);
break;
case SB_HW_201:
#else
#define DPRINT(cond, args...) \
if ((dev->debug & (cond)) == (cond)) { \
- snd_printk (args); \
+ snd_printk (##args); \
}
#endif
#else
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0x0 }
+ { 0x0, 0x0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned long) rbuf;
- rbuf = NULL;
+ rbuf = 0;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
+ if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
dev->sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, 0, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, 0, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
+ if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
snd_printk ("can't get memory stats.\n");
return -1;
} else {
u16 sample_short;
u32 length;
- u16 __user *data_end = NULL;
+ u16 __user *data_end = 0;
unsigned int i;
const unsigned int max_blksize = 4096/2;
unsigned int written;
if (snd_wavefront_cmd (dev,
header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- NULL, sample_hdr)) {
+ 0, sample_hdr)) {
snd_printk ("sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, 0, 0)) {
snd_printk ("download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
snd_printk ("download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
snd_printk ("download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) {
+ if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, 0, voices)) {
snd_printk ("cannot set number of voices to 32.\n");
goto gone_bad;
}
MODULE_PARM_DESC(wss,"change between ACI/WSS-mixer; use 0 and 1 - untested"
" default: do nothing; for PCM1-pro only");
-#ifdef DEBUG
+#if DEBUG
static void print_bits(unsigned char c)
{
int j;
static inline int aci_rawwrite(unsigned char byte)
{
if (busy_wait() >= 0) {
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "aci_rawwrite(%d)\n", byte);
#endif
outb(byte, COMMAND_REGISTER);
if (busy_wait() >= 0) {
byte=inb(STATUS_REGISTER);
-#ifdef DEBUG
+#if DEBUG
printk(KERN_DEBUG "%d = aci_rawread()\n", byte);
#endif
return byte;
} isapnp_ad1816_list[] __initdata = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7150),
- NULL },
+ 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7180),
- NULL },
+ 0 },
{0}
};
ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001),
ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000),
0, 0, 0, 1, 0},
- {NULL}
+ {0}
};
static struct isapnp_device_id id_table[] __devinitdata = {
{ "AC97_3D_CONTROL", 0x100 + AC97_3D_CONTROL, 16 },
{ "AC97_MODEM_RATE", 0x100 + AC97_MODEM_RATE, 16 },
{ "AC97_POWER_CONTROL", 0x100 + AC97_POWER_CONTROL, 16 },
- { NULL }
+ { 0 }
};
if (dev == NULL)
ssize_t ret = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
down(&state->sem);
#if 0
if (dmabuf->mapped) {
ad1889_set_wav_rate(ad1889_dev, 44100);
ad1889_set_wav_fmt(ad1889_dev, AFMT_S16_LE);
AD1889_WRITEW(ad1889_dev, AD_DSWADA, 0x0404); /* attenuation */
- return nonseekable_open(inode, file);
+ return 0;
}
static int ad1889_release(struct inode *inode, struct file *file)
if ((err = ad1889_ac97_init(dev, 0)) != 0)
goto err_free_dsp;
- if (((proc_root = proc_mkdir("driver/ad1889", NULL)) == NULL) ||
+ if (((proc_root = proc_mkdir("driver/ad1889", 0)) == NULL) ||
create_proc_read_entry("ac97", S_IFREG|S_IRUGO, proc_root, ac97_read_proc, dev->ac97_codec) == NULL ||
create_proc_read_entry("info", S_IFREG|S_IRUGO, proc_root, ad1889_read_proc, dev) == NULL)
goto err_free_dsp;
err_free_mem:
ad1889_free_dev(dev);
- pci_set_drvdata(pcidev, NULL);
+ pci_set_drvdata(pcidev, 0);
return -ENODEV;
}
size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : NULL;
+ struct ali_card *card = state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_read called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
const char __user *buffer, size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : NULL;
+ struct ali_card *card = state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_write called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
outl(0x00000000, card->iobase + ALI_INTERRUPTCR);
outl(0x00000000, card->iobase + ALI_INTERRUPTSR);
- return nonseekable_open(inode, file);
+ return 0;
}
static int ali_release(struct inode *inode, struct file *file)
if (card->ac97_codec[i] != NULL
&& card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
}
return -ENODEV;
static int au1000_open_mixdev(struct inode *inode, struct file *file)
{
file->private_data = &au1000_state;
- return nonseekable_open(inode, file);
+ return 0;
}
static int au1000_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, usercnt, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dbg("write: count=%d", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int au1000_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int cm_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int cm_release(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4281: cs4281_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4281: cs4281_open()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ |
FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
tmp &= 0xFFFF;
tmp |= card->pctl;
CS_DBGOUT(CS_PARMS, 6, printk(
- "cs46xx: start_dac() poke card=%p tmp=0x%.08x addr=%p \n",
- card, (unsigned)tmp,
- card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
+ "cs46xx: start_dac() poke card=0x%.08x tmp=0x%.08x addr=0x%.08x \n",
+ (unsigned)card, (unsigned)tmp,
+ (unsigned)card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
cs461x_poke(card, BA1_PCTL, tmp);
}
spin_unlock_irqrestore(&card->lock, flags);
memset(dmabuf->rawbuf,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
(unsigned)hwptr);
- memset((char *)dmabuf->rawbuf +
- dmabuf->dmasize + hwptr - diff,
+ memset((void *)((unsigned)dmabuf->rawbuf +
+ dmabuf->dmasize + hwptr - diff),
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
diff - hwptr);
}
unsigned ptr;
int cnt;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
unsigned ptr;
int cnt;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: CopySamples()+ ") );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
- " dst=%p src=%p count=%d fmt=0x%x\n",
- dst,src,count,fmt) );
+ " dst=0x%x src=0x%x count=%d fmt=0x%x\n",
+ (unsigned)dst,(unsigned)src,(unsigned)count,(unsigned)fmt) );
/*
* See if the data should be output as 8-bit unsigned stereo.
return -ENODEV;
dmabuf = &state->dmabuf;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dmabuf->dmasize,dmabuf->count,buffer,ret) );
if (cs_copy_to_user(state, buffer,
- (char *)dmabuf->rawbuf + swptr, cnt, &copied))
+ (void *)((unsigned)dmabuf->rawbuf + swptr), cnt, &copied))
{
if (!ret) ret = -EFAULT;
goto out;
return -EFAULT;
dmabuf = &state->dmabuf;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
down(&state->sem);
if (dmabuf->mapped)
{
int ret = 0;
unsigned long size;
- CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=%p %s %s\n",
- file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
+ CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=0x%x %s %s\n",
+ (unsigned)file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
vma->vm_flags & VM_READ ? "VM_READ" : "") );
if (vma->vm_flags & VM_WRITE) {
* use the DAC only.
*/
state = card->states[1];
- if (!state) {
+ if(!(unsigned)state)
+ {
ret = -EINVAL;
goto out;
}
{
struct cs_card *card = (struct cs_card *)file->private_data;
struct cs_state *state;
- struct dmabuf *dmabuf=NULL;
+ struct dmabuf *dmabuf=0;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
int ret=0;
unsigned int tmp;
- CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=%p %s %s\n",
- file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=0x%x %s %s\n",
+ (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
list_for_each(entry, &cs46xx_devs)
return ret;
}
CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n") );
- return nonseekable_open(inode, file);
+ return 0;
}
static int cs_release(struct inode *inode, struct file *file)
struct dmabuf *dmabuf;
struct cs_state *state;
unsigned int tmp;
- CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=%p %s %s\n",
- file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=0x%x %s %s\n",
+ (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
{
unsigned int tmp;
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
- printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=%p\n",
- (unsigned)card->pm.flags,card));
+ printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=0x%x\n",
+ (unsigned)card->pm.flags,(unsigned)card));
/*
* check the current state, only suspend if IDLE
*/
CS_INC_USE_COUNT(&card->mixer_use_cnt);
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs46xx: cs_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
static int cs_release_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
"cs46xx: cs_ac97_init()- codec number %d not found\n",
num_ac97) );
- card->ac97_codec[num_ac97] = NULL;
+ card->ac97_codec[num_ac97] = 0;
break;
}
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
card->ac97_codec[num_ac97] = codec;
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
- "cs46xx: cs_ac97_init() ac97_codec[%d] set to %p\n",
+ "cs46xx: cs_ac97_init() ac97_codec[%d] set to 0x%x\n",
(unsigned int)num_ac97,
- codec));
+ (unsigned int)codec));
/* if there is no secondary codec at all, don't probe any more */
if (!ready_2nd)
{
card->ba1.name.reg = ioremap_nocache(card->ba1_addr + BA1_SP_REG, CS461X_BA1_REG_SIZE);
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card=%p card->ba0=%p\n",card,card->ba0) );
+ "cs46xx: card=0x%x card->ba0=0x%.08x\n",(unsigned)card,(unsigned)card->ba0) );
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card->ba1=%p %p %p %p\n",
- card->ba1.name.data0,
- card->ba1.name.data1,
- card->ba1.name.pmem,
- card->ba1.name.reg) );
+ "cs46xx: card->ba1=0x%.08x 0x%.08x 0x%.08x 0x%.08x\n",
+ (unsigned)card->ba1.name.data0,
+ (unsigned)card->ba1.name.data1,
+ (unsigned)card->ba1.name.pmem,
+ (unsigned)card->ba1.name.reg) );
if(card->ba0 == 0 || card->ba1.name.data0 == 0 ||
card->ba1.name.data1 == 0 || card->ba1.name.pmem == 0 ||
if (pmdev)
{
CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
- "cs46xx: probe() pm_register() succeeded (%p).\n",
- pmdev));
+ "cs46xx: probe() pm_register() succeeded (0x%x).\n",
+ (unsigned)pmdev));
pmdev->data = card;
}
else
{
CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: probe() pm_register() failed (%p).\n",
- pmdev));
+ "cs46xx: probe() pm_register() failed (0x%x).\n",
+ (unsigned)pmdev));
card->pm.flags |= CS46XX_PM_NOT_REGISTERED;
}
- CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n",
- (unsigned)card->pm.flags,card));
+ CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=0x%x\n",
+ (unsigned)card->pm.flags,(unsigned)card));
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
"cs46xx: probe()- device allocated successfully\n"));
struct cs_card *card;
CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n",
- dev,(unsigned)rqst,data));
+ "cs46xx: cs46xx_pm_callback dev=0x%x rqst=0x%x card=%d\n",
+ (unsigned)dev,(unsigned)rqst,(unsigned)data));
card = (struct cs_card *) dev->data;
if (card) {
switch(rqst) {
*/
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, u32 state);
static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#define cs_pm_register(a, b, c) NULL
+#define cs_pm_register(a, b, c) 0
#define cs_pm_unregister_all(a)
#define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl
#define CS46XX_RESUME_TBL cs46xx_resume_tbl
#ifndef _dmasound_h_
/*
- * linux/sound/oss/dmasound/dmasound.h
+ * linux/drivers/sound/dmasound/dmasound.h
*
*
* Minor numbers for the sound driver.
#define le2be16dbl(x) (((x)<<8 & 0xff00ff00) | ((x)>>8 & 0x00ff00ff))
#define IOCTL_IN(arg, ret) \
- do { int error = get_user(ret, (int __user *)(arg)); \
+ do { int error = get_user(ret, (int *)(arg)); \
if (error) return error; \
} while (0)
-#define IOCTL_OUT(arg, ret) ioctl_return((int __user *)(arg), ret)
+#define IOCTL_OUT(arg, ret) ioctl_return((int *)(arg), ret)
-static inline int ioctl_return(int __user *addr, int value)
+static inline int ioctl_return(int *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
*/
typedef struct {
- ssize_t (*ct_ulaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_alaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_ulaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_alaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
} TRANS;
struct sound_settings {
/*
- * linux/sound/oss/dmasound/dmasound_atari.c
+ * linux/drivers/sound/dmasound/dmasound_atari.c
*
* Atari TT and Falcon DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/sound/oss/dmasound/dmasound_awacs.c
+ * linux/drivers/sound/dmasound/dmasound_awacs.c
*
* PowerMac `AWACS' and `Burgundy' DMA Sound Driver
* with some limited support for DACA & Tumbler
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
* history prior to 2001/01/26.
*
* 26/01/2001 ed 0.1 Iain Sandoe
#undef IOCTL_OUT
#define IOCTL_IN(arg, ret) \
- rc = get_user(ret, (int __user *)(arg)); \
+ rc = get_user(ret, (int *)(arg)); \
if (rc) break;
#define IOCTL_OUT(arg, ret) \
- ioctl_return2((int __user *)(arg), ret)
+ ioctl_return2((int *)(arg), ret)
-static inline int ioctl_return2(int __user *addr, int value)
+static inline int ioctl_return2(int *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(100);
if (gpio_headphone_irq) {
- if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",NULL) < 0) {
+ if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",0) < 0) {
printk(KERN_ERR "tumbler: Can't request headphone interrupt\n");
gpio_headphone_irq = 0;
} else {
val = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio_headphone_detect, 0);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio_headphone_detect, val | 0x80);
/* Trigger it */
- headphone_intr(0,NULL,NULL);
+ headphone_intr(0,0,0);
}
}
if (!gpio_headphone_irq) {
tas_dmasound_cleanup(void)
{
if (gpio_headphone_irq)
- free_irq(gpio_headphone_irq, NULL);
+ free_irq(gpio_headphone_irq, 0);
return 0;
}
static int
tas_mixer_ioctl(u_int cmd, u_long arg)
{
- int __user *argp = (int __user *)arg;
int data;
int rc;
if ((cmd & ~0xff) == MIXER_WRITE(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
- rc = get_user(data, argp);
+ rc = get_user(data, (int *)(arg));
if (rc<0) return rc;
tas_set_mixer_level(cmd & 0xff, data);
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2(argp, data);
+ return ioctl_return2((int *)(arg), data);
}
if ((cmd & ~0xff) == MIXER_READ(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2(argp, data);
+ return ioctl_return2((int *)(arg), data);
}
switch(cmd) {
static int __init PMacIrqInit(void)
{
if (awacs)
- if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", NULL))
+ if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", 0))
return 0;
- if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", NULL)
- || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", NULL))
+ if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", 0)
+ || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", 0))
return 0;
return 1;
}
msleep(200);
}
if (awacs)
- free_irq(awacs_irq, NULL);
- free_irq(awacs_tx_irq, NULL);
- free_irq(awacs_rx_irq, NULL);
+ free_irq(awacs_irq, 0);
+ free_irq(awacs_tx_irq, 0);
+ free_irq(awacs_rx_irq, 0);
if (awacs)
iounmap((void *)awacs);
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(150);
tas_leave_sleep(); /* Stub for now */
- headphone_intr(0,NULL,NULL);
+ headphone_intr(0,0,0);
break;
case AWACS_DACA:
msleep(10); /* Check this !!! */
sound_device_id = 0;
/* device ID appears post g3 b&w */
- prop = (unsigned int *)get_property(info, "device-id", NULL);
+ prop = (unsigned int *)get_property(info, "device-id", 0);
if (prop != 0)
sound_device_id = *prop;
} else if (is_pbook_g3) {
struct device_node* mio;
- macio_base = NULL;
+ macio_base = 0;
for (mio = io->parent; mio; mio = mio->parent) {
if (strcmp(mio->name, "mac-io") == 0
&& mio->n_addrs > 0) {
/*
- * linux/sound/oss/dmasound/dmasound_core.c
+ * linux/drivers/sound/dmasound/dmasound_core.c
*
*
* OSS/Free compatible Atari TT/Falcon and Amiga DMA sound driver for
return stereo;
}
-static ssize_t sound_copy_translate(TRANS *trans, const u_char __user *userPtr,
+static ssize_t sound_copy_translate(TRANS *trans, const u_char *userPtr,
size_t userCount, u_char frame[],
ssize_t *frameUsed, ssize_t frameLeft)
{
- ssize_t (*ct_func)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_func)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
switch (dmasound.soft.format) {
case AFMT_MU_LAW:
strlcpy(info.id, dmasound.mach.name2, sizeof(info.id));
strlcpy(info.name, dmasound.mach.name2, sizeof(info.name));
info.modify_counter = mixer.modify_counter;
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((int *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
while (i--)
dmasound.mach.dma_free(sq->buffers[i], size);
kfree(sq->buffers);
- sq->buffers = NULL;
+ sq->buffers = 0;
return -ENOMEM;
}
}
static int sq_setup(struct sound_queue *sq)
{
- int (*setup_func)(void) = NULL;
+ int (*setup_func)(void) = 0;
int hard_frame ;
if (sq->locked) { /* are we already set? - and not changeable */
dmasound.mach.play();
}
-static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
+static ssize_t sq_write(struct file *file, const char *src, size_t uLeft,
loff_t *ppos)
{
ssize_t uWritten = 0;
* it and restart the DMA.
*/
-static ssize_t sq_read(struct file *file, char __user *dst, size_t uLeft,
+static ssize_t sq_read(struct file *file, char *dst, size_t uLeft,
loff_t *ppos)
{
info.fragstotal = write_sq.max_active;
info.fragsize = write_sq.user_frag_size;
info.bytes = info.fragments * info.fragsize;
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ if (copy_to_user((void *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
} else
return 0;
}
-static ssize_t state_read(struct file *file, char __user *buf, size_t count,
+static ssize_t state_read(struct file *file, char *buf, size_t count,
loff_t *ppos)
{
int n = state.len - state.ptr;
/*
- * linux/sound/oss/dmasound/dmasound_paula.c
+ * linux/drivers/sound/dmasound/dmasound_paula.c
*
* Amiga `Paula' DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/sound/oss/dmasound/dmasound_q40.c
+ * linux/drivers/sound/dmasound/dmasound_q40.c
*
* Q40 DMA Sound Driver
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
{
int rc;
struct tas_biquad_ctrl_t biquad;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[2][6];
struct tas_biquad_ctrl_t biquad;
- struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
+ if (copy_from_user((void *)&filter_count,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
+ sizeof(int))) {
return -EFAULT;
+ }
- if (copy_from_user(&flags, &argp->flags, sizeof(int)))
+ if (copy_from_user((void *)&flags,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
+ sizeof(int))) {
return -EFAULT;
+ }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user(&biquad, &argp->biquads[i],
+ if (copy_from_user((void *)&biquad,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(&argp->biquads[i], &biquad,
+ if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ (const void *)&biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
+ sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
+ }
#ifdef DEBUG_DRCE
printk("DRCE IOCTL: input [ FLAGS:%x ENABLE:%x THRESH:%x\n",
if (drce_ctrl.flags & TAS_DRCE_THRESHOLD)
drce_ctrl.data.threshold = self->drce_state.threshold;
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
- uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3001c_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3001C_BIQUAD_FILTER_COUNT, argp);
+ put_user(TAS3001C_BIQUAD_FILTER_COUNT, (uint *)(arg));
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, argp);
+ put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
return 0;
case TAS_READ_DRCE:
return tas3001c_drce_rw(self, cmd, arg);
case TAS_READ_DRCE_CAPS:
- put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, argp);
+ put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, (uint *)(arg));
return 0;
case TAS_READ_DRCE_MIN:
case TAS_READ_DRCE_MAX: {
struct tas_drce_ctrl_t drce_ctrl;
- if (copy_from_user(&drce_ctrl, argp,
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
}
}
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
static struct tas_drce_t eqp_0e_2_1_drce = {
.enable = 1,
- .above = { .val = 3.0 * (1<<8), .expand = 0 },
- .below = { .val = 1.0 * (1<<8), .expand = 0 },
- .threshold = -15.33 * (1<<8),
- .energy = 2.4 * (1<<12),
- .attack = 0.013 * (1<<12),
- .decay = 0.212 * (1<<12),
+ .above { .val = 3.0 * (1<<8), .expand = 0 },
+ .below { .val = 1.0 * (1<<8), .expand = 0 },
+ .threshold -15.33 * (1<<8),
+ .energy 2.4 * (1<<12),
+ .attack 0.013 * (1<<12),
+ .decay 0.212 * (1<<12),
};
static struct tas_biquad_ctrl_t eqp_0e_2_1_biquads[]={
u_int cmd,
u_long arg)
{
- void __user *argp = (void __user *)arg;
int rc;
struct tas_biquad_ctrl_t biquad;
- if (copy_from_user((void *)&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[TAS3004_BIQUAD_CHANNEL_COUNT][TAS3004_BIQUAD_FILTER_COUNT];
struct tas_biquad_ctrl_t biquad;
- struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
+ if (copy_from_user((void *)&filter_count,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
+ sizeof(int))) {
return -EFAULT;
+ }
- if (copy_from_user(&flags, &argp->flags, sizeof(int)))
+ if (copy_from_user((void *)&flags,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
+ sizeof(int))) {
return -EFAULT;
+ }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user(&biquad, &argp->biquads[i],
+ if (copy_from_user((void *)&biquad,
+ (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user(&argp->biquads[i], &biquad,
+ if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ (const void *)&biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
- void __user *argp = (void __user *)arg;
- if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
+ sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
+ }
#ifdef DEBUG_DRCE
printk("DRCE: input [ FLAGS:%x ENABLE:%x ABOVE:%x/%x BELOW:%x/%x THRESH:%x ENERGY:%x ATTACK:%x DECAY:%x\n",
if (drce_ctrl.flags & TAS_DRCE_DECAY)
drce_ctrl.data.decay = self->drce_state.decay;
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
- uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3004_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3004_BIQUAD_FILTER_COUNT, argp);
+ put_user(TAS3004_BIQUAD_FILTER_COUNT, (uint *)(arg));
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3004_BIQUAD_CHANNEL_COUNT, argp);
+ put_user(TAS3004_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
return 0;
case TAS_READ_DRCE:
TAS_DRCE_ENERGY |
TAS_DRCE_ATTACK |
TAS_DRCE_DECAY,
- argp);
+ (uint *)(arg));
return 0;
case TAS_READ_DRCE_MIN:
struct tas_drce_ctrl_t drce_ctrl;
const struct tas_drce_t *drce_copy;
- if (copy_from_user(&drce_ctrl, argp,
+ if (copy_from_user((void *)&drce_ctrl,
+ (const void *)arg,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
drce_ctrl.data.decay=drce_copy->decay;
}
- if (copy_to_user(argp, &drce_ctrl,
+ if (copy_to_user((void *)arg,
+ (const void *)&drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
/*
- * linux/sound/oss/dmasound/trans_16.c
+ * linux/drivers/sound/dmasound/trans_16.c
*
* 16 bit translation routines. Only used by Power mac at present.
*
- * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
+ * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
* history prior to 08/02/2001.
*
* 08/02/2001 Iain Sandoe
static short dmasound_alaw2dma16[] ;
static short dmasound_ulaw2dma16[] ;
-static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static int expand_data; /* Data for expanding */
-static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
userCount >>= (stereo? 2: 1);
used = count = min_t(unsigned long, userCount, frameLeft);
if (!stereo) {
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
while (count > 0) {
short data;
if (get_user(data, up++))
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
}
-static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short __user *up = (unsigned short __user *) userPtr;
+ unsigned short *up = (unsigned short *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
}
-static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short __user *up = (unsigned short __user *) userPtr;
+ unsigned short *up = (unsigned short *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
/* data in routines... */
-static ssize_t pmac_ct_s8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
p++;
}
-static ssize_t pmac_ct_u8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
p++;
return stereo? used * 2: used;
}
-static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
ssize_t count, used;
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
/* data in routines (reducing speed)... */
-static ssize_t pmac_ctx_s8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = vall >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
data = valr >> 8;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
userCount--;
}
-static ssize_t pmac_ctx_u8_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = (vall >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
if (stereo) {
data = (valr >> 8) ^ 0x80;
- if (put_user(data, (u_char __user *)userPtr++))
+ if (put_user(data, (u_char *)userPtr++))
return -EFAULT;
}
userCount--;
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
return stereo? utotal * 4: utotal * 2;
}
-static ssize_t pmac_ctx_u16_read(const u_char __user *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16_read(const u_char *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
short *fp = (short *) &frame[*frameUsed];
- short __user *up = (short __user *) userPtr;
+ short *up = (short *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
DPD(3, "emu10k1_audio_read(), buffer=%p, count=%d\n", buffer, (u32) count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(3, "emu10k1_audio_write(), buffer=%p, count=%d\n", buffer, (u32) count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
file->private_data = (void *) wave_dev;
- return nonseekable_open(inode, file);
+ return 0;
}
static int emu10k1_audio_release(struct inode *inode, struct file *file)
{
char s[48];
- if (!proc_mkdir ("driver/emu10k1", NULL)) {
+ if (!proc_mkdir ("driver/emu10k1", 0)) {
printk(KERN_ERR "emu10k1: unable to create proc directory driver/emu10k1\n");
goto err_out;
}
sprintf(s, "driver/emu10k1/%s", pci_name(card->pci_dev));
- if (!proc_mkdir (s, NULL)) {
+ if (!proc_mkdir (s, 0)) {
printk(KERN_ERR "emu10k1: unable to create proc directory %s\n", s);
goto err_emu10k1_proc;
}
sprintf(s, "driver/emu10k1/%s/info", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, NULL, emu10k1_info_proc, card)) {
+ if (!create_proc_read_entry (s, 0, 0, emu10k1_info_proc, card)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_dev_proc;
}
if (!card->is_aps) {
sprintf(s, "driver/emu10k1/%s/ac97", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, NULL, ac97_read_proc, card->ac97)) {
+ if (!create_proc_read_entry (s, 0, 0, ac97_read_proc, card->ac97)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_proc_ac97;
}
up(&card->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int emu10k1_midi_release(struct inode *inode, struct file *file)
DPD(4, "emu10k1_midi_read(), count %#x\n", (u32) count);
+ if (pos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(4, "emu10k1_midi_write(), count=%#x\n", (u32) count);
+ if (pos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (card->seq_mididev) {
kfree(card->seq_mididev);
- card->seq_mididev = NULL;
+ card->seq_mididev = 0;
}
}
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1370_midi_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int es1371_midi_release(struct inode *inode, struct file *file)
return -ENODEV;
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
prog_codec(s);
- return nonseekable_open(inode, file);
+ return 0;
}
static /*const*/ struct file_operations solo1_audio_fops = {
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_midi_release(struct inode *inode, struct file *file)
outb(1, s->sbbase+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int solo1_dmfm_release(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_READ)
forte_channel_init (forte, &forte->rec);
- return nonseekable_open(inode, file);
+ return 0;
}
unsigned int i = bytes, sz = 0;
unsigned long flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok (VERIFY_READ, buffer, bytes))
return -EFAULT;
unsigned int i = bytes, sz;
unsigned long flags;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (!access_ok (VERIFY_WRITE, buffer, bytes))
return -EFAULT;
static int __init
forte_proc_init (void)
{
- if (!proc_mkdir ("driver/forte", NULL))
+ if (!proc_mkdir ("driver/forte", 0))
return -EIO;
- if (!create_proc_read_entry ("driver/forte/chip", 0, NULL, forte_proc_read, forte)) {
+ if (!create_proc_read_entry ("driver/forte/chip", 0, 0, forte_proc_read, forte)) {
remove_proc_entry ("driver/forte", NULL);
return -EIO;
}
- if (!create_proc_read_entry("driver/forte/ac97", 0, NULL, ac97_read_proc, forte->ac97)) {
+ if (!create_proc_read_entry("driver/forte/ac97", 0, 0, ac97_read_proc, forte->ac97)) {
remove_proc_entry ("driver/forte/chip", NULL);
remove_proc_entry ("driver/forte", NULL);
return -EIO;
{
gus_wave_init(hw_config);
+ request_region(hw_config->io_base, 16, "GUS");
+ request_region(hw_config->io_base + 0x100, 12, "GUS"); /* 0x10c-> is MAX */
+
if (sound_alloc_dma(hw_config->dma, "GUS"))
printk(KERN_ERR "gus_card.c: Can't allocate DMA channel %d\n", hw_config->dma);
if (hw_config->dma2 != -1 && hw_config->dma2 != hw_config->dma)
printk(KERN_ERR "GUS: Unsupported IRQ %d\n", irq);
return 0;
}
- if (gus_wave_detect(hw_config->io_base))
+ if (check_region(hw_config->io_base, 16))
+ printk(KERN_ERR "GUS: I/O range conflict (1)\n");
+ else if (check_region(hw_config->io_base + 0x100, 16))
+ printk(KERN_ERR "GUS: I/O range conflict (2)\n");
+ else if (gus_wave_detect(hw_config->io_base))
return 1;
#ifndef EXCLUDE_GUS_IODETECT
* Look at the possible base addresses (0x2X0, X=1, 2, 3, 4, 5, 6)
*/
- for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10) {
- if (io_addr == hw_config->io_base) /* Already tested */
- continue;
- if (gus_wave_detect(io_addr)) {
- hw_config->io_base = io_addr;
- return 1;
- }
- }
+ for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10)
+ if (io_addr != hw_config->io_base) /*
+ * Already tested
+ */
+ if (!check_region(io_addr, 16))
+ if (!check_region(io_addr + 0x100, 16))
+ if (gus_wave_detect(io_addr))
+ {
+ hw_config->io_base = io_addr;
+ return 1;
+ }
#endif
printk("NO GUS card found !\n");
unsigned long loc;
unsigned char val;
- if (!request_region(baseaddr, 16, "GUS"))
- return 0;
- if (!request_region(baseaddr + 0x100, 12, "GUS")) { /* 0x10c-> is MAX */
- release_region(baseaddr, 16);
- return 0;
- }
-
gus_base = baseaddr;
gus_write8(0x4c, 0); /* Reset GF1 */
/* See if there is first block there.... */
gus_poke(0L, 0xaa);
- if (gus_peek(0L) != 0xaa) {
- release_region(baseaddr + 0x100, 12);
- release_region(baseaddr, 16);
- return 0;
- }
+ if (gus_peek(0L) != 0xaa)
+ return (0);
/* Now zero it out so that I can check for mirroring .. */
gus_poke(0L, 0x00);
if (hal2) {
file->private_data = hal2;
- return nonseekable_open(inode, file);
+ return 0;
}
return -ENODEV;
}
if (!count)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (down_interruptible(&adc->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
if (!count)
return 0;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (down_interruptible(&dac->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
dac->usecount++;
}
- return nonseekable_open(inode, file);
+ return 0;
}
static int hal2_release(struct inode *inode, struct file *file)
/* extract register offset from codec struct */
#define IO_REG_OFF(codec) (((struct i810_card *) codec->private_data)->ac97_id_map[codec->id])
-#define I810_IOREAD(size, type, card, off) \
-({ \
- type val; \
- if (card->use_mmio) \
- val=read##size(card->iobase_mmio+off); \
- else \
- val=in##size(card->iobase+off); \
- val; \
-})
-
-#define I810_IOREADL(card, off) I810_IOREAD(l, u32, card, off)
-#define I810_IOREADW(card, off) I810_IOREAD(w, u16, card, off)
-#define I810_IOREADB(card, off) I810_IOREAD(b, u8, card, off)
-
-#define I810_IOWRITE(size, val, card, off) \
-({ \
- if (card->use_mmio) \
- write##size(val, card->iobase_mmio+off); \
- else \
- out##size(val, card->iobase+off); \
-})
-
-#define I810_IOWRITEL(val, card, off) I810_IOWRITE(l, val, card, off)
-#define I810_IOWRITEW(val, card, off) I810_IOWRITE(w, val, card, off)
-#define I810_IOWRITEB(val, card, off) I810_IOWRITE(b, val, card, off)
-
-#define GET_CIV(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_CIV), SG_LEN)
-#define GET_LVI(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_LVI), SG_LEN)
+#define GET_CIV(port) MODULOP2(inb((port) + OFF_CIV), SG_LEN)
+#define GET_LVI(port) MODULOP2(inb((port) + OFF_LVI), SG_LEN)
/* set LVI from CIV */
-#define CIV_TO_LVI(card, port, off) \
- I810_IOWRITEB(MODULOP2(GET_CIV((card), (port)) + (off), SG_LEN), (card), (port) + OFF_LVI)
+#define CIV_TO_LVI(port, off) \
+ outb(MODULOP2(GET_CIV((port)) + (off), SG_LEN), (port) + OFF_LVI)
static struct i810_card *devs = NULL;
return 0;
if (rec)
- port = dmabuf->read_channel->port;
+ port = state->card->iobase + dmabuf->read_channel->port;
else
- port = dmabuf->write_channel->port;
+ port = state->card->iobase + dmabuf->write_channel->port;
if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
port_picb = port + OFF_SR;
port_picb = port + OFF_PICB;
do {
- civ = GET_CIV(state->card, port);
- offset = I810_IOREADW(state->card, port_picb);
+ civ = GET_CIV(port);
+ offset = inw(port_picb);
/* Must have a delay here! */
if(offset == 0)
udelay(1);
* that we won't have to worry about the chip still being
* out of sync with reality ;-)
*/
- } while (civ != GET_CIV(state->card, port) || offset != I810_IOREADW(state->card, port_picb));
+ } while (civ != GET_CIV(port) || offset != inw(port_picb));
return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
% dmabuf->dmasize);
struct i810_card *card = state->card;
dmabuf->enable &= ~ADC_RUNNING;
- I810_IOWRITEB(0, card, PI_CR);
+ outb(0, card->iobase + PI_CR);
// wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PI_CR) != 0 ) ;
+ while( inb(card->iobase + PI_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PI_PICB), card, PI_PICB );
+ outb( inb(card->iobase + PI_PICB), card->iobase + PI_PICB );
else
- I810_IOWRITEB( I810_IOREADB(card, PI_SR), card, PI_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PI, card, GLOB_STA);
+ outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
+ outl( inl(card->iobase + GLOB_STA) & INT_PI, card->iobase + GLOB_STA);
}
static void stop_adc(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_INPUT)) {
dmabuf->enable |= ADC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PI_CR);
+ outb(0x10 | 0x04 | 0x01, state->card->iobase + PI_CR);
}
}
struct i810_card *card = state->card;
dmabuf->enable &= ~DAC_RUNNING;
- I810_IOWRITEB(0, card, PO_CR);
+ outb(0, card->iobase + PO_CR);
// wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PO_CR) != 0 ) ;
+ while( inb(card->iobase + PO_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PO_PICB), card, PO_PICB );
+ outb( inb(card->iobase + PO_PICB), card->iobase + PO_PICB );
else
- I810_IOWRITEB( I810_IOREADB(card, PO_SR), card, PO_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PO, card, GLOB_STA);
+ outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
+ outl( inl(card->iobase + GLOB_STA) & INT_PO, card->iobase + GLOB_STA);
}
static void stop_dac(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
dmabuf->enable |= DAC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PO_CR);
+ outb(0x10 | 0x04 | 0x01, state->card->iobase + PO_CR);
}
}
static void start_dac(struct i810_state *state)
sg++;
}
spin_lock_irqsave(&state->card->lock, flags);
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while( I810_IOREADB(state->card, c->port+OFF_CR) & 0x02 ) ;
- I810_IOWRITEL((u32)state->card->chandma +
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ while( inb(state->card->iobase+c->port+OFF_CR) & 0x02 ) ;
+ outl((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
+ state->card->iobase+c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card->iobase+c->port, 0);
spin_unlock_irqrestore(&state->card->lock, flags);
void (*start)(struct i810_state *);
count = dmabuf->count;
+ port = state->card->iobase;
if (rec) {
- port = dmabuf->read_channel->port;
+ port += dmabuf->read_channel->port;
trigger = PCM_ENABLE_INPUT;
start = __start_adc;
count = dmabuf->dmasize - count;
} else {
- port = dmabuf->write_channel->port;
+ port += dmabuf->write_channel->port;
trigger = PCM_ENABLE_OUTPUT;
start = __start_dac;
}
return;
start(state);
- while (!(I810_IOREADB(state->card, port + OFF_CR) & ((1<<4) | (1<<2))))
+ while (!(inb(port + OFF_CR) & ((1<<4) | (1<<2))))
;
}
/* MASKP2(swptr, fragsize) - 1 is the tail of our transfer */
x = MODULOP2(MASKP2(dmabuf->swptr, fragsize) - 1, dmabuf->dmasize);
x >>= dmabuf->fragshift;
- I810_IOWRITEB(x, state->card, port + OFF_LVI);
+ outb(x, port + OFF_LVI);
}
static void i810_update_lvi(struct i810_state *state, int rec)
/* this is normal for the end of a read */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card, PI_BASE) !=
- GET_LVI(state->card, PI_BASE)) {
+ if (GET_CIV(state->card->iobase + PI_BASE) !=
+ GET_LVI(state->card->iobase + PI_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
dmabuf->error++;
}
/* this is normal for the end of a write */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card, PO_BASE) !=
- GET_LVI(state->card, PO_BASE)) {
+ if (GET_CIV(state->card->iobase + PO_BASE) !=
+ GET_LVI(state->card->iobase + PO_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
"count %d\n",
- GET_CIV(state->card, PO_BASE),
- GET_LVI(state->card, PO_BASE),
+ GET_CIV(state->card->iobase + PO_BASE),
+ GET_LVI(state->card->iobase + PO_BASE),
dmabuf->hwptr, dmabuf->count);
dmabuf->error++;
}
struct i810_state *state = card->states[i];
struct i810_channel *c;
struct dmabuf *dmabuf;
- unsigned long port;
+ unsigned long port = card->iobase;
u16 status;
if(!state)
} else /* This can occur going from R/W to close */
continue;
- port = c->port;
+ port+=c->port;
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- status = I810_IOREADW(card, port + OFF_PICB);
+ status = inw(port + OFF_PICB);
else
- status = I810_IOREADW(card, port + OFF_SR);
+ status = inw(port + OFF_SR);
#ifdef DEBUG_INTERRUPTS
printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
if(dmabuf->enable & ADC_RUNNING)
count = dmabuf->dmasize - count;
if (count >= (int)dmabuf->fragsize) {
- I810_IOWRITEB(I810_IOREADB(card, port+OFF_CR) | 1, card, port+OFF_CR);
+ outb(inb(port+OFF_CR) | 1, port+OFF_CR);
#ifdef DEBUG_INTERRUPTS
printk(" CONTINUE ");
#endif
}
}
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_PICB);
+ outw(status & DMA_INT_MASK, port + OFF_PICB);
else
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_SR);
+ outw(status & DMA_INT_MASK, port + OFF_SR);
}
#ifdef DEBUG_INTERRUPTS
printk(")\n");
spin_lock(&card->lock);
- status = I810_IOREADL(card, GLOB_STA);
+ status = inl(card->iobase + GLOB_STA);
if(!(status & INT_MASK))
{
i810_channel_interrupt(card);
/* clear 'em */
- I810_IOWRITEL(status & INT_MASK, card, GLOB_STA);
+ outl(status & INT_MASK, card->iobase + GLOB_STA);
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
static ssize_t i810_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
+ struct i810_card *card=state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_read called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
static ssize_t i810_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
+ struct i810_card *card=state ? state->card : 0;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_write called, count = %d\n", count);
#endif
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
__stop_adc(state);
}
if (c != NULL) {
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while ( I810_IOREADB(state->card, c->port+OFF_CR) & 2 )
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ while ( inb(state->card->iobase+c->port+OFF_CR) & 2 )
cpu_relax();
- I810_IOWRITEL((u32)state->card->chandma +
+ outl((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
+ state->card->iobase+c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card->iobase+c->port, 0);
}
spin_unlock_irqrestore(&state->card->lock, flags);
/* Global Status and Global Control register are now */
/* used to indicate this. */
- i_glob_cnt = I810_IOREADL(state->card, GLOB_CNT);
+ i_glob_cnt = inl(state->card->iobase + GLOB_CNT);
/* Current # of channels enabled */
if ( i_glob_cnt & 0x0100000 )
switch ( val ) {
case 2: /* 2 channels is always supported */
- I810_IOWRITEL(i_glob_cnt & 0xffcfffff,
- state->card, GLOB_CNT);
+ outl(i_glob_cnt & 0xffcfffff,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings???? */
break;
case 4: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 4 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x100000,
- state->card, GLOB_CNT);
+ outl((i_glob_cnt & 0xffcfffff) | 0x100000,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
break;
case 6: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 6 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x200000,
- state->card, GLOB_CNT);
+ outl((i_glob_cnt & 0xffcfffff) | 0x200000,
+ state->card->iobase + GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
} else {
i810_set_dac_rate(state, 8000);
/* Put the ACLink in 2 channel mode by default */
- i = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(i & 0xffcfffff, card, GLOB_CNT);
+ i = inl(card->iobase + GLOB_CNT);
+ outl(i & 0xffcfffff, card->iobase + GLOB_CNT);
}
}
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
- return nonseekable_open(inode, file);
+ return 0;
}
static int i810_release(struct inode *inode, struct file *file)
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (I810_IOREADB(card, CAS) & 1))
+ while(count-- && (inb(card->iobase + CAS) & 1))
udelay(1);
return inw(card->ac97base + reg_set);
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (I810_IOREADB(card, CAS) & 1))
+ while(count-- && (inb(card->iobase + CAS) & 1))
udelay(1);
outw(data, card->ac97base + reg_set);
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
}
return -ENODEV;
static inline int i810_ac97_exists(struct i810_card *card, int ac97_number)
{
- u32 reg = I810_IOREADL(card, GLOB_STA);
+ u32 reg = inl(card->iobase + GLOB_STA);
switch (ac97_number) {
case 0:
return reg & (1<<8);
static int i810_ac97_power_up_bus(struct i810_card *card)
{
- u32 reg = I810_IOREADL(card, GLOB_CNT);
+ u32 reg = inl(card->iobase + GLOB_CNT);
int i;
int primary_codec_id = 0;
reg&=~8; /* ACLink on */
/* At this point we deassert AC_RESET # */
- I810_IOWRITEL(reg , card, GLOB_CNT);
+ outl(reg , card->iobase + GLOB_CNT);
/* We must now allow time for the Codec initialisation.
600mS is the specified time */
for(i=0;i<10;i++)
{
- if((I810_IOREADL(card, GLOB_CNT)&4)==0)
+ if((inl(card->iobase+GLOB_CNT)&4)==0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
* See if the primary codec comes ready. This must happen
* before we start doing DMA stuff
*/
- /* see i810_ac97_init for the next 10 lines (jsaw) */
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
+ /* see i810_ac97_init for the next 7 lines (jsaw) */
+ inw(card->ac97base);
if (ich_use_mmio(card)) {
primary_codec_id = (int) readl(card->iobase_mmio + SDM) & 0x3;
printk(KERN_INFO "i810_audio: Primary codec has ID %d\n",
else
printk("no response.\n");
}
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
+ inw(card->ac97base);
return 1;
}
/* to check.... */
card->channels = 2;
- reg = I810_IOREADL(card, GLOB_STA);
+ reg = inl(card->iobase + GLOB_STA);
if ( reg & 0x0200000 )
card->channels = 6;
else if ( reg & 0x0100000 )
card->channels = 4;
printk(KERN_INFO "i810_audio: Audio Controller supports %d channels.\n", card->channels);
printk(KERN_INFO "i810_audio: Defaulting to base 2 channel mode.\n");
- reg = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(reg & 0xffcfffff, card, GLOB_CNT);
+ reg = inl(card->iobase + GLOB_CNT);
+ outl(reg & 0xffcfffff, card->iobase + GLOB_CNT);
for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++)
card->ac97_codec[num_ac97] = NULL;
for (num_ac97 = 0; num_ac97 < nr_ac97_max; num_ac97++) {
/* codec reset */
printk(KERN_INFO "i810_audio: Resetting connection %d\n", num_ac97);
- if (card->use_mmio)
- readw(card->ac97base_mmio + 0x80*num_ac97);
- else
- inw(card->ac97base + 0x80*num_ac97);
+ if (card->use_mmio) readw(card->ac97base_mmio + 0x80*num_ac97);
+ else inw(card->ac97base + 0x80*num_ac97);
/* If we have the SDATA_IN Map Register, as on ICH4, we
do not loop thru all possible codec IDs but thru all
goto config_out;
}
dmabuf->count = dmabuf->dmasize;
- CIV_TO_LVI(card, dmabuf->write_channel->port, -1);
+ CIV_TO_LVI(card->iobase+dmabuf->write_channel->port, -1);
local_irq_save(flags);
start_dac(state);
offset = i810_get_dma_addr(state, 0);
return -ENODEV;
}
+ if( pci_resource_start(pci_dev, 1) == 0)
+ {
+ /* MMIO only ICH5 .. here be dragons .. */
+ printk(KERN_ERR "i810_audio: Pure MMIO interfaces not yet supported.\n");
+ return -ENODEV;
+ }
+
if ((card = kmalloc(sizeof(struct i810_card), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "i810_audio: out of memory\n");
return -ENOMEM;
card->ac97base = pci_resource_start (pci_dev, 0);
card->iobase = pci_resource_start (pci_dev, 1);
- if (!(card->ac97base) || !(card->iobase)) {
- card->ac97base = 0;
- card->iobase = 0;
- }
-
/* if chipset could have mmio capability, check it */
if (card_cap[pci_id->driver_data].flags & CAP_MMIO) {
card->ac97base_mmio_phys = pci_resource_start (pci_dev, 2);
}
}
- if (!(card->use_mmio) && (!(card->iobase) || !(card->ac97base))) {
- printk(KERN_ERR "i810_audio: No I/O resources available.\n");
- goto out_mem;
- }
-
card->irq = pci_dev->irq;
card->next = devs;
card->magic = I810_CARD_MAGIC;
break;
}
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int it8172_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, remainder, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
unsigned long flags;
int cnt, remainder, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int it8172_release(struct inode *inode, struct file *file)
if (!card)
return -ENODEV;
file->private_data = card;
- return nonseekable_open(inode, file);
+ return 0;
}
static int ess_release_mixdev(struct inode *inode, struct file *file)
unsigned char *combbuf = NULL;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
up(&s->open_sem);
spin_unlock_irqrestore(&c->lock, flags);
- return nonseekable_open(inode, file);
+ return 0;
}
static int m3_release(struct inode *inode, struct file *file)
file->private_data = card->ac97;
- return nonseekable_open(inode, file);
+ return 0;
}
static int m3_release_mixdev(struct inode *inode, struct file *file)
char *pinfiji = "Pinnacle/Fiji";
#endif
- if (!request_region(dev.io, dev.numio, "probing")) {
+ if (check_region(dev.io, dev.numio)) {
printk(KERN_ERR LOGNAME ": I/O port conflict\n");
return -ENODEV;
}
+ request_region(dev.io, dev.numio, "probing");
if (reset_dsp() < 0) {
release_region(dev.io, dev.numio);
/* Joystick */
pinnacle_devs[3].io0 = joystick_io;
- if (!request_region(cfg, 2, "Pinnacle/Fiji Config")) {
+ if (check_region(cfg, 2)) {
printk(KERN_ERR LOGNAME ": Config port 0x%x conflict\n", cfg);
return -EIO;
}
+ request_region(cfg, 2, "Pinnacle/Fiji Config");
if (msnd_pinnacle_cfg_devices(cfg, reset, pinnacle_devs)) {
printk(KERN_ERR LOGNAME ": Device configuration error\n");
release_region(cfg, 2);
break;
}
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int vrc5477_ac97_release_mixdev(struct inode *inode, struct file *file)
int copyCount;
size_t avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
unsigned long flags;
int copyCount, avail;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
struct list_head *list;
struct vrc5477_ac97_state *s;
int ret=0;
-
- nonseekable_open(inode, file);
+
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
DBG(printk("device num %d open\n",devnum));
- nonseekable_open(in, f);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (dma->mmapped || !dma->opened)
return -ENXIO;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
if (dma->mmapped || !dma->opened)
return -ENXIO;
COMM ("mixer open");
- nonseekable_open(inode, file);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if (last_devc == NULL)
return 0;
- last_devc = NULL;
+ last_devc = 0;
if (hw_config->io_base <= 0)
{
}
VALIDATE_STATE(s);
file->private_data = s;
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_midi_release(struct inode *inode, struct file *file)
outb(1, s->iosynth+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
}
static int sv_dmfm_release(struct inode *inode, struct file *file)
}
+// ---------------------------------------------------------------------
+
+static loff_t cs4297a_llseek(struct file *file, loff_t offset, int origin)
+{
+ return -ESPIPE;
+}
+
+
// ---------------------------------------------------------------------
static int cs4297a_open_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_mixer_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = cs4297a_llseek,
.ioctl = cs4297a_ioctl_mixdev,
.open = cs4297a_open_mixdev,
.release = cs4297a_release_mixdev,
printk(KERN_INFO "cs4297a: cs4297a_read()+ %d \n", count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4297a: cs4297a_open()- 0\n"));
- return nonseekable_open(inode, file);
+ return 0;
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_audio_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
+ .llseek = cs4297a_llseek,
.read = cs4297a_read,
.write = cs4297a_write,
.poll = cs4297a_poll,
pr_debug("trident: trident_read called, count = %d\n", count);
VALIDATE_STATE(state);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
pr_debug("trident: trident_write called, count = %d\n", count);
VALIDATE_STATE(state);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
/*
* Guard against an mmap or ioctl while writing
pr_debug("trident: open virtual channel %d, hard channel %d\n",
state->virt, dmabuf->channel->num);
- return nonseekable_open(inode, file);
+ return 0;
}
static int
match:
file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
static int
* NO WARRANTY
*
* For a list of known bugs (errata) and documentation,
- * see via-audio.pdf in Documentation/DocBook.
+ * see via-audio.pdf in linux/Documentation/DocBook.
* If this documentation does not exist, run "make pdfdocs".
*/
file->private_data = card->ac97;
DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
+ return 0;
}
static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
card = file->private_data;
assert (card != NULL);
+ if (ppos != &file->f_pos) {
+ DPRINTK ("EXIT, returning -ESPIPE\n");
+ return -ESPIPE;
+ }
+
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
card = file->private_data;
assert (card != NULL);
+ if (ppos != &file->f_pos) {
+ DPRINTK ("EXIT, returning -ESPIPE\n");
+ return -ESPIPE;
+ }
+
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
}
DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
+ return 0;
}
/*
* Sound driver for Silicon Graphics 320 and 540 Visual Workstations'
- * onboard audio. See notes in Documentation/sound/oss/vwsnd .
+ * onboard audio. See notes in ../../Documentation/sound/oss/vwsnd .
*
* Copyright 1999 Silicon Graphics, Inc. All rights reserved.
*
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0 }
+ { 0x0, 0x0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned int) rbuf;
- rbuf = NULL;
+ rbuf = 0;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
+ if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
dev.sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PATCH, NULL, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PATCH, 0, buf)) {
printk (KERN_ERR LOGNAME "download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, 0, buf)) {
printk (KERN_WARNING LOGNAME "download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
+ if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
printk (KERN_WARNING LOGNAME "can't get memory stats.\n");
return -1;
} else {
UINT16 sample_short;
UINT32 length;
- UINT16 __user *data_end = NULL;
+ UINT16 __user *data_end = 0;
unsigned int i;
const int max_blksize = 4096/2;
unsigned int written;
if (wavefront_cmd (header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- NULL, sample_hdr)) {
+ 0, sample_hdr)) {
printk (KERN_WARNING LOGNAME "sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, 0, 0)) {
printk (KERN_WARNING LOGNAME "download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
printk (KERN_ERR LOGNAME "download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
printk (KERN_ERR LOGNAME "download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (wavefront_cmd (WFC_SET_NVOICES, NULL, voices)) {
+ if (wavefront_cmd (WFC_SET_NVOICES, 0, voices)) {
printk (KERN_WARNING LOGNAME
"cannot set number of voices to 32.\n");
goto gone_bad;
unsigned int swptr;
int cnt; /* This many to go in this revolution */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
YMFDBGW("ymf_write: count %d\n", count);
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
#endif
up(&unit->open_sem);
- return nonseekable_open(inode, file);
+ return 0;
out_nodma:
/*
match:
file->private_data = unit->ac97_codec[i];
- return nonseekable_open(inode, file);
+ return 0;
}
static int ymf_ioctl_mixdev(struct inode *inode, struct file *file,
codec->dma_area_ba = pba;
codec->dma_area_size = size + 0xff;
- off = (unsigned long)ptr & 0xff;
- if (off) {
+ if ((off = ((uint) ptr) & 0xff) != 0) {
ptr += 0x100 - off;
pba += 0x100 - off;
}
#endif
/* Global resources */
- s8 mixcapt[2];
- s8 mixplayb[4];
+ char mixcapt[2];
+ char mixplayb[4];
#ifndef CHIP_AU8820
- s8 mixspdif[2];
- s8 mixa3d[2]; /* mixers which collect all a3d streams. */
- s8 mixxtlk[2]; /* crosstalk canceler mixer inputs. */
+ char mixspdif[2];
+ char mixa3d[2]; /* mixers which collect all a3d streams. */
+ char mixxtlk[2]; /* crosstalk canceler mixer inputs. */
#endif
u32 fixed_res[5];
static int
snd_vortex_a3d_get(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- //a3dsrc_t *a = kcontrol->private_data;
+ //a3dsrc_t *a = (a3dsrc_t*)(kcontrol->private_value);
/* No read yet. Would this be really useable/needed ? */
return 0;
snd_vortex_a3d_hrtf_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int changed = 1, i;
int coord[6];
for (i = 0; i < 6; i++)
snd_vortex_a3d_itd_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int coord[6];
int i, changed = 1;
for (i = 0; i < 6; i++)
snd_vortex_a3d_ild_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int changed = 1;
int l, r;
/* There may be some scale tranlation needed here. */
snd_vortex_a3d_filter_put(snd_kcontrol_t
* kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = kcontrol->private_data;
+ a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
int i, changed = 1;
int params[6];
for (i = 0; i < 6; i++)
}
static snd_kcontrol_new_t vortex_a3d_kcontrol __devinitdata = {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "Playback PCM advanced processing",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_vortex_a3d_hrtf_info,
- .get = snd_vortex_a3d_get,
- .put = snd_vortex_a3d_hrtf_put,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,.name =
+ "Playback PCM advanced processing",.index =
+ 0,.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,.private_value =
+ 0,.info = snd_vortex_a3d_hrtf_info,.get =
+ snd_vortex_a3d_get,.put = snd_vortex_a3d_hrtf_put
};
/* Control (un)registration. */
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_HRTF;
kcontrol->info = snd_vortex_a3d_hrtf_info;
kcontrol->put = snd_vortex_a3d_hrtf_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_ITD;
kcontrol->info = snd_vortex_a3d_itd_info;
kcontrol->put = snd_vortex_a3d_itd_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_GAINS;
kcontrol->info = snd_vortex_a3d_ild_info;
kcontrol->put = snd_vortex_a3d_ild_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_data = &vortex->a3d[i];
+ kcontrol->private_value = (int)&(vortex->a3d[i]);
kcontrol->id.numid = CTRLID_FILTER;
kcontrol->info = snd_vortex_a3d_filter_info;
kcontrol->put = snd_vortex_a3d_filter_put;
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 0);
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef WIN9X
+#if WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 1);
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef WIN9X
+#if WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef QUERY_HARDWARE
+#if QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_PLAY_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#ifdef QUERY_HARDWARE
+#if QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_REC_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
}
static long snd_cs4281_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (pos + size > CS4281_BA0_SIZE)
- size = (long)CS4281_BA0_SIZE - pos;
+ if (file->f_pos + size > CS4281_BA0_SIZE)
+ size = (long)CS4281_BA0_SIZE - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba0 + pos, size))
+ if (copy_to_user_fromio(buf, chip->ba0 + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
static long snd_cs4281_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (pos + size > CS4281_BA1_SIZE)
- size = (long)CS4281_BA1_SIZE - pos;
+ if (file->f_pos + size > CS4281_BA1_SIZE)
+ size = (long)CS4281_BA1_SIZE - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba1 + pos, size))
+ if (copy_to_user_fromio(buf, chip->ba1 + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
*/
static long snd_cs46xx_io_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
snd_cs46xx_region_t *region = (snd_cs46xx_region_t *)entry->private_data;
size = count;
- if (pos + (size_t)size > region->size)
- size = region->size - pos;
+ if (file->f_pos + (size_t)size > region->size)
+ size = region->size - file->f_pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, region->remap_addr + pos, size))
+ if (copy_to_user_fromio(buf, region->remap_addr + file->f_pos, size))
return -EFAULT;
+ file->f_pos += size;
}
return size;
}
#define TOTAL_SIZE_CODE (0x200*8)
static long snd_emu10k1_fx8010_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
long size;
emu10k1_t *emu = snd_magic_cast(emu10k1_t, entry->private_data, return -ENXIO);
offset = emu->audigy ? A_FXGPREGBASE : FXGPREGBASE;
}
size = count;
- if (pos + size > entry->size)
- size = (long)entry->size - pos;
+ if (file->f_pos + size > entry->size)
+ size = (long)entry->size - file->f_pos;
if (size > 0) {
unsigned int *tmp;
long res;
unsigned int idx;
if ((tmp = kmalloc(size + 8, GFP_KERNEL)) == NULL)
return -ENOMEM;
- for (idx = 0; idx < ((pos & 3) + size + 3) >> 2; idx++)
- tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0);
- if (copy_to_user(buf, ((char *)tmp) + (pos & 3), size))
+ for (idx = 0; idx < ((file->f_pos & 3) + size + 3) >> 2; idx++)
+ tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (file->f_pos >> 2), 0);
+ if (copy_to_user(buf, ((char *)tmp) + (file->f_pos & 3), size))
res = -EFAULT;
else {
res = size;
+ file->f_pos += size;
}
kfree(tmp);
return res;
if ((val & 0xff00) < 0x1f00)
val += 0x0100;
}
- if (val == 0x1f1f)
- val |= 0x8000;
snd_ac97_write_cache(chip->ac97, AC97_MASTER, val);
snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&chip->master_volume->id);
snd_ice1712_hoontech_cards,
snd_ice1712_delta_cards,
snd_ice1712_ews_cards,
- NULL,
+ 0,
};
static unsigned char __devinit snd_ice1712_read_i2c(ice1712_t *ice,
return -EBUSY; /* FIXME: should handle blocking mode properly */
}
up(&ice->open_mutex);
- runtime->private_data = (void*)(1UL << (substream->number + 4));
+ runtime->private_data = (void*)(1 << (substream->number + 4));
ice->playback_con_substream_ds[substream->number] = substream;
runtime->hw = snd_vt1724_2ch_stereo;
snd_pcm_set_sync(substream);
snd_vt1724_revo_cards,
snd_vt1724_amp_cards,
snd_vt1724_aureon_cards,
- NULL,
+ 0,
};
{ 0x5455, "ALi M5455" },
{ 0x746d, "AMD AMD8111" },
#endif
- { 0 },
+ { 0, 0 },
};
static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
.amp_gpio = 0x03,
},
/* END */
- { NULL }
+ { 0 }
};
mixart_BA0 proc interface for BAR 0 - read callback
*/
static long snd_mixart_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(pos + count > MIXART_BA0_SIZE)
- count = (long)(MIXART_BA0_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
+ if(file->f_pos + count > MIXART_BA0_SIZE)
+ count = (long)(MIXART_BA0_SIZE - file->f_pos);
+ if(copy_to_user_fromio(buf, MIXART_MEM( mgr, file->f_pos ), count))
return -EFAULT;
+ file->f_pos += count;
return count;
}
mixart_BA1 proc interface for BAR 1 - read callback
*/
static long snd_mixart_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf,
- unsigned long count, unsigned long pos)
+ struct file *file, char __user *buf, long count)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(pos + count > MIXART_BA1_SIZE)
- count = (long)(MIXART_BA1_SIZE - pos);
- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
+ if(file->f_pos + count > MIXART_BA1_SIZE)
+ count = (long)(MIXART_BA1_SIZE - file->f_pos);
+ if(copy_to_user_fromio(buf, MIXART_REG( mgr, file->f_pos ), count))
return -EFAULT;
+ file->f_pos += count;
return count;
}
snd_nm256_capture_copy(snd_pcm_substream_t *substream,
int channel, /* not used (interleaved data) */
snd_pcm_uframes_t pos,
- void __user *dst,
+ void *dst,
snd_pcm_uframes_t count)
{
snd_pcm_runtime_t *runtime = substream->runtime;
hdsp->irq = -1;
hdsp->state = 0;
- hdsp->midi[0].rmidi = NULL;
- hdsp->midi[1].rmidi = NULL;
- hdsp->midi[0].input = NULL;
- hdsp->midi[1].input = NULL;
- hdsp->midi[0].output = NULL;
- hdsp->midi[1].output = NULL;
+ hdsp->midi[0].rmidi = 0;
+ hdsp->midi[1].rmidi = 0;
+ hdsp->midi[0].input = 0;
+ hdsp->midi[1].input = 0;
+ hdsp->midi[0].output = 0;
+ hdsp->midi[1].output = 0;
spin_lock_init(&hdsp->midi[0].lock);
spin_lock_init(&hdsp->midi[1].lock);
hdsp->iobase = 0;
- hdsp->res_port = NULL;
+ hdsp->res_port = 0;
hdsp->control_register = 0;
hdsp->control2_register = 0;
hdsp->io_type = Undefined;
sonic->mode |= SV_MODE_PLAY;
sonic->playback_substream = substream;
runtime->hw = snd_sonicvibes_playback;
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1);
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, 0, SNDRV_PCM_HW_PARAM_RATE, -1);
return 0;
}
menu "ALSA PowerMac devices"
depends on SND!=n && PPC
-comment "ALSA PowerMac requires I2C"
- depends on SND && I2C=n
-
config SND_POWERMAC
tristate "PowerMac (AWACS, DACA, Burgundy, Tumbler, Keywest)"
- depends on SND && I2C
+ depends on SND
select SND_PCM
endmenu
sound = sound->next;
if (! sound)
return -ENODEV;
- prop = (unsigned int *) get_property(sound, "sub-frame", NULL);
+ prop = (unsigned int *) get_property(sound, "sub-frame", 0);
if (prop && *prop < 16)
chip->subframe = *prop;
/* This should be verified on older screamers */
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
- prop = (unsigned int *)get_property(sound, "device-id", NULL);
+ prop = (unsigned int *)get_property(sound, "device-id", 0);
if (prop)
chip->device_id = *prop;
chip->has_iic = (find_devices("perch") != NULL);
{
if (u->urb) {
usb_free_urb(u->urb);
- u->urb = NULL;
+ u->urb = 0;
}
if (u->buf) {
kfree(u->buf);
- u->buf = NULL;
+ u->buf = 0;
}
}
release_urb_ctx(&subs->syncurb[i]);
if (subs->tmpbuf) {
kfree(subs->tmpbuf);
- subs->tmpbuf = NULL;
+ subs->tmpbuf = 0;
}
subs->nurbs = 0;
}
{
if (kctl->private_data) {
snd_magic_kfree((void *)kctl->private_data);
- kctl->private_data = NULL;
+ kctl->private_data = 0;
}
}
usb_mixer_elem_info_t *cval = snd_magic_cast(usb_mixer_elem_info_t, kctl->private_data,);
num_ins = cval->max;
snd_magic_kfree(cval);
- kctl->private_data = NULL;
+ kctl->private_data = 0;
}
if (kctl->private_value) {
char **itemlist = (char **)kctl->private_value;