Linux 2.6.x on MPC52xx family
-----------------------------
-For the latest info, go to http://www.246tNt.com/mpc52xx/state.txt
+For the latest info, go to http://www.246tNt.com/mpc52xx/
To compile/use:
- Of course, I drew inspiration from the 2.4 port. If you think I forgot to
mention you/your company in the copyright of some code, I'll correct it
ASAP.
- - The codes wants the MBAR to be set at 0xf0000000 by the bootloader. It's
- mapped 1:1 with the MMU. If for whatever reason, you want to change this,
- beware that some code depends on the 0xf0000000 address and other depends
- on the 1:1 mapping.
- - Most of the code assumes that port multiplexing, frequency selection, ...
- has already been done. IMHO this should be done as early as possible, in
- the bootloader. If for whatever reason you can't do it there, do it in the
- platform setup code (if U-Boot) or in the arch/ppc/boot/simple/... (if
- DBug)
are locklessly updated.
Each scheduling domain spans a number of CPUs (stored in the ->span field).
-A domain's span MUST be a superset of it child's span, and a base domain
-for CPU i MUST span at least i. The top domain for each CPU will generally
-span all CPUs in the system although strictly it doesn't have to, but this
-could lead to a case where some CPUs will never be given tasks to run unless
-the CPUs allowed mask is explicitly set. A sched domain's span means "balance
-process load among these CPUs".
+A domain's span MUST be a superset of its child's span (this restriction
+could be relaxed if the need arises), and a base domain for CPU i MUST span
+at least i. The top domain for each CPU will generally span all CPUs in the
+system, although strictly it doesn't have to; if it doesn't, some CPUs may
+never be given tasks to run unless the CPUs-allowed mask is explicitly set.
+A sched domain's span means "balance process load among these CPUs".
Each scheduling domain must have one or more CPU groups (struct sched_group)
which are organised as a circular one way linked list from the ->groups
struct sched_domain fields, SD_FLAG_*, SD_*_INIT to get an idea of
the specifics and what to tune.
+For SMT, the architecture must define CONFIG_SCHED_SMT and provide a
+cpumask_t cpu_sibling_map[NR_CPUS], where cpu_sibling_map[i] is the mask of
+all "i"'s siblings as well as "i" itself.
+
+Architectures may override the default SD_*_INIT flags while still using
+the generic domain builder in kernel/sched.c if they wish to retain the
+traditional SMT->SMP->NUMA topology (or some subset of that). This can be
+done by #define'ing ARCH_HAS_SCHED_TUNE.
+
+Alternatively, the architecture may completely override the generic domain
+builder by #define'ing ARCH_HAS_SCHED_DOMAIN and exporting its own
+arch_init_sched_domains function. This function will attach domains to all
+CPUs using cpu_attach_domain.
+
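As a concrete illustration, here is a minimal sketch of such an override,
assuming only the interfaces named above (SD_CPU_INIT, cpu_attach_domain);
it gives every online CPU one system-wide domain and is not any
architecture's actual implementation:

/*
 * Hedged sketch: attach a single all-CPU domain to each online CPU.
 * Real implementations must also build the ->groups ring for each
 * domain, which is omitted here for brevity.
 */
static struct sched_domain cpu_domains[NR_CPUS];

void arch_init_sched_domains(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		struct sched_domain *sd = &cpu_domains[i];

		if (!cpu_online(i))
			continue;
		*sd = SD_CPU_INIT;		/* default per-CPU tuning */
		sd->span = cpu_online_map;	/* balance among all CPUs */
		cpu_attach_domain(sd, i);
	}
}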
Implementors should change the line
#undef SCHED_DOMAIN_DEBUG
to
#define SCHED_DOMAIN_DEBUG
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- return rate;
+ struct icst525_vco vco;
+
+ vco = icst525_khz_to_vco(clk->params, rate / 1000);
+ return icst525_khz(clk->params, vco) * 1000;
}
EXPORT_SYMBOL(clk_round_rate);
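The /1000 and *1000 conversions exist because the clk API deals in Hz while
the icst525 helpers deal in kHz. A hedged usage sketch (the "UARTCLK" name
is purely illustrative):

	struct clk *clk = clk_get(NULL, "UARTCLK");
	long rate;

	if (!IS_ERR(clk)) {
		rate = clk_round_rate(clk, 14745600);	/* nearest Hz */
		clk_put(clk);
	}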
if (clk->setvco) {
struct icst525_vco vco;
- vco = icst525_khz_to_vco(clk->params, rate);
- clk->rate = icst525_khz(clk->params, vco);
+ vco = icst525_khz_to_vco(clk->params, rate / 1000);
+ clk->rate = icst525_khz(clk->params, vco) * 1000;
printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n",
clk->name, vco.s, vco.r, vco.v);
if (clk->setvco) {
struct icst525_vco vco;
- vco = icst525_khz_to_vco(clk->params, rate);
- clk->rate = icst525_khz(clk->params, vco);
+ vco = icst525_khz_to_vco(clk->params, rate / 1000);
+ clk->rate = icst525_khz(clk->params, vco) * 1000;
printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n",
clk->name, vco.s, vco.r, vco.v);
return 0;
panic("Whooops in pcibios_map_irq");
}
-
-struct pci_fixup pcibios_fixups[] = {
- {0}
-};
return 0;
panic("Whooops in pcibios_map_irq");
}
-
-struct pci_fixup pcibios_fixups[] = {
- {0}
-};
return -1;
}
-
-struct pci_fixup pcibios_fixups[] = {
- {0}
-};
return irq;
}
-
-struct pci_fixup pcibios_fixups[] __initdata = {
- { .pass = 0, },
-};
#define PHY_INTERRUPT SIU_INT_IRQ7
+/* For our show_cpuinfo hooks. */
+#define CPUINFO_VENDOR "Embedded Planet"
+#define CPUINFO_MACHINE "EP8260 PowerPC"
+
+/* Warm reset vector. */
+#define BOOTROM_RESTART_ADDR ((uint)0xfff00104)
+
#endif /* __ASM_PLATFORMS_RPX8260_H__ */
#endif /* __KERNEL__ */
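As a hedged sketch of how the CPUINFO_* strings might be consumed, below is
a show_cpuinfo hook in the usual 2.6 ppc seq_file style; the function name
and its wiring into ppc_md are assumptions, not code from this patch:

#include <linux/seq_file.h>

/* Hypothetical hook printing the constants defined above. */
static int ep8260_show_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "vendor\t\t: %s\n", CPUINFO_VENDOR);
	seq_printf(m, "machine\t\t: %s\n", CPUINFO_MACHINE);
	return 0;
}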
#
targets := vmlinux vmlinux.bin vmlinux.bin.gz \
- head.o misc.o cache.o piggy.o vmlinux.lds.o
+ head.o misc.o cache.o piggy.o vmlinux.lds
EXTRA_AFLAGS := -traditional
ZIMAGE_OFFSET = $(shell printf "0x%8x" $$[$(CONFIG_MEMORY_START)+0x400000+0x10000])
LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \
- -T $(obj)/../../kernel/vmlinux.lds.s \
+ -T $(obj)/../../kernel/vmlinux.lds \
--no-warn-mismatch
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T
OBJCOPYFLAGS += -R .empty_zero_page
-$(obj)/piggy.o: $(obj)/vmlinux.lds.s $(obj)/vmlinux.bin.gz FORCE
+$(obj)/piggy.o: $(obj)/vmlinux.lds $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,ld)
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
-#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/registers.h> /* required by inline asm statements */
#include <asm/hardware.h>
#include <asm/irq.h>
+#include <asm/arch/pxa-regs.h>
#include <asm/arch/mainstone.h>
#include "soc_common.h"
static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
+ /*
+ * Setup default state of GPIO outputs
+ * before we enable them as outputs.
+ */
+ GPSR(GPIO48_nPOE) =
+ GPIO_bit(GPIO48_nPOE) |
+ GPIO_bit(GPIO49_nPWE) |
+ GPIO_bit(GPIO50_nPIOR) |
+ GPIO_bit(GPIO51_nPIOW) |
+ GPIO_bit(GPIO85_nPCE_1) |
+ GPIO_bit(GPIO54_nPCE_2);
+
+ pxa_gpio_mode(GPIO48_nPOE_MD);
+ pxa_gpio_mode(GPIO49_nPWE_MD);
+ pxa_gpio_mode(GPIO50_nPIOR_MD);
+ pxa_gpio_mode(GPIO51_nPIOW_MD);
+ pxa_gpio_mode(GPIO85_nPCE_1_MD);
+ pxa_gpio_mode(GPIO54_nPCE_2_MD);
+ pxa_gpio_mode(GPIO79_pSKTSEL_MD);
+ pxa_gpio_mode(GPIO55_nPREG_MD);
+ pxa_gpio_mode(GPIO56_nPWAIT_MD);
+ pxa_gpio_mode(GPIO57_nIOIS16_MD);
+
skt->irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ;
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
* For speed we use the collation rule @cr as an index into two tables of
* function pointers to call the appropriate collation function.
*/
-int ntfs_collate(ntfs_volume *vol, COLLATION_RULES cr,
+int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
const void *data1, const int data1_len,
const void *data2, const int data2_len)
{
+ int i;
+
ntfs_debug("Entering.");
/*
* FIXME: At the moment we only support COLLATION_BINARY and
* COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
*/
BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
- cr = le32_to_cpu(cr);
- BUG_ON(cr < 0);
- if (cr <= 0x02)
- return ntfs_do_collate0x0[cr](vol, data1, data1_len,
+ i = le32_to_cpu(cr);
+ BUG_ON(i < 0);
+ if (i <= 0x02)
+ return ntfs_do_collate0x0[i](vol, data1, data1_len,
data2, data2_len);
- BUG_ON(cr < 0x10);
- cr -= 0x10;
- if (likely(cr <= 3))
- return ntfs_do_collate0x1[cr](vol, data1, data1_len,
+ BUG_ON(i < 0x10);
+ i -= 0x10;
+ if (likely(i <= 3))
+ return ntfs_do_collate0x1[i](vol, data1, data1_len,
data2, data2_len);
BUG();
return 0;
struct reiserfs_xattr_handler security_handler = {
- prefix: XATTR_SECURITY_PREFIX,
- get: security_get,
- set: security_set,
- del: security_del,
- list: security_list,
+ .prefix = XATTR_SECURITY_PREFIX,
+ .get = security_get,
+ .set = security_set,
+ .del = security_del,
+ .list = security_list,
};
struct reiserfs_xattr_handler trusted_handler = {
- prefix: XATTR_TRUSTED_PREFIX,
- get: trusted_get,
- set: trusted_set,
- del: trusted_del,
- list: trusted_list,
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .get = trusted_get,
+ .set = trusted_set,
+ .del = trusted_del,
+ .list = trusted_list,
};
}
struct reiserfs_xattr_handler user_handler = {
- prefix: XATTR_USER_PREFIX,
- get: user_get,
- set: user_set,
- del: user_del,
- list: user_list,
+ .prefix = XATTR_USER_PREFIX,
+ .get = user_get,
+ .set = user_set,
+ .del = user_del,
+ .list = user_list,
};
extern int fs_noerr(void);
extern int fs_nosys(void);
-extern int fs_nodev(void);
extern void fs_noval(void);
extern void fs_tosspages(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
extern void fs_flushinval_pages(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
* kind, whether express or implied.
*/
-#ifndef __MPC52xx_PSC_H__
-#define __MPC52xx_PSC_H__
+#ifndef __ASM_MPC52xx_PSC_H__
+#define __ASM_MPC52xx_PSC_H__
#include <asm/types.h>
/* Structure of the hardware registers */
struct mpc52xx_psc {
- volatile u8 mode; /* PSC + 0x00 */
- volatile u8 reserved0[3];
- union { /* PSC + 0x04 */
- volatile u16 status;
- volatile u16 clock_select;
+ u8 mode; /* PSC + 0x00 */
+ u8 reserved0[3];
+ union { /* PSC + 0x04 */
+ u16 status;
+ u16 clock_select;
} sr_csr;
#define mpc52xx_psc_status sr_csr.status
-#define mpc52xx_psc_clock_select sr_csr.clock_select
- volatile u16 reserved1;
- volatile u8 command; /* PSC + 0x08 */
-volatile u8 reserved2[3];
- union { /* PSC + 0x0c */
- volatile u8 buffer_8;
- volatile u16 buffer_16;
- volatile u32 buffer_32;
+#define mpc52xx_psc_clock_select sr_csr.clock_select
+ u16 reserved1;
+ u8 command; /* PSC + 0x08 */
+ u8 reserved2[3];
+ union { /* PSC + 0x0c */
+ u8 buffer_8;
+ u16 buffer_16;
+ u32 buffer_32;
} buffer;
#define mpc52xx_psc_buffer_8 buffer.buffer_8
#define mpc52xx_psc_buffer_16 buffer.buffer_16
#define mpc52xx_psc_buffer_32 buffer.buffer_32
- union { /* PSC + 0x10 */
- volatile u8 ipcr;
- volatile u8 acr;
+ union { /* PSC + 0x10 */
+ u8 ipcr;
+ u8 acr;
} ipcr_acr;
#define mpc52xx_psc_ipcr ipcr_acr.ipcr
#define mpc52xx_psc_acr ipcr_acr.acr
- volatile u8 reserved3[3];
- union { /* PSC + 0x14 */
- volatile u16 isr;
- volatile u16 imr;
+ u8 reserved3[3];
+ union { /* PSC + 0x14 */
+ u16 isr;
+ u16 imr;
} isr_imr;
#define mpc52xx_psc_isr isr_imr.isr
#define mpc52xx_psc_imr isr_imr.imr
- volatile u16 reserved4;
- volatile u8 ctur; /* PSC + 0x18 */
- volatile u8 reserved5[3];
- volatile u8 ctlr; /* PSC + 0x1c */
- volatile u8 reserved6[3];
- volatile u16 ccr; /* PSC + 0x20 */
- volatile u8 reserved7[14];
- volatile u8 ivr; /* PSC + 0x30 */
- volatile u8 reserved8[3];
- volatile u8 ip; /* PSC + 0x34 */
- volatile u8 reserved9[3];
- volatile u8 op1; /* PSC + 0x38 */
- volatile u8 reserved10[3];
- volatile u8 op0; /* PSC + 0x3c */
- volatile u8 reserved11[3];
- volatile u32 sicr; /* PSC + 0x40 */
- volatile u8 ircr1; /* PSC + 0x44 */
- volatile u8 reserved13[3];
- volatile u8 ircr2; /* PSC + 0x44 */
- volatile u8 reserved14[3];
- volatile u8 irsdr; /* PSC + 0x4c */
- volatile u8 reserved15[3];
- volatile u8 irmdr; /* PSC + 0x50 */
- volatile u8 reserved16[3];
- volatile u8 irfdr; /* PSC + 0x54 */
- volatile u8 reserved17[3];
- volatile u16 rfnum; /* PSC + 0x58 */
- volatile u16 reserved18;
- volatile u16 tfnum; /* PSC + 0x5c */
- volatile u16 reserved19;
- volatile u32 rfdata; /* PSC + 0x60 */
- volatile u16 rfstat; /* PSC + 0x64 */
- volatile u16 reserved20;
- volatile u8 rfcntl; /* PSC + 0x68 */
- volatile u8 reserved21[5];
- volatile u16 rfalarm; /* PSC + 0x6e */
- volatile u16 reserved22;
- volatile u16 rfrptr; /* PSC + 0x72 */
- volatile u16 reserved23;
- volatile u16 rfwptr; /* PSC + 0x76 */
- volatile u16 reserved24;
- volatile u16 rflrfptr; /* PSC + 0x7a */
- volatile u16 reserved25;
- volatile u16 rflwfptr; /* PSC + 0x7e */
- volatile u32 tfdata; /* PSC + 0x80 */
- volatile u16 tfstat; /* PSC + 0x84 */
- volatile u16 reserved26;
- volatile u8 tfcntl; /* PSC + 0x88 */
- volatile u8 reserved27[5];
- volatile u16 tfalarm; /* PSC + 0x8e */
- volatile u16 reserved28;
- volatile u16 tfrptr; /* PSC + 0x92 */
- volatile u16 reserved29;
- volatile u16 tfwptr; /* PSC + 0x96 */
- volatile u16 reserved30;
- volatile u16 tflrfptr; /* PSC + 0x9a */
- volatile u16 reserved31;
- volatile u16 tflwfptr; /* PSC + 0x9e */
+ u16 reserved4;
+ u8 ctur; /* PSC + 0x18 */
+ u8 reserved5[3];
+ u8 ctlr; /* PSC + 0x1c */
+ u8 reserved6[3];
+ u16 ccr; /* PSC + 0x20 */
+ u8 reserved7[14];
+ u8 ivr; /* PSC + 0x30 */
+ u8 reserved8[3];
+ u8 ip; /* PSC + 0x34 */
+ u8 reserved9[3];
+ u8 op1; /* PSC + 0x38 */
+ u8 reserved10[3];
+ u8 op0; /* PSC + 0x3c */
+ u8 reserved11[3];
+ u32 sicr; /* PSC + 0x40 */
+ u8 ircr1; /* PSC + 0x44 */
+ u8 reserved13[3];
+ u8 ircr2; /* PSC + 0x48 */
+ u8 reserved14[3];
+ u8 irsdr; /* PSC + 0x4c */
+ u8 reserved15[3];
+ u8 irmdr; /* PSC + 0x50 */
+ u8 reserved16[3];
+ u8 irfdr; /* PSC + 0x54 */
+ u8 reserved17[3];
+ u16 rfnum; /* PSC + 0x58 */
+ u16 reserved18;
+ u16 tfnum; /* PSC + 0x5c */
+ u16 reserved19;
+ u32 rfdata; /* PSC + 0x60 */
+ u16 rfstat; /* PSC + 0x64 */
+ u16 reserved20;
+ u8 rfcntl; /* PSC + 0x68 */
+ u8 reserved21[5];
+ u16 rfalarm; /* PSC + 0x6e */
+ u16 reserved22;
+ u16 rfrptr; /* PSC + 0x72 */
+ u16 reserved23;
+ u16 rfwptr; /* PSC + 0x76 */
+ u16 reserved24;
+ u16 rflrfptr; /* PSC + 0x7a */
+ u16 reserved25;
+ u16 rflwfptr; /* PSC + 0x7e */
+ u32 tfdata; /* PSC + 0x80 */
+ u16 tfstat; /* PSC + 0x84 */
+ u16 reserved26;
+ u8 tfcntl; /* PSC + 0x88 */
+ u8 reserved27[5];
+ u16 tfalarm; /* PSC + 0x8e */
+ u16 reserved28;
+ u16 tfrptr; /* PSC + 0x92 */
+ u16 reserved29;
+ u16 tfwptr; /* PSC + 0x96 */
+ u16 reserved30;
+ u16 tflrfptr; /* PSC + 0x9a */
+ u16 reserved31;
+ u16 tflwfptr; /* PSC + 0x9e */
};
-#endif /* __MPC52xx_PSC_H__ */
+#endif /* __ASM_MPC52xx_PSC_H__ */
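Dropping volatile follows the kernel convention that memory-mapped registers
are accessed through the I/O accessors, which provide the required ordering.
A hedged sketch using the standard ppc in_be16() helper (psc is assumed to
be an already-mapped register block; the function name is illustrative):

#include <asm/io.h>

/* Read the PSC status register via an accessor instead of relying on
 * volatile struct members. */
static inline u16 psc_read_status(struct mpc52xx_psc *psc)
{
	return in_be16(&psc->mpc52xx_psc_status);
}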
atomic_t count;
int sleepers;
wait_queue_head_t wait;
-#ifdef WAITQUEUE_DEBUG
- long __magic;
-#endif
};
-#ifdef WAITQUEUE_DEBUG
-# define __SEM_DEBUG_INIT(name) \
- , (int)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
- __SEM_DEBUG_INIT(name) }
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+ .count = ATOMIC_INIT(n), \
+ .sleepers = 0, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1)
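Usage is unchanged by the switch to designated initializers; a hedged sketch
(the names example_sem and example_func are illustrative):

static DECLARE_MUTEX(example_sem);	/* count initialized to 1 */

static void example_func(void)
{
	down(&example_sem);
	/* ... critical section ... */
	up(&example_sem);
}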
atomic_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
-#ifdef WAITQUEUE_DEBUG
- sem->__magic = (int)&sem->__magic;
-#endif
}
static inline void init_MUTEX (struct semaphore *sem)
static inline void down(struct semaphore * sem)
{
-#ifdef WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
-#ifdef WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
-#ifdef WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
*/
static inline void up(struct semaphore * sem)
{
-#ifdef WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
if (atomic_inc_return(&sem->count) <= 0)
__up(sem);
}
unsigned long mask;
unsigned long value;
int size_level;
+
+ struct prio_tree_root *root;
+ pgoff_t r_index;
+ pgoff_t h_index;
};
+static inline void prio_tree_iter_init(struct prio_tree_iter *iter,
+ struct prio_tree_root *root, pgoff_t r_index, pgoff_t h_index)
+{
+ iter->root = root;
+ iter->r_index = r_index;
+ iter->h_index = h_index;
+}
+
#define INIT_PRIO_TREE_ROOT(ptr) \
do { \
(ptr)->prio_tree_node = NULL; \
return index_bits_to_maxindex[bits - 1];
}
+static void prio_tree_remove(struct prio_tree_root *, struct prio_tree_node *);
+
/*
* Extend a priority search tree so that it can store a node with heap_index
* max_heap_index. In the worst case, this algorithm takes O((log n)^2).
static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
struct prio_tree_node *node, unsigned long max_heap_index)
{
- static void prio_tree_remove(struct prio_tree_root *,
- struct prio_tree_node *);
struct prio_tree_node *first = NULL, *prev, *last = NULL;
if (max_heap_index > prio_tree_maxindex(root->index_bits))
* 'm' is the number of prio_tree_nodes that overlap the interval X.
*/
-static struct prio_tree_node *prio_tree_left(
- struct prio_tree_root *root, struct prio_tree_iter *iter,
- unsigned long radix_index, unsigned long heap_index,
+static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
unsigned long *r_index, unsigned long *h_index)
{
if (prio_tree_left_empty(iter->cur))
GET_INDEX(iter->cur->left, *r_index, *h_index);
- if (radix_index <= *h_index) {
+ if (iter->r_index <= *h_index) {
iter->cur = iter->cur->left;
iter->mask >>= 1;
if (iter->mask) {
iter->mask = ULONG_MAX;
} else {
iter->size_level = 1;
- iter->mask = 1UL << (root->index_bits - 1);
+ iter->mask = 1UL << (iter->root->index_bits - 1);
}
}
return iter->cur;
return NULL;
}
-static struct prio_tree_node *prio_tree_right(
- struct prio_tree_root *root, struct prio_tree_iter *iter,
- unsigned long radix_index, unsigned long heap_index,
+static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
unsigned long *r_index, unsigned long *h_index)
{
unsigned long value;
else
value = iter->value | iter->mask;
- if (heap_index < value)
+ if (iter->h_index < value)
return NULL;
GET_INDEX(iter->cur->right, *r_index, *h_index);
- if (radix_index <= *h_index) {
+ if (iter->r_index <= *h_index) {
iter->cur = iter->cur->right;
iter->mask >>= 1;
iter->value = value;
iter->mask = ULONG_MAX;
} else {
iter->size_level = 1;
- iter->mask = 1UL << (root->index_bits - 1);
+ iter->mask = 1UL << (iter->root->index_bits - 1);
}
}
return iter->cur;
return iter->cur;
}
-static inline int overlap(unsigned long radix_index, unsigned long heap_index,
+static inline int overlap(struct prio_tree_iter *iter,
unsigned long r_index, unsigned long h_index)
{
- return heap_index >= r_index && radix_index <= h_index;
+ return iter->h_index >= r_index && iter->r_index <= h_index;
}
/*
* heap_index]. Note that always radix_index <= heap_index. We do a pre-order
* traversal of the tree.
*/
-static struct prio_tree_node *prio_tree_first(struct prio_tree_root *root,
- struct prio_tree_iter *iter, unsigned long radix_index,
- unsigned long heap_index)
+static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
{
+ struct prio_tree_root *root;
unsigned long r_index, h_index;
INIT_PRIO_TREE_ITER(iter);
+ root = iter->root;
if (prio_tree_empty(root))
return NULL;
GET_INDEX(root->prio_tree_node, r_index, h_index);
- if (radix_index > h_index)
+ if (iter->r_index > h_index)
return NULL;
iter->mask = 1UL << (root->index_bits - 1);
iter->cur = root->prio_tree_node;
while (1) {
- if (overlap(radix_index, heap_index, r_index, h_index))
+ if (overlap(iter, r_index, h_index))
return iter->cur;
- if (prio_tree_left(root, iter, radix_index, heap_index,
- &r_index, &h_index))
+ if (prio_tree_left(iter, &r_index, &h_index))
continue;
- if (prio_tree_right(root, iter, radix_index, heap_index,
- &r_index, &h_index))
+ if (prio_tree_right(iter, &r_index, &h_index))
continue;
break;
*
* Get the next prio_tree_node that overlaps with the input interval in iter
*/
-static struct prio_tree_node *prio_tree_next(struct prio_tree_root *root,
- struct prio_tree_iter *iter, unsigned long radix_index,
- unsigned long heap_index)
+static struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
{
unsigned long r_index, h_index;
repeat:
- while (prio_tree_left(root, iter, radix_index,
- heap_index, &r_index, &h_index)) {
- if (overlap(radix_index, heap_index, r_index, h_index))
+ while (prio_tree_left(iter, &r_index, &h_index))
+ if (overlap(iter, r_index, h_index))
return iter->cur;
- }
- while (!prio_tree_right(root, iter, radix_index,
- heap_index, &r_index, &h_index)) {
+ while (!prio_tree_right(iter, &r_index, &h_index)) {
while (!prio_tree_root(iter->cur) &&
iter->cur->parent->right == iter->cur)
prio_tree_parent(iter);
prio_tree_parent(iter);
}
- if (overlap(radix_index, heap_index, r_index, h_index))
+ if (overlap(iter, r_index, h_index))
return iter->cur;
goto repeat;
BUG_ON(RADIX_INDEX(vma) != RADIX_INDEX(old));
BUG_ON(HEAP_INDEX(vma) != HEAP_INDEX(old));
+ vma->shared.vm_set.head = NULL;
+ vma->shared.vm_set.parent = NULL;
+
if (!old->shared.vm_set.parent)
list_add(&vma->shared.vm_set.list,
&old->shared.vm_set.list);
struct prio_tree_node *ptr;
struct vm_area_struct *old;
+ vma->shared.vm_set.head = NULL;
+
ptr = prio_tree_insert(root, &vma->shared.prio_tree_node);
if (ptr != &vma->shared.prio_tree_node) {
old = prio_tree_entry(ptr, struct vm_area_struct,
* page in the given range of contiguous file pages.
*/
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
- struct prio_tree_root *root, struct prio_tree_iter *iter,
- pgoff_t begin, pgoff_t end)
+ struct prio_tree_iter *iter)
{
struct prio_tree_node *ptr;
struct vm_area_struct *next;
/*
* First call is with NULL vma
*/
- ptr = prio_tree_first(root, iter, begin, end);
+ ptr = prio_tree_first(iter);
if (ptr) {
next = prio_tree_entry(ptr, struct vm_area_struct,
shared.prio_tree_node);
}
}
- ptr = prio_tree_next(root, iter, begin, end);
+ ptr = prio_tree_next(iter);
if (ptr) {
next = prio_tree_entry(ptr, struct vm_area_struct,
shared.prio_tree_node);
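With the root and query interval now carried in the iterator, a caller's
lookup loop reduces to the following hedged sketch (root, begin and end
stand for the caller's mapping tree and page range):

	struct prio_tree_iter iter;
	struct vm_area_struct *vma = NULL;

	prio_tree_iter_init(&iter, root, begin, end);
	while ((vma = vma_prio_tree_next(vma, &iter)) != NULL) {
		/* vma overlaps the range [begin, end] */
	}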
-host-progs := modpost mk_elfconfig
-always := $(host-progs) empty.o
+hostprogs-y := modpost mk_elfconfig
+always := $(hostprogs-y) empty.o
modpost-objs := modpost.o file2alias.o sumversion.o
set -e
# Some variables and settings used throughout the script
-version="$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION"
+version=$KERNELRELEASE
tmpdir="$objtree/debian/tmp"
# Setup the directory structure