/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/cache.h>
#include <asm/percpu.h>
#include <asm/system.h>
#include <asm/sn/bte.h>

/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */

typedef struct pda_s {

	/*
	 * Having a pointer at the beginning of the PDA tends to increase
	 * the chance of having this pointer in cache. (Yes, something
	 * else gets pushed out.) Doing this reduces the number of memory
	 * accesses needed to reach any nodepda variable to one.
	 */
	struct nodepda_s	*p_nodepda;	/* Pointer to per-node PDA */
	struct subnodepda_s	*p_subnodepda;	/* Pointer to CPU subnode PDA */

	/*
	 * Support for SN LEDs
	 */
	volatile short	*led_address;
	u16		nasid_bitmask;
	u8		shub2;
	u8		nasid_shift;
	u8		as_shift;
	u8		shub_1_1_found;
	u8		led_state;
	u8		hb_state;	/* supports blinking heartbeat leds */
	unsigned int	hb_count;

	unsigned int	idle_flag;

	volatile unsigned long *bedrock_rev_id;
	volatile unsigned long *pio_write_status_addr;
	unsigned long	pio_write_status_val;
	volatile unsigned long *pio_shub_war_cam_addr;

	struct bteinfo_s *cpu_bte_if[BTES_PER_NODE];	/* cpu interface order */

	unsigned long	sn_soft_irr[4];
	unsigned long	sn_in_service_ivecs[4];
	short		cnodeid_to_nasid_table[MAX_NUMNODES];
	int		sn_lb_int_war_ticks;
	int		sn_last_irq;
	int		sn_first_irq;
} pda_t;


#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))

/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for
 * each cpu, but only a small amount of the page is actually used. We put the
 * SNIA PDA in the same page as the cpu_data area. Note that there is a check
 * in the setup code to verify that we don't overflow the page.
 *
 * Seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout. Should we align to a
 * 32, 64, 128, or 512 byte boundary? Each has merits. For now, pick 128, but
 * this should be revisited later.
 */
DECLARE_PER_CPU(struct pda_s, pda_percpu);

#define pda		(&__ia64_per_cpu_var(pda_percpu))

#define pdacpu(cpu)	(&per_cpu(pda_percpu, cpu))

/*
 * Use this macro to test if shub 1.1 WARs (workarounds) should be enabled.
 */
#define enable_shub_wars_1_1()	(pda->shub_1_1_found)

#define is_shub2()	(pda->shub2)
#define is_shub1()	(pda->shub2 == 0)
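/*
 * A minimal usage sketch (illustrative only; sn_show_first_irqs() is a
 * hypothetical function, not part of this header). The pda macro resolves
 * to the calling cpu's own PDA via the fast per-cpu variable path, while
 * pdacpu(cpu) indexes the PDA of any cpu in the system:
 *
 *	static void sn_show_first_irqs(void)
 *	{
 *		int cpu;
 *
 *		printk("cpu %d: first irq %d\n",
 *		       smp_processor_id(), pda->sn_first_irq);
 *
 *		for_each_online_cpu(cpu)
 *			printk("cpu %d: first irq %d\n",
 *			       cpu, pdacpu(cpu)->sn_first_irq);
 *	}
 */

#endif /* _ASM_IA64_SN_PDA_H */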