X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fs390%2Fcio%2Fcio.c;h=ae1bf231d08945bcad13903266f44ad1a3a00be6;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3c786706b5f6cf1655a11fd103a790995edaf6f7;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3c786706b..ae1bf231d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,28 +1,26 @@ /* * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls - * $Revision: 1.121 $ * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright (C) IBM Corp. 1999,2006 * Author(s): Ingo Adlung (adlung@de.ibm.com) - * Cornelia Huck (cohuck@de.ibm.com) + * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include -#include #include #include #include #include - -#include +#include #include #include #include - +#include +#include +#include #include "airq.h" #include "cio.h" #include "css.h" @@ -30,6 +28,7 @@ #include "ioasm.h" #include "blacklist.h" #include "cio_debug.h" +#include "../s390mach.h" debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; @@ -63,21 +62,21 @@ __setup ("cio_msg=", cio_setup); static int __init cio_debug_init (void) { - cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long)); + cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); if (!cio_debug_msg_id) goto out_unregister; debug_register_view (cio_debug_msg_id, &debug_sprintf_view); - debug_set_level (cio_debug_msg_id, 6); - cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8); + debug_set_level (cio_debug_msg_id, 2); + cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16); if (!cio_debug_trace_id) goto out_unregister; debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); - debug_set_level (cio_debug_trace_id, 6); - cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long)); + debug_set_level (cio_debug_trace_id, 2); + cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); if (!cio_debug_crw_id) goto out_unregister; debug_register_view (cio_debug_crw_id, &debug_sprintf_view); - debug_set_level (cio_debug_crw_id, 6); + debug_set_level (cio_debug_crw_id, 2); pr_debug("debugging initialized\n"); return 0; @@ -135,19 +134,21 @@ cio_tpi(void) return 0; irb = (struct irb *) __LC_IRB; /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) != 0) + if (tsch (tpi_info->schid, irb) != 0) /* Not status pending or not operational. 
*/ return 1; sch = (struct subchannel *)(unsigned long)tpi_info->intparm; if (!sch) return 1; + local_bh_disable(); irq_enter (); - spin_lock(&sch->lock); + spin_lock(sch->lock); memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); if (sch->driver && sch->driver->irq) sch->driver->irq(&sch->dev); - spin_unlock(&sch->lock); + spin_unlock(sch->lock); irq_exit (); + _local_bh_enable(); return 1; } @@ -161,10 +162,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) else sch->lpm = 0; - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " - "subchannel %04x!\n", sch->irq); + "subchannel 0.%x.%04x!\n", sch->schid.ssid, + sch->schid.sch_no); sprintf(dbf_text, "no%s", sch->dev.bus_id); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); @@ -173,9 +175,10 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) } int -cio_start (struct subchannel *sch, /* subchannel structure */ - struct ccw1 * cpa, /* logical channel prog addr */ - __u8 lpm) /* logical path mask */ +cio_start_key (struct subchannel *sch, /* subchannel structure */ + struct ccw1 * cpa, /* logical channel prog addr */ + __u8 lpm, /* logical path mask */ + __u8 key) /* storage key */ { char dbf_txt[15]; int ccode; @@ -190,20 +193,20 @@ cio_start (struct subchannel *sch, /* subchannel structure */ sch->orb.pfch = sch->options.prefetch == 0; sch->orb.spnd = sch->options.suspend; sch->orb.ssic = sch->options.suspend && sch->options.inter; - sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; -#ifdef CONFIG_ARCH_S390X + sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; +#ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ sch->orb.c64 = 1; sch->orb.i2k = 0; #endif + sch->orb.key = key >> 4; + /* issue "Start Subchannel" */ sch->orb.cpa = (__u32) __pa (cpa); + ccode = ssch (sch->schid, &sch->orb); - /* - * Issue "Start subchannel" and process condition code - */ - ccode = ssch (sch->irq, &sch->orb); + /* process condition code */ sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (4, dbf_txt); @@ -222,6 +225,12 @@ cio_start (struct subchannel *sch, /* subchannel structure */ } } +int +cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) +{ + return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); +} + /* * resume suspended I/O operation */ @@ -234,7 +243,7 @@ cio_resume (struct subchannel *sch) CIO_TRACE_EVENT (4, "resIO"); CIO_TRACE_EVENT (4, sch->dev.bus_id); - ccode = rsch (sch->irq); + ccode = rsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (4, dbf_txt); @@ -274,7 +283,7 @@ cio_halt(struct subchannel *sch) /* * Issue "Halt subchannel" and process condition code */ - ccode = hsch (sch->irq); + ccode = hsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -309,7 +318,7 @@ cio_clear(struct subchannel *sch) /* * Issue "Clear subchannel" and process condition code */ - ccode = csch (sch->irq); + ccode = csch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -342,7 +351,7 @@ cio_cancel (struct subchannel *sch) CIO_TRACE_EVENT (2, "cancelIO"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = xsch (sch->irq); + ccode = xsch (sch->schid); sprintf (dbf_txt, "ccode:%d", ccode); CIO_TRACE_EVENT (2, dbf_txt); @@ -350,7 +359,7 @@ cio_cancel (struct subchannel *sch) switch (ccode) { case 0: /* success */ /* Update information in scsw. 
*/ - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); return 0; case 1: /* status pending */ return -EBUSY; @@ -372,7 +381,7 @@ cio_modify (struct subchannel *sch) ret = 0; for (retry = 0; retry < 5; retry++) { - ccode = msch_err (sch->irq, &sch->schib); + ccode = msch_err (sch->schid, &sch->schib); if (ccode < 0) /* -EIO if msch gets a program check. */ return ccode; switch (ccode) { @@ -405,14 +414,16 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) CIO_TRACE_EVENT (2, "ensch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + if (sch_is_pseudo_sch(sch)) + return -EINVAL; + ccode = stsch (sch->schid, &sch->schib); if (ccode) return -ENODEV; - sch->schib.pmcw.ena = 1; - sch->schib.pmcw.isc = isc; - sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; for (retry = 5, ret = 0; retry > 0; retry--) { + sch->schib.pmcw.ena = 1; + sch->schib.pmcw.isc = isc; + sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; ret = cio_modify(sch); if (ret == -ENODEV) break; @@ -423,13 +434,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) */ sch->schib.pmcw.csense = 0; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (sch->schib.pmcw.ena) break; } if (ret == -EBUSY) { struct irb irb; - if (tsch(sch->irq, &irb) != 0) + if (tsch(sch->schid, &irb) != 0) break; } } @@ -452,7 +463,9 @@ cio_disable_subchannel (struct subchannel *sch) CIO_TRACE_EVENT (2, "dissch"); CIO_TRACE_EVENT (2, sch->dev.bus_id); - ccode = stsch (sch->irq, &sch->schib); + if (sch_is_pseudo_sch(sch)) + return 0; + ccode = stsch (sch->schid, &sch->schib); if (ccode == 3) /* Not operational. */ return -ENODEV; @@ -463,9 +476,8 @@ cio_disable_subchannel (struct subchannel *sch) */ return -EBUSY; - - sch->schib.pmcw.ena = 0; for (retry = 5, ret = 0; retry > 0; retry--) { + sch->schib.pmcw.ena = 0; ret = cio_modify(sch); if (ret == -ENODEV) break; @@ -477,7 +489,7 @@ cio_disable_subchannel (struct subchannel *sch) */ break; if (ret == 0) { - stsch (sch->irq, &sch->schib); + stsch (sch->schid, &sch->schib); if (!sch->schib.pmcw.ena) break; } @@ -487,6 +499,15 @@ cio_disable_subchannel (struct subchannel *sch) return ret; } +int cio_create_sch_lock(struct subchannel *sch) +{ + sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!sch->lock) + return -ENOMEM; + spin_lock_init(sch->lock); + return 0; +} + /* * cio_validate_subchannel() * @@ -500,31 +521,42 @@ cio_disable_subchannel (struct subchannel *sch) * -ENODEV for subchannels with invalid device number or blacklisted devices */ int -cio_validate_subchannel (struct subchannel *sch, unsigned int irq) +cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) { char dbf_txt[15]; int ccode; + int err; - sprintf (dbf_txt, "valsch%x", irq); + sprintf (dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT (4, dbf_txt); /* Nuke all fields. */ memset(sch, 0, sizeof(struct subchannel)); - spin_lock_init(&sch->lock); - + sch->schid = schid; + if (cio_is_console(schid)) { + sch->lock = cio_get_console_lock(); + } else { + err = cio_create_sch_lock(sch); + if (err) + goto out; + } + mutex_init(&sch->reg_mutex); /* Set a name for the subchannel */ - snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); + snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, + schid.sch_no); /* * The first subchannel that is not-operational (ccode==3) * indicates that there aren't any more devices available. 
+ * If stsch gets an exception, it means the current subchannel set + * is not valid. */ - sch->irq = irq; - ccode = stsch (irq, &sch->schib); - if (ccode) - return -ENXIO; - + ccode = stsch_err (schid, &sch->schib); + if (ccode) { + err = (ccode == 3) ? -ENXIO : ccode; + goto out; + } /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; @@ -533,39 +565,42 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) */ if (sch->st != 0) { CIO_DEBUG(KERN_INFO, 0, - "Subchannel %04X reports " + "Subchannel 0.%x.%04x reports " "non-I/O subchannel type %04X\n", - sch->irq, sch->st); + sch->schid.ssid, sch->schid.sch_no, sch->st); /* We stop here for non-io subchannels. */ - return sch->st; + err = sch->st; + goto out; } /* Initialization for io subchannels. */ - if (!sch->schib.pmcw.dnv) + if (!sch->schib.pmcw.dnv) { /* io subchannel but device number is invalid. */ - return -ENODEV; - + err = -ENODEV; + goto out; + } /* Devno is valid. */ - if (is_blacklisted (sch->schib.pmcw.dev)) { + if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { /* * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. */ CIO_MSG_EVENT(0, "Blacklisted device detected " - "at devno %04X\n", sch->schib.pmcw.dev); - return -ENODEV; + "at devno %04X, subchannel set %x\n", + sch->schib.pmcw.dev, sch->schid.ssid); + err = -ENODEV; + goto out; } sch->opm = 0xff; - chsc_validate_chpids(sch); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + if (!cio_is_console(sch->schid)) + chsc_validate_chpids(sch); + sch->lpm = sch->schib.pmcw.pam & sch->opm; CIO_DEBUG(KERN_INFO, 0, - "Detected device %04X on subchannel %04X" + "Detected device %04x on subchannel 0.%x.%04X" " - PIM = %02X, PAM = %02X, POM = %02X\n", - sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); /* @@ -582,6 +617,11 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq) if ((sch->lpm & (sch->lpm - 1)) != 0) sch->schib.pmcw.mp = 1; /* multipath mode */ return 0; +out: + if (!cio_is_console(schid)) + kfree(sch->lock); + sch->lock = NULL; + return err; } /* @@ -596,11 +636,17 @@ do_IRQ (struct pt_regs *regs) struct tpi_info *tpi_info; struct subchannel *sch; struct irb *irb; + struct pt_regs *old_regs; - irq_enter (); + old_regs = set_irq_regs(regs); + irq_enter(); asm volatile ("mc 0,0"); if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) - account_ticks(regs); + /** + * Make sure that the i/o interrupt did not "overtake" + * the last HZ timer interrupt. + */ + account_ticks(); /* * Get interrupt information from lowcore */ @@ -618,9 +664,9 @@ do_IRQ (struct pt_regs *regs) } sch = (struct subchannel *)(unsigned long)tpi_info->intparm; if (sch) - spin_lock(&sch->lock); + spin_lock(sch->lock); /* Store interrupt response block to lowcore. */ - if (tsch (tpi_info->irq, irb) == 0 && sch) { + if (tsch (tpi_info->schid, irb) == 0 && sch) { /* Keep subchannel information word up to date. */ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); @@ -629,7 +675,7 @@ do_IRQ (struct pt_regs *regs) sch->driver->irq(&sch->dev); } if (sch) - spin_unlock(&sch->lock); + spin_unlock(sch->lock); /* * Are more interrupts pending? * If so, the tpi instruction will update the lowcore @@ -638,7 +684,8 @@ do_IRQ (struct pt_regs *regs) * out of the sie which costs more cycles than it saves. 
*/ } while (!MACHINE_IS_VM && tpi (NULL) != 0); - irq_exit (); + irq_exit(); + set_irq_regs(old_regs); } #ifdef CONFIG_CCW_CONSOLE @@ -667,10 +714,10 @@ wait_cons_dev (void) __ctl_load (cr6, 6, 6); do { - spin_unlock(&console_subchannel.lock); + spin_unlock(console_subchannel.lock); if (!cio_tpi()) cpu_relax(); - spin_lock(&console_subchannel.lock); + spin_lock(console_subchannel.lock); } while (console_subchannel.schib.scsw.actl != 0); /* * restore previous isc value @@ -679,28 +726,36 @@ wait_cons_dev (void) } static int -cio_console_irq(void) +cio_test_for_console(struct subchannel_id schid, void *data) { - int irq; + if (stsch_err(schid, &console_subchannel.schib) != 0) + return -ENXIO; + if (console_subchannel.schib.pmcw.dnv && + console_subchannel.schib.pmcw.dev == + console_devno) { + console_irq = schid.sch_no; + return 1; /* found */ + } + return 0; +} + + +static int +cio_get_console_sch_no(void) +{ + struct subchannel_id schid; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ - if (stsch(console_irq, &console_subchannel.schib) != 0 || + schid.sch_no = console_irq; + if (stsch(schid, &console_subchannel.schib) != 0 || !console_subchannel.schib.pmcw.dnv) return -1; - console_device = console_subchannel.schib.pmcw.dev; - } else if (console_device != -1) { + console_devno = console_subchannel.schib.pmcw.dev; + } else if (console_devno != -1) { /* At least the console device number is known. */ - for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { - if (stsch(irq, &console_subchannel.schib) != 0) - break; - if (console_subchannel.schib.pmcw.dnv && - console_subchannel.schib.pmcw.dev == - console_device) { - console_irq = irq; - break; - } - } + for_each_subchannel(cio_test_for_console, NULL); if (console_irq == -1) return -1; } else { @@ -716,17 +771,20 @@ cio_console_irq(void) struct subchannel * cio_probe_console(void) { - int irq, ret; + int sch_no, ret; + struct subchannel_id schid; if (xchg(&console_subchannel_in_use, 1) != 0) return ERR_PTR(-EBUSY); - irq = cio_console_irq(); - if (irq == -1) { + sch_no = cio_get_console_sch_no(); + if (sch_no == -1) { console_subchannel_in_use = 0; return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); - ret = cio_validate_subchannel(&console_subchannel, irq); + init_subchannel_id(&schid); + schid.sch_no = sch_no; + ret = cio_validate_subchannel(&console_subchannel, schid); if (ret) { console_subchannel_in_use = 0; return ERR_PTR(-ENODEV); @@ -758,24 +816,24 @@ cio_release_console(void) /* Bah... hack to catch console special sausages. 
*/ int -cio_is_console(int irq) +cio_is_console(struct subchannel_id schid) { if (!console_subchannel_in_use) return 0; - return (irq == console_subchannel.irq); + return schid_equal(&schid, &console_subchannel.schid); } struct subchannel * cio_get_console_subchannel(void) { if (!console_subchannel_in_use) - return 0; + return NULL; return &console_subchannel; } #endif static inline int -__disable_subchannel_easy(unsigned int schid, struct schib *schib) +__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { int retry, cc; @@ -793,7 +851,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib) } static inline int -__clear_subchannel_easy(unsigned int schid) +__clear_subchannel_easy(struct subchannel_id schid) { int retry; @@ -803,39 +861,210 @@ __clear_subchannel_easy(unsigned int schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(schid, (struct irb *)__LC_IRB); - return 0; + tsch(ti.schid, (struct irb *)__LC_IRB); + if (schid_equal(&ti.schid, &schid)) + return 0; } udelay(100); } return -EBUSY; } -extern void do_reipl(unsigned long devno); -/* Make sure all subchannels are quiet before we re-ipl an lpar. */ -void -reipl(unsigned long devno) +static int pgm_check_occured; + +static void cio_reset_pgm_check_handler(void) { - unsigned int schid; + pgm_check_occured = 1; +} - local_irq_disable(); - for (schid=0;schid<=highest_subchannel;schid++) { - struct schib schib; - if (stsch(schid, &schib)) - goto out; - if (!schib.pmcw.ena) - continue; - switch(__disable_subchannel_easy(schid, &schib)) { - case 0: - case -ENODEV: +static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) +{ + int rc; + + pgm_check_occured = 0; + s390_reset_pgm_handler = cio_reset_pgm_check_handler; + rc = stsch(schid, addr); + s390_reset_pgm_handler = NULL; + + /* The program check handler could have changed pgm_check_occured */ + barrier(); + + if (pgm_check_occured) + return -EIO; + else + return rc; +} + +static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) +{ + struct schib schib; + + if (stsch_reset(schid, &schib)) + return -ENXIO; + if (!schib.pmcw.ena) + return 0; + switch(__disable_subchannel_easy(schid, &schib)) { + case 0: + case -ENODEV: + break; + default: /* -EBUSY */ + if (__clear_subchannel_easy(schid)) + break; /* give up... */ + stsch(schid, &schib); + __disable_subchannel_easy(schid, &schib); + } + return 0; +} + +static atomic_t chpid_reset_count; + +static void s390_reset_chpids_mcck_handler(void) +{ + struct crw crw; + struct mci *mci; + + /* Check for pending channel report word. */ + mci = (struct mci *)&S390_lowcore.mcck_interruption_code; + if (!mci->cp) + return; + /* Process channel report words. */ + while (stcrw(&crw) == 0) { + /* Check for responses to RCHP. */ + if (crw.slct && crw.rsc == CRW_RSC_CPATH) + atomic_dec(&chpid_reset_count); + } +} + +#define RCHP_TIMEOUT (30 * USEC_PER_SEC) +static void css_reset(void) +{ + int i, ret; + unsigned long long timeout; + + /* Reset subchannels. */ + for_each_subchannel(__shutdown_subchannel_easy, NULL); + /* Reset channel paths. */ + s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; + /* Enable channel report machine checks. */ + __ctl_set_bit(14, 28); + /* Temporarily reenable machine checks. */ + local_mcck_enable(); + for (i = 0; i <= __MAX_CHPID; i++) { + ret = rchp(i); + if ((ret == 0) || (ret == 2)) + /* + * rchp either succeeded, or another rchp is already + * in progress. In either case, we'll get a crw. 
+ */ + atomic_inc(&chpid_reset_count); + } + /* Wait for machine check for all channel paths. */ + timeout = get_clock() + (RCHP_TIMEOUT << 12); + while (atomic_read(&chpid_reset_count) != 0) { + if (get_clock() > timeout) break; - default: /* -EBUSY */ - if (__clear_subchannel_easy(schid)) - break; /* give up... */ - stsch(schid, &schib); - __disable_subchannel_easy(schid, &schib); - } + cpu_relax(); } -out: - do_reipl(devno); + /* Disable machine checks again. */ + local_mcck_disable(); + /* Disable channel report machine checks. */ + __ctl_clear_bit(14, 28); + s390_reset_mcck_handler = NULL; +} + +static struct reset_call css_reset_call = { + .fn = css_reset, +}; + +static int __init init_css_reset_call(void) +{ + atomic_set(&chpid_reset_count, 0); + register_reset_call(&css_reset_call); + return 0; +} + +arch_initcall(init_css_reset_call); + +struct sch_match_id { + struct subchannel_id schid; + struct ccw_dev_id devid; + int rc; +}; + +static int __reipl_subchannel_match(struct subchannel_id schid, void *data) +{ + struct schib schib; + struct sch_match_id *match_id = data; + + if (stsch_reset(schid, &schib)) + return -ENXIO; + if (schib.pmcw.dnv && + (schib.pmcw.dev == match_id->devid.devno) && + (schid.ssid == match_id->devid.ssid)) { + match_id->schid = schid; + match_id->rc = 0; + return 1; + } + return 0; +} + +static int reipl_find_schid(struct ccw_dev_id *devid, + struct subchannel_id *schid) +{ + struct sch_match_id match_id; + + match_id.devid = *devid; + match_id.rc = -ENODEV; + for_each_subchannel(__reipl_subchannel_match, &match_id); + if (match_id.rc == 0) + *schid = match_id.schid; + return match_id.rc; +} + +extern void do_reipl_asm(__u32 schid); + +/* Make sure all subchannels are quiet before we re-ipl an lpar. */ +void reipl_ccw_dev(struct ccw_dev_id *devid) +{ + struct subchannel_id schid; + + s390_reset_system(); + if (reipl_find_schid(devid, &schid) != 0) + panic("IPL Device not found\n"); + do_reipl_asm(*((__u32*)&schid)); +} + +extern struct schib ipl_schib; + +/* + * ipl_save_parameters gets called very early. It is not allowed to access + * anything in the bss section at all. The bss section is not cleared yet, + * but may contain some ipl parameters written by the firmware. + * These parameters (if present) are copied to 0x2000. + * To avoid corruption of the ipl parameters, all variables used by this + * function must reside on the stack or in the data section. + */ +void ipl_save_parameters(void) +{ + struct subchannel_id schid; + unsigned int *ipl_ptr; + void *src, *dst; + + schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; + if (!schid.one) + return; + if (stsch(schid, &ipl_schib)) + return; + if (!ipl_schib.pmcw.dnv) + return; + ipl_devno = ipl_schib.pmcw.dev; + ipl_flags |= IPL_DEVNO_VALID; + if (!ipl_schib.pmcw.qf) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; }