2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
8 * Author(s): Ingo Adlung (adlung@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
10 * Arnd Bergmann (arndb@de.ibm.com)
11 * Martin Schwidefsky (schwidefsky@de.ibm.com)
14 #include <linux/module.h>
15 #include <linux/config.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/device.h>
19 #include <linux/kernel_stat.h>
21 #include <asm/hardirq.h>
23 #include <asm/delay.h>
31 #include "blacklist.h"
32 #include "cio_debug.h"
/*
 * Handles for the three s390 debug-feature (s390dbf) logs used by the
 * common I/O layer; they are registered in cio_debug_init() below and
 * consumed by the CIO_MSG_EVENT/CIO_TRACE_EVENT/CIO_HEX_EVENT macros.
 */
34 debug_info_t *cio_debug_msg_id;
35 debug_info_t *cio_debug_trace_id;
36 debug_info_t *cio_debug_crw_id;
/*
 * cio_setup - parse the "cio_msg=" kernel command line parameter.
 * Accepts "yes" or "no"; anything else is reported via printk.
 * NOTE(review): fragment -- the return type, the bodies of the two
 * accepted branches, and the closing of the function are elided in this
 * view; presumably the branches set a message-enable flag -- confirm
 * against the full source.
 */
41 cio_setup (char *parm)
43 	if (!strcmp (parm, "yes"))
45 	else if (!strcmp (parm, "no"))
48 		printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
/* Register the parser for the "cio_msg=" boot parameter. */
53 __setup ("cio_msg=", cio_setup);
56 * Function: cio_debug_init
57 * Initializes three debug logs (under /proc/s390dbf) for common I/O:
58 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
59 * - cio_trace logs the calling of different functions
60 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
61 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
/*
 * Body of cio_debug_init (function header elided in this view).
 * Registers the three debug logs described in the comment above, gives
 * each a view and a level, and on any registration failure falls through
 * to the cleanup path that unregisters whatever was set up.
 */
66 	cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long));
67 	if (!cio_debug_msg_id)
69 	debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
70 	debug_set_level (cio_debug_msg_id, 2);
71 	cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
72 	if (!cio_debug_trace_id)
/* Trace log stores raw bytes, hence the hex/ascii view (not sprintf). */
74 	debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
75 	debug_set_level (cio_debug_trace_id, 2);
76 	cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long));
77 	if (!cio_debug_crw_id)
79 	debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
80 	debug_set_level (cio_debug_crw_id, 2);
81 	pr_debug("debugging initialized\n");
/*
 * Error/cleanup path (label elided): tear down any logs that were
 * registered before the failure.  The unguarded unregister calls rely
 * on which registrations can have succeeded at each goto site.
 */
86 	debug_unregister (cio_debug_msg_id);
87 	if (cio_debug_trace_id)
88 		debug_unregister (cio_debug_trace_id);
90 	debug_unregister (cio_debug_crw_id);
91 	pr_debug("could not initialize debugging\n");
/* Run during arch initcalls, i.e. early in boot. */
95 arch_initcall (cio_debug_init);
/*
 * cio_set_options - translate DOIO_* flag bits into the per-subchannel
 * option bitfields (suspend / prefetch / inter) consulted by cio_start().
 * NOTE(review): return type and function close are elided in this view.
 */
98 cio_set_options (struct subchannel *sch, int flags)
100 	sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
101 	sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
102 	sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
106 /* FIXME: who wants to use this? */
/*
 * cio_get_options - inverse of cio_set_options(): rebuild a DOIO_* flag
 * word from the subchannel's option bitfields.
 * NOTE(review): the declaration/initialization of `flags` and the return
 * statement are elided in this view.
 */
108 cio_get_options (struct subchannel *sch)
113 	if (sch->options.suspend)
114 		flags |= DOIO_ALLOW_SUSPEND;
115 	if (sch->options.prefetch)
116 		flags |= DOIO_DENY_PREFETCH;
117 	if (sch->options.inter)
118 		flags |= DOIO_SUPPRESS_INTER;
123  * Use tpi to get a pending interrupt, call the interrupt handler and
124  * return a pointer to the subchannel structure.
/*
 * Fragment of the function described above (signature elided).  Reads the
 * tpi information and the interrupt response block from fixed lowcore
 * addresses, stores status via tsch, and dispatches to the subchannel
 * driver's irq handler under the subchannel lock.
 */
129 	struct tpi_info *tpi_info;
130 	struct subchannel *sch;
/* tpi info lives at a fixed lowcore location. */
133 	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
136 	irb = (struct irb *) __LC_IRB;
137 	/* Store interrupt response block to lowcore. */
138 	if (tsch (tpi_info->irq, irb) != 0)
139 		/* Not status pending or not operational. */
/* intparm was set to the subchannel pointer at enable time. */
141 	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
145 	spin_lock(&sch->lock);
/* Keep the cached scsw in the schib up to date with the fresh irb. */
146 	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
147 	if (sch->driver && sch->driver->irq)
148 		sch->driver->irq(&sch->dev);
149 	spin_unlock(&sch->lock);
/*
 * cio_start_handle_notoper - common handling when ssch() reports
 * device/path not operational.  Refreshes the schib, logs the event to
 * the debug logs, and maps the condition to an errno:
 *   -EACCES if other paths remain (sch->lpm non-zero), -ENODEV otherwise.
 * NOTE(review): return type, dbf_text declaration and some intervening
 * lines are elided in this view.
 */
155 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
164 	stsch (sch->irq, &sch->schib);
166 	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
167 		      "subchannel %04x!\n", sch->irq);
168 	sprintf(dbf_text, "no%s", sch->dev.bus_id);
169 	CIO_TRACE_EVENT(0, dbf_text);
170 	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
172 	return (sch->lpm ? -EACCES : -ENODEV);
/*
 * cio_start - build the ORB from the subchannel options and issue
 * "Start Subchannel" (ssch).  The condition code decides the outcome:
 * 0 marks start pending in the cached scsw, 1 is status pending, and
 * anything else is routed to cio_start_handle_notoper().
 * NOTE(review): return type, local declarations (ccode, dbf_txt), the
 * switch statement itself and several return statements are elided in
 * this view.
 */
176 cio_start (struct subchannel *sch,	/* subchannel structure */
177 	   struct ccw1 * cpa,		/* logical channel prog addr */
178 	   __u8 lpm)			/* logical path mask */
183 	CIO_TRACE_EVENT (4, "stIO");
184 	CIO_TRACE_EVENT (4, sch->dev.bus_id);
186 	/* sch is always under 2G. */
187 	sch->orb.intparm = (__u32)(unsigned long)sch;
/* Translate the per-subchannel options into ORB control bits. */
190 	sch->orb.pfch = sch->options.prefetch == 0;
191 	sch->orb.spnd = sch->options.suspend;
/* Suppress-intermediate only meaningful together with suspend. */
192 	sch->orb.ssic = sch->options.suspend && sch->options.inter;
/* Caller-supplied lpm is masked by the operational paths; 0 means default. */
193 	sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
194 #ifdef CONFIG_ARCH_S390X
196 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
201 	sch->orb.cpa = (__u32) __pa (cpa);
204 	 * Issue "Start subchannel" and process condition code
206 	ccode = ssch (sch->irq, &sch->orb);
207 	sprintf (dbf_txt, "ccode:%d", ccode);
208 	CIO_TRACE_EVENT (4, dbf_txt);
213 		 * initialize device status information
215 		sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
217 	case 1:		/* status pending */
220 	default:		/* device/path not operational */
221 		return cio_start_handle_notoper(sch, lpm);
226  * resume suspended I/O operation
/*
 * Issues "Resume Subchannel" (rsch), traces the condition code, and on
 * success marks resume pending in the cached scsw.
 * NOTE(review): return type, local declarations and the switch/return
 * lines are elided in this view.
 */
229 cio_resume (struct subchannel *sch)
234 	CIO_TRACE_EVENT (4, "resIO");
235 	CIO_TRACE_EVENT (4, sch->dev.bus_id);
237 	ccode = rsch (sch->irq);
239 	sprintf (dbf_txt, "ccode:%d", ccode);
240 	CIO_TRACE_EVENT (4, dbf_txt);
244 		sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
252 		 * useless to wait for request completion
253 		 *  as device is no longer operational !
/*
 * cio_halt - issue "Halt Subchannel" (hsch) and process the condition
 * code; on success marks halt pending in the cached scsw.
 * NOTE(review): return type, locals, the switch statement and the
 * return values for the cc 1/2/3 cases are elided in this view.
 */
263 cio_halt(struct subchannel *sch)
271 	CIO_TRACE_EVENT (2, "haltIO");
272 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
275 	 * Issue "Halt subchannel" and process condition code
277 	ccode = hsch (sch->irq);
279 	sprintf (dbf_txt, "ccode:%d", ccode);
280 	CIO_TRACE_EVENT (2, dbf_txt);
284 		sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
286 	case 1:		/* status pending */
289 	default:		/* device not operational */
295  * Clear I/O operation
/*
 * Issues "Clear Subchannel" (csch); on success marks clear pending in
 * the cached scsw.  Structure parallels cio_halt() above.
 * NOTE(review): return type, locals, the switch statement and most case
 * bodies are elided in this view.
 */
298 cio_clear(struct subchannel *sch)
306 	CIO_TRACE_EVENT (2, "clearIO");
307 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
310 	 * Issue "Clear subchannel" and process condition code
312 	ccode = csch (sch->irq);
314 	sprintf (dbf_txt, "ccode:%d", ccode);
315 	CIO_TRACE_EVENT (2, dbf_txt);
319 		sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
321 	default:		/* device not operational */
327  * Function: cio_cancel
328  * Issues a "Cancel Subchannel" on the specified subchannel
329  * Note: We don't need any fancy intparms and flags here
330  *	 since xsch is executed synchronously.
331  * Only for common I/O internal use as for now.
/*
 * NOTE(review): return type, locals, the switch statement and the
 * per-case return values are elided in this view.  On cc 0 the schib is
 * refreshed via stsch so the cached scsw reflects the cancel.
 */
334 cio_cancel (struct subchannel *sch)
342 	CIO_TRACE_EVENT (2, "cancelIO");
343 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
345 	ccode = xsch (sch->irq);
347 	sprintf (dbf_txt, "ccode:%d", ccode);
348 	CIO_TRACE_EVENT (2, dbf_txt);
351 	case 0:		/* success */
352 		/* Update information in scsw. */
353 		stsch (sch->irq, &sch->schib);
355 	case 1:		/* status pending */
357 	case 2:		/* not applicable */
359 	default:	/* not oper */
365  * Function: cio_modify
366  * Issues a "Modify Subchannel" on the specified subchannel
/*
 * Retries msch_err up to 5 times, backing off 100us on the busy case.
 * msch_err returns negative (-EIO) if msch itself program-checks.
 * NOTE(review): the switch statement, most case bodies and the final
 * return are elided in this view.
 */
369 cio_modify (struct subchannel *sch)
371 	int ccode, retry, ret;
374 	for (retry = 0; retry < 5; retry++) {
375 		ccode = msch_err (sch->irq, &sch->schib);
376 		if (ccode < 0)	/* -EIO if msch gets a program check. */
379 		case 0: /* successfull */
381 		case 1:	/* status pending */
/* cc 2 (busy): give the channel subsystem time to recover, then retry. */
384 			udelay (100);	/* allow for recovery */
387 		case 3:	/* not operational */
/*
 * cio_enable_subchannel - enable a subchannel for interrupts.
 * Sets ena/isc/intparm in the pmcw and pushes the change with
 * cio_modify(), retrying up to 5 times.  After a program check the
 * concurrent-sense bit is dropped for the next attempt; a pending
 * status is drained with tsch before retrying.
 * NOTE(review): return type, some locals (irb, dbf_txt), several
 * control-flow lines and the final return are elided in this view.
 */
398 cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
405 	CIO_TRACE_EVENT (2, "ensch");
406 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
408 	ccode = stsch (sch->irq, &sch->schib);
412 	sch->schib.pmcw.ena = 1;
413 	sch->schib.pmcw.isc = isc;
/* Deliver the subchannel pointer back to us in the interrupt parm. */
414 	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
415 	for (retry = 5, ret = 0; retry > 0; retry--) {
416 		ret = cio_modify(sch);
421 			 * Got a program check in cio_modify. Try without
422 			 * the concurrent sense bit the next time.
424 			sch->schib.pmcw.csense = 0;
426 			stsch (sch->irq, &sch->schib);
427 			if (sch->schib.pmcw.ena)
/* Status pending: consume it so the next msch can succeed. */
432 			if (tsch(sch->irq, &irb) != 0)
436 	sprintf (dbf_txt, "ret:%d", ret);
437 	CIO_TRACE_EVENT (2, dbf_txt);
442  * Disable subchannel.
/*
 * Clears pmcw.ena and pushes the change with cio_modify(), retrying up
 * to 5 times.  Refuses to disable while any activity control bits are
 * set (I/O still in flight).
 * NOTE(review): return type, locals (dbf_txt), several control-flow and
 * return lines are elided in this view.
 */
445 cio_disable_subchannel (struct subchannel *sch)
452 	CIO_TRACE_EVENT (2, "dissch");
453 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
455 	ccode = stsch (sch->irq, &sch->schib);
456 	if (ccode == 3)		/* Not operational. */
/* Refuse while requests are outstanding (actl != 0). */
459 	if (sch->schib.scsw.actl != 0)
461 		 * the disable function must not be called while there are
462 		 *  requests pending for completion !
467 	sch->schib.pmcw.ena = 0;
468 	for (retry = 5, ret = 0; retry > 0; retry--) {
469 		ret = cio_modify(sch);
474 			 * The subchannel is busy or status pending.
475 			 * We'll disable when the next interrupt was delivered
476 			 * via the state machine.
480 		stsch (sch->irq, &sch->schib);
481 		if (!sch->schib.pmcw.ena)
485 	sprintf (dbf_txt, "ret:%d", ret);
486 	CIO_TRACE_EVENT (2, dbf_txt);
491  * cio_validate_subchannel()
493  * Find out subchannel type and initialize struct subchannel.
495  *   SUBCHANNEL_TYPE_IO for a normal io subchannel
496  *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
497  *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
498  *   SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
499  *   -ENXIO for non-defined subchannels
500  *   -ENODEV for subchannels with invalid device number or blacklisted devices
/*
 * NOTE(review): return type, locals (ccode, dbf_txt), several
 * control-flow and return lines are elided in this view.
 */
503 cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
508 	sprintf (dbf_txt, "valsch%x", irq);
509 	CIO_TRACE_EVENT (4, dbf_txt);
511 	/* Nuke all fields. */
512 	memset(sch, 0, sizeof(struct subchannel));
514 	spin_lock_init(&sch->lock);
516 	/* Set a name for the subchannel */
517 	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
520 	 * The first subchannel that is not-operational (ccode==3)
521 	 *  indicates that there aren't any more devices available.
524 	ccode = stsch (irq, &sch->schib);
528 	/* Copy subchannel type from path management control word. */
529 	sch->st = sch->schib.pmcw.st;
532 	 * ... just being curious we check for non I/O subchannels
535 		CIO_DEBUG(KERN_INFO, 0,
536 			  "Subchannel %04X reports "
537 			  "non-I/O subchannel type %04X\n",
539 		/* We stop here for non-io subchannels. */
543 	/* Initialization for io subchannels. */
544 	if (!sch->schib.pmcw.dnv)
545 		/* io subchannel but device number is invalid. */
548 	/* Devno is valid. */
549 	if (is_blacklisted (sch->schib.pmcw.dev)) {
551 		 * This device must not be known to Linux. So we simply
552 		 *  say that there is no device and return ENODEV.
554 		CIO_MSG_EVENT(0, "Blacklisted device detected "
555 			      "at devno %04X\n", sch->schib.pmcw.dev);
559 	chsc_validate_chpids(sch);
/* Usable paths: installed & available & operational (& presumably lpm mask). */
560 	sch->lpm = sch->schib.pmcw.pim &
561 		sch->schib.pmcw.pam &
562 		sch->schib.pmcw.pom &
565 	CIO_DEBUG(KERN_INFO, 0,
566 		  "Detected device %04X on subchannel %04X"
567 		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
568 		  sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
569 		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);
572 	 * We now have to initially ...
573 	 *  ... set "interruption subclass"
574 	 *  ... enable "concurrent sense"
575 	 *  ... enable "multipath mode" if more than one
576 	 *	  CHPID is available. This is done regardless
577 	 *	  whether multiple paths are available for us.
579 	sch->schib.pmcw.isc = 3;	/* could be smth. else */
580 	sch->schib.pmcw.csense = 1;	/* concurrent sense */
581 	sch->schib.pmcw.ena = 0;
/* lpm & (lpm-1) != 0 iff more than one path bit is set. */
582 	if ((sch->lpm & (sch->lpm - 1)) != 0)
583 		sch->schib.pmcw.mp = 1;	/* multipath mode */
588  * do_IRQ() handles all normal I/O device IRQ's (the special
589  *	    SMP cross-CPU interrupts have their own specific
/*
 * NOTE(review): return type, the irq_enter/irq_exit bracketing, the
 * do/while opening and some branch bodies are elided in this view.
 * Loops consuming pending I/O interrupts from lowcore, dispatching each
 * to the owning subchannel's driver under the subchannel lock.
 */
594 do_IRQ (struct pt_regs *regs)
596 	struct tpi_info *tpi_info;
597 	struct subchannel *sch;
/* "mc 0,0" is a monitor call; presumably a trace/measurement hook -- confirm. */
601 	asm volatile ("mc 0,0");
602 	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
605 		 * Get interrupt information from lowcore
607 		tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
608 		irb = (struct irb *) __LC_IRB;
610 		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
612 		 * Non I/O-subchannel thin interrupts are processed differently
614 		if (tpi_info->adapter_IO == 1 &&
615 			tpi_info->int_type == IO_INTERRUPT_TYPE) {
619 		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
621 			spin_lock(&sch->lock);
622 		/* Store interrupt response block to lowcore. */
623 		if (tsch (tpi_info->irq, irb) == 0 && sch) {
624 			/* Keep subchannel information word up to date. */
625 			memcpy (&sch->schib.scsw, &irb->scsw,
627 			/* Call interrupt handler if there is one. */
628 			if (sch->driver && sch->driver->irq)
629 				sch->driver->irq(&sch->dev);
632 			spin_unlock(&sch->lock);
634 		 * Are more interrupts pending?
635 		 * If so, the tpi instruction will update the lowcore
636 		 * to hold the info for the next interrupt.
637 		 * We don't do this for VM because a tpi drops the cpu
638 		 * out of the sie which costs more cycles than it saves.
640 	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
644 #ifdef CONFIG_CCW_CONSOLE
/* Singleton console subchannel; in_use doubles as an ownership flag
 * claimed atomically via xchg() in cio_probe_console(). */
645 static struct subchannel console_subchannel;
646 static int console_subchannel_in_use;
649  * busy wait for the next interrupt on the console
/*
 * Fragment (signature elided).  Temporarily restricts I/O interrupts to
 * isc 7 (the console's subclass) via control register 6, busy-waits
 * until the console subchannel's activity bits clear, then restores the
 * previous cr6 value.  NOTE(review): the cr6 mask computation, the loop
 * body between unlock and lock, and the function close are elided.
 */
654 	unsigned long cr6      __attribute__ ((aligned (8)));
655 	unsigned long save_cr6 __attribute__ ((aligned (8)));
658 	 * before entering the spinlock we may already have
659 	 *  processed the interrupt on a different CPU...
661 	if (!console_subchannel_in_use)
664 	/* disable all but isc 7 (console device) */
665 	__ctl_store (save_cr6, 6, 6);
667 	__ctl_load (cr6, 6, 6);
670 		spin_unlock(&console_subchannel.lock);
673 		spin_lock(&console_subchannel.lock);
674 	} while (console_subchannel.schib.scsw.actl != 0);
676 	 * restore previous isc value
678 	__ctl_load (save_cr6, 6, 6);
/*
 * cio_console_irq - determine the subchannel number (irq) of the console
 * device.  Prefers a console_irq provided by VM; otherwise scans all
 * subchannels for a matching console_devno.  Returns -1 (per the visible
 * failure path) when no console subchannel can be found.
 * NOTE(review): return type, loop bodies and the final returns are
 * elided in this view.
 */
682 cio_console_irq(void)
686 	if (console_irq != -1) {
687 		/* VM provided us with the irq number of the console. */
688 		if (stsch(console_irq, &console_subchannel.schib) != 0 ||
689 		    !console_subchannel.schib.pmcw.dnv)
691 		console_devno = console_subchannel.schib.pmcw.dev;
692 	} else if (console_devno != -1) {
693 		/* At least the console device number is known. */
694 		for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
695 			if (stsch(irq, &console_subchannel.schib) != 0)
697 			if (console_subchannel.schib.pmcw.dnv &&
698 			    console_subchannel.schib.pmcw.dev ==
704 	if (console_irq == -1)
707 		/* unlike in 2.4, we cannot autoprobe here, since
708 		 * the channel subsystem is not fully initialized.
709 		 * With some luck, the HWC console can take over */
710 		printk(KERN_WARNING "No ccw console found!\n");
/*
 * cio_probe_console - claim and initialize the console subchannel.
 * Takes ownership via xchg on console_subchannel_in_use, locates the
 * console irq, validates the subchannel, then enables isc 7 with the
 * subchannel pointer as intparm.  Returns the subchannel on success or
 * an ERR_PTR; ownership is released again on every visible failure path.
 * NOTE(review): return type, some locals and intervening lines (e.g.
 * the error checks after cio_console_irq/cio_validate_subchannel) are
 * elided in this view.
 */
717 cio_probe_console(void)
/* xchg makes the claim atomic: only one caller may own the console. */
721 	if (xchg(&console_subchannel_in_use, 1) != 0)
722 		return ERR_PTR(-EBUSY);
723 	irq = cio_console_irq();
725 		console_subchannel_in_use = 0;
726 		return ERR_PTR(-ENODEV);
728 	memset(&console_subchannel, 0, sizeof(struct subchannel));
729 	ret = cio_validate_subchannel(&console_subchannel, irq);
731 		console_subchannel_in_use = 0;
732 		return ERR_PTR(-ENODEV);
736 	 * enable console I/O-interrupt subclass 7
739 	console_subchannel.schib.pmcw.isc = 7;
740 	console_subchannel.schib.pmcw.intparm =
741 		(__u32)(unsigned long)&console_subchannel;
742 	ret = cio_modify(&console_subchannel);
744 		console_subchannel_in_use = 0;
747 	return &console_subchannel;
/*
 * cio_release_console - undo cio_probe_console(): clear the interrupt
 * parameter, push the change with cio_modify, drop the isc-7 bit from
 * control register 6 (bit 24), and release ownership.
 * NOTE(review): return type and function close are elided in this view.
 */
751 cio_release_console(void)
753 	console_subchannel.schib.pmcw.intparm = 0;
754 	cio_modify(&console_subchannel);
755 	ctl_clear_bit(6, 24);
756 	console_subchannel_in_use = 0;
759 /* Bah... hack to catch console special sausages. */
/*
 * Returns non-zero iff irq is the claimed console subchannel.
 * NOTE(review): return type and the early-return value when the console
 * is not in use are elided in this view.
 */
761 cio_is_console(int irq)
763 	if (!console_subchannel_in_use)
765 	return (irq == console_subchannel.irq);
/*
 * Accessor for the console subchannel singleton.
 * NOTE(review): return type and the not-in-use return value are elided
 * in this view; presumably NULL is returned when unclaimed -- confirm.
 */
769 cio_get_console_subchannel(void)
771 	if (!console_subchannel_in_use)
773 	return &console_subchannel;
/*
 * __disable_subchannel_easy - minimal disable used on the re-ipl path
 * (no retrying state machine): try msch up to 3 times, mapping cc 3 to
 * -ENODEV and other failures to -EBUSY; success is confirmed by the
 * ena bit dropping.  NOTE(review): return type, the cc checks around
 * line 786 and the success return are elided in this view.
 */
778 __disable_subchannel_easy(unsigned int schid, struct schib *schib)
783 	for (retry=0;retry<3;retry++) {
785 		cc = msch(schid, schib);
787 			return (cc==3?-ENODEV:-EBUSY);
789 		if (!schib->pmcw.ena)
792 	return -EBUSY; /* uhm... */
/*
 * __clear_subchannel_easy - minimal clear used on the re-ipl path:
 * retries up to 20 times, draining pending status with tsch into the
 * lowcore irb.  NOTE(review): return type, the csch issue and the loop
 * exit conditions are elided in this view.
 */
796 __clear_subchannel_easy(unsigned int schid)
802 	for (retry=0;retry<20;retry++) {
806 			tsch(schid, (struct irb *)__LC_IRB);
814 extern void do_reipl(unsigned long devno);
815 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
817 reipl(unsigned long devno)
822 for (schid=0;schid<=highest_subchannel;schid++) {
824 if (stsch(schid, &schib))
828 switch(__disable_subchannel_easy(schid, &schib)) {
832 default: /* -EBUSY */
833 if (__clear_subchannel_easy(schid))
834 break; /* give up... */
835 stsch(schid, &schib);
836 __disable_subchannel_easy(schid, &schib);