/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
11 #include <linux/config.h>
12 #include <linux/module.h>
13 #include <linux/kmod.h>
14 #include <linux/bootmem.h>
15 #include <linux/err.h>
16 #include <linux/ptrace.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/timer.h>
21 #include <linux/init.h>
22 #include <linux/cpumask.h>
23 #include <asm/s390_ext.h>
24 #include <asm/processor.h>
28 #define SCLP_CORE_PRINT_HEADER "sclp low level driver: "
30 /* Structure for register_early_external_interrupt. */
31 static ext_int_info_t ext_int_info_hwc;
33 /* spinlock to protect global variables of sclp_core */
34 static spinlock_t sclp_lock;
36 /* Mask of valid sclp events */
37 static sccb_mask_t sclp_receive_mask;
38 static sccb_mask_t sclp_send_mask;
40 /* List of registered event types */
41 static struct list_head sclp_reg_list;
44 static struct list_head sclp_req_queue;
46 /* sccb for unconditional read */
47 static struct sclp_req sclp_read_req;
48 static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
49 /* sccb for write mask sccb */
50 static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
52 /* Timer for init mask retries. */
53 static struct timer_list retry_timer;
55 static volatile unsigned long sclp_status = 0;
56 /* some status flags */
58 #define SCLP_RUNNING 1
59 #define SCLP_READING 2
61 #define SCLP_INIT_POLL_INTERVAL 1
63 #define SCLP_COMMAND_INITIATED 0
65 #define SCLP_NOT_OPERATIONAL 3
68 * assembler instruction for Service Call
71 __service_call(sclp_cmdw_t command, void *sccb)
76 * Mnemonic: SERVC Rx, Ry [RRE]
78 * Rx: SCLP command word
82 " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
86 : "d" (command), "a" (__pa(sccb))
89 * cc == 0: Service Call succesful initiated
90 * cc == 2: SCLP busy, new Service Call not initiated,
92 * cc == 3: SCLP function not operational
94 if (cc == SCLP_NOT_OPERATIONAL)
97 * We set the SCLP_RUNNING bit for cc 2 as well because if
98 * service_call returns cc 2 some old request is running
99 * that has to complete first
101 set_bit(SCLP_RUNNING, &sclp_status);
108 sclp_start_request(void)
110 struct sclp_req *req;
114 /* quick exit if sclp is already in use */
115 if (test_bit(SCLP_RUNNING, &sclp_status))
117 spin_lock_irqsave(&sclp_lock, flags);
118 /* Get first request on queue if available */
120 if (!list_empty(&sclp_req_queue))
121 req = list_entry(sclp_req_queue.next, struct sclp_req, list);
123 rc = __service_call(req->command, req->sccb);
125 req->status = SCLP_REQ_FAILED;
126 list_del(&req->list);
128 req->status = SCLP_REQ_RUNNING;
131 spin_unlock_irqrestore(&sclp_lock, flags);
132 if (rc == -EIO && req->callback != NULL)
133 req->callback(req, req->callback_data);
138 sclp_process_evbufs(struct sccb_header *sccb)
142 struct evbuf_header *evbuf;
144 struct sclp_register *t;
146 spin_lock_irqsave(&sclp_lock, flags);
147 evbuf = (struct evbuf_header *) (sccb + 1);
149 while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
150 /* check registered event */
152 list_for_each(l, &sclp_reg_list) {
153 t = list_entry(l, struct sclp_register, list);
154 if (t->receive_mask & (1 << (32 - evbuf->type))) {
155 if (t->receiver_fn != NULL) {
156 spin_unlock_irqrestore(&sclp_lock,
158 t->receiver_fn(evbuf);
159 spin_lock_irqsave(&sclp_lock, flags);
166 /* Check for unrequested event buffer */
169 evbuf = (struct evbuf_header *)
170 ((addr_t) evbuf + evbuf->length);
172 spin_unlock_irqrestore(&sclp_lock, flags);
177 sclp_error_message(u16 rc)
182 { 0x0000, "No response code stored (machine malfunction)" },
183 { 0x0020, "Normal Completion" },
184 { 0x0040, "SCLP equipment check" },
185 { 0x0100, "SCCB boundary violation" },
186 { 0x01f0, "Invalid command" },
187 { 0x0220, "Normal Completion; suppressed buffers pending" },
188 { 0x0300, "Insufficient SCCB length" },
189 { 0x0340, "Contained SCLP equipment check" },
190 { 0x05f0, "Target resource in improper state" },
191 { 0x40f0, "Invalid function code/not installed" },
192 { 0x60f0, "No buffers stored" },
193 { 0x62f0, "No buffers stored; suppressed buffers pending" },
194 { 0x70f0, "Invalid selection mask" },
195 { 0x71f0, "Event buffer exceeds available space" },
196 { 0x72f0, "Inconsistent lengths" },
197 { 0x73f0, "Event buffer syntax error" }
200 for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
201 if (rc == sclp_errors[i].code)
202 return sclp_errors[i].msg;
203 return "Invalid response code";
207 * postprocessing of unconditional read service call
210 sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
212 struct sccb_header *sccb;
214 sccb = read_req->sccb;
215 if (sccb->response_code == 0x0020 ||
216 sccb->response_code == 0x0220) {
217 if (sclp_process_evbufs(sccb) != 0)
218 printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
219 "unconditional read: "
220 "unrequested event buffer received.\n");
223 if (sccb->response_code != 0x0020)
224 printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
225 "unconditional read: %s (response code=0x%x).\n",
226 sclp_error_message(sccb->response_code),
227 sccb->response_code);
229 clear_bit(SCLP_READING, &sclp_status);
233 * Function to queue Read Event Data/Unconditional Read
236 __sclp_unconditional_read(void)
238 struct sccb_header *sccb;
239 struct sclp_req *read_req;
242 * Don't try to initiate Unconditional Read if we are not able to
245 if (sclp_receive_mask == 0)
247 /* Don't try reading if a read is already outstanding */
248 if (test_and_set_bit(SCLP_READING, &sclp_status))
250 /* Initialize read sccb */
251 sccb = (struct sccb_header *) sclp_read_sccb;
253 sccb->length = PAGE_SIZE;
254 sccb->function_code = 0; /* unconditional read */
255 sccb->control_mask[2] = 0x80; /* variable length response */
256 /* Initialize request structure */
257 read_req = &sclp_read_req;
258 read_req->command = SCLP_CMDW_READDATA;
259 read_req->status = SCLP_REQ_QUEUED;
260 read_req->callback = sclp_unconditional_read_cb;
261 read_req->sccb = sccb;
262 /* Add read request to the head of queue */
263 list_add(&read_req->list, &sclp_req_queue);
266 /* Bit masks to interpret external interruption parameter contents. */
267 #define EXT_INT_SCCB_MASK 0xfffffff8
268 #define EXT_INT_STATECHANGE_PENDING 0x00000002
269 #define EXT_INT_EVBUF_PENDING 0x00000001
272 * Handler for service-signal external interruptions
275 sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
277 u32 ext_int_param, finished_sccb, evbuf_pending;
279 struct sclp_req *req, *tmp;
281 spin_lock(&sclp_lock);
283 * Only process interrupt if sclp is initialized.
284 * This avoids strange effects for a pending request
285 * from before the last re-ipl.
287 if (!test_bit(SCLP_INIT, &sclp_status)) {
288 /* Now clear the running bit */
289 clear_bit(SCLP_RUNNING, &sclp_status);
290 spin_unlock(&sclp_lock);
293 ext_int_param = S390_lowcore.ext_params;
294 finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
295 evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
296 EXT_INT_STATECHANGE_PENDING);
298 if (finished_sccb != 0U) {
299 list_for_each(l, &sclp_req_queue) {
300 tmp = list_entry(l, struct sclp_req, list);
301 if (finished_sccb == (u32)(addr_t) tmp->sccb) {
302 list_del(&tmp->list);
308 spin_unlock(&sclp_lock);
309 /* Perform callback */
311 req->status = SCLP_REQ_DONE;
312 if (req->callback != NULL)
313 req->callback(req, req->callback_data);
315 spin_lock(&sclp_lock);
316 /* Head queue a read sccb if an event buffer is pending */
318 __sclp_unconditional_read();
319 /* Now clear the running bit if SCLP indicated a finished SCCB */
320 if (finished_sccb != 0U)
321 clear_bit(SCLP_RUNNING, &sclp_status);
322 spin_unlock(&sclp_lock);
323 /* and start next request on the queue */
324 sclp_start_request();
328 * Wait synchronously for external interrupt of sclp. We may not receive
329 * any other external interrupt, so we disable all other external interrupts
330 * in control register 0.
335 unsigned long psw_mask;
336 unsigned long cr0, cr0_sync;
338 /* Prevent BH from executing. */
342 * enable service signal external interruption (cr0.22)
343 * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31
344 * don't touch any other bit in cr0
346 __ctl_store(cr0, 0, 0);
348 cr0_sync |= 0x00000200;
349 cr0_sync &= 0xFFFFF3AC;
350 __ctl_load(cr0_sync, 0, 0);
352 /* enable external interruptions (PSW-mask.7) */
353 asm volatile ("STOSM 0(%1),0x01"
354 : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
356 /* wait until ISR signals receipt of interrupt */
357 while (test_bit(SCLP_RUNNING, &sclp_status)) {
362 /* disable external interruptions */
363 asm volatile ("SSM 0(%0)"
364 : : "a" (&psw_mask) : "memory");
367 __ctl_load(cr0, 0, 0);
372 * Queue an SCLP request. Request will immediately be processed if queue is
376 sclp_add_request(struct sclp_req *req)
380 if (!test_bit(SCLP_INIT, &sclp_status)) {
381 req->status = SCLP_REQ_FAILED;
382 if (req->callback != NULL)
383 req->callback(req, req->callback_data);
386 spin_lock_irqsave(&sclp_lock, flags);
387 /* queue the request */
388 req->status = SCLP_REQ_QUEUED;
389 list_add_tail(&req->list, &sclp_req_queue);
390 spin_unlock_irqrestore(&sclp_lock, flags);
391 /* try to start the first request on the queue */
392 sclp_start_request();
395 /* state change notification */
396 struct sclp_statechangebuf {
397 struct evbuf_header header;
398 u8 validity_sclp_active_facility_mask : 1;
399 u8 validity_sclp_receive_mask : 1;
400 u8 validity_sclp_send_mask : 1;
401 u8 validity_read_data_function_mask : 1;
404 u64 sclp_active_facility_mask;
405 sccb_mask_t sclp_receive_mask;
406 sccb_mask_t sclp_send_mask;
407 u32 read_data_function_mask;
408 } __attribute__((packed));
411 __sclp_notify_state_change(void)
414 struct sclp_register *t;
415 sccb_mask_t receive_mask, send_mask;
417 list_for_each(l, &sclp_reg_list) {
418 t = list_entry(l, struct sclp_register, list);
419 receive_mask = t->receive_mask & sclp_receive_mask;
420 send_mask = t->send_mask & sclp_send_mask;
421 if (t->sclp_receive_mask != receive_mask ||
422 t->sclp_send_mask != send_mask) {
423 t->sclp_receive_mask = receive_mask;
424 t->sclp_send_mask = send_mask;
425 if (t->state_change_fn != NULL)
426 t->state_change_fn(t);
432 sclp_state_change(struct evbuf_header *evbuf)
435 struct sclp_statechangebuf *scbuf;
437 spin_lock_irqsave(&sclp_lock, flags);
438 scbuf = (struct sclp_statechangebuf *) evbuf;
440 if (scbuf->validity_sclp_receive_mask) {
441 if (scbuf->mask_length != sizeof(sccb_mask_t))
442 printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
443 "state change event with mask length %i\n",
446 /* set new receive mask */
447 sclp_receive_mask = scbuf->sclp_receive_mask;
450 if (scbuf->validity_sclp_send_mask) {
451 if (scbuf->mask_length != sizeof(sccb_mask_t))
452 printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
453 "state change event with mask length %i\n",
456 /* set new send mask */
457 sclp_send_mask = scbuf->sclp_send_mask;
460 __sclp_notify_state_change();
461 spin_unlock_irqrestore(&sclp_lock, flags);
464 static struct sclp_register sclp_state_change_event = {
465 .receive_mask = EvTyp_StateChange_Mask,
466 .receiver_fn = sclp_state_change
471 * SCLP quiesce event handler
475 do_load_quiesce_psw(void * __unused)
478 unsigned long status;
481 if (smp_processor_id() != 0)
482 signal_processor(smp_processor_id(), sigp_stop);
483 /* Wait for all other cpus to enter stopped state */
485 while (i < NR_CPUS) {
486 if (!cpu_online(i)) {
490 switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
491 case sigp_order_code_accepted:
492 case sigp_status_stored:
493 /* Check for stopped and check stop state */
494 if (test_bit(6, &status) || test_bit(4, &status))
499 case sigp_not_operational:
504 /* Quiesce the last cpu with the special psw */
505 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
506 quiesce_psw.addr = 0xfff;
507 __load_psw(quiesce_psw);
511 do_machine_quiesce(void)
513 on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
517 do_machine_quiesce(void)
521 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
522 quiesce_psw.addr = 0xfff;
523 __load_psw(quiesce_psw);
527 extern void ctrl_alt_del(void);
530 sclp_quiesce(struct evbuf_header *evbuf)
533 * We got a "shutdown" request.
534 * Add a call to an appropriate "shutdown" routine here. This
535 * routine should set all PSWs to 'disabled-wait', 'stopped'
536 * or 'check-stopped' - except 1 PSW which needs to carry a
537 * special bit pattern called 'quiesce PSW'.
539 _machine_restart = (void *) do_machine_quiesce;
540 _machine_halt = do_machine_quiesce;
541 _machine_power_off = do_machine_quiesce;
545 static struct sclp_register sclp_quiesce_event = {
546 .receive_mask = EvTyp_SigQuiesce_Mask,
547 .receiver_fn = sclp_quiesce
550 /* initialisation of SCLP */
552 struct sccb_header header;
555 sccb_mask_t receive_mask;
556 sccb_mask_t send_mask;
557 sccb_mask_t sclp_send_mask;
558 sccb_mask_t sclp_receive_mask;
559 } __attribute__((packed));
561 static void sclp_init_mask_retry(unsigned long);
567 struct init_sccb *sccb;
568 struct sclp_req *req;
570 struct sclp_register *t;
573 sccb = (struct init_sccb *) sclp_init_sccb;
574 /* stick the request structure to the end of the init sccb page */
575 req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;
577 /* SCLP setup concerning receiving and sending Event Buffers */
578 req->command = SCLP_CMDW_WRITEMASK;
579 req->status = SCLP_REQ_QUEUED;
580 req->callback = NULL;
582 /* setup sccb for writemask command */
583 memset(sccb, 0, sizeof(struct init_sccb));
584 sccb->header.length = sizeof(struct init_sccb);
585 sccb->mask_length = sizeof(sccb_mask_t);
586 /* copy in the sccb mask of the registered event types */
587 spin_lock_irqsave(&sclp_lock, flags);
588 list_for_each(l, &sclp_reg_list) {
589 t = list_entry(l, struct sclp_register, list);
590 sccb->receive_mask |= t->receive_mask;
591 sccb->send_mask |= t->send_mask;
593 sccb->sclp_receive_mask = 0;
594 sccb->sclp_send_mask = 0;
595 if (test_bit(SCLP_INIT, &sclp_status)) {
596 /* add request to sclp queue */
597 list_add_tail(&req->list, &sclp_req_queue);
598 spin_unlock_irqrestore(&sclp_lock, flags);
599 /* and start if SCLP is idle */
600 sclp_start_request();
601 /* now wait for completion */
602 while (req->status != SCLP_REQ_DONE &&
603 req->status != SCLP_REQ_FAILED)
605 spin_lock_irqsave(&sclp_lock, flags);
608 * Special case for the very first write mask command.
609 * The interrupt handler is not removing request from
610 * the request queue and doesn't call callbacks yet
611 * because there might be an pending old interrupt
612 * after a Re-IPL. We have to receive and ignore it.
615 rc = __service_call(req->command, req->sccb);
616 spin_unlock_irqrestore(&sclp_lock, flags);
620 spin_lock_irqsave(&sclp_lock, flags);
621 } while (rc == -EBUSY);
623 if (sccb->header.response_code != 0x0020) {
624 /* WRITEMASK failed - we cannot rely on receiving a state
625 change event, so initially, polling is the only alternative
626 for us to ever become operational. */
627 if (!timer_pending(&retry_timer) ||
628 !mod_timer(&retry_timer,
629 jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) {
630 retry_timer.function = sclp_init_mask_retry;
631 retry_timer.data = 0;
632 retry_timer.expires = jiffies +
633 SCLP_INIT_POLL_INTERVAL*HZ;
634 add_timer(&retry_timer);
637 sclp_receive_mask = sccb->sclp_receive_mask;
638 sclp_send_mask = sccb->sclp_send_mask;
639 __sclp_notify_state_change();
641 spin_unlock_irqrestore(&sclp_lock, flags);
646 sclp_init_mask_retry(unsigned long data)
652 * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
659 if (test_bit(SCLP_INIT, &sclp_status))
660 /* Already initialized. */
663 spin_lock_init(&sclp_lock);
664 INIT_LIST_HEAD(&sclp_req_queue);
666 /* init event list */
667 INIT_LIST_HEAD(&sclp_reg_list);
668 list_add(&sclp_state_change_event.list, &sclp_reg_list);
669 list_add(&sclp_quiesce_event.list, &sclp_reg_list);
672 * request the 0x2401 external interrupt
673 * The sclp driver is initialized early (before kmalloc works). We
674 * need to use register_early_external_interrupt.
676 if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
677 &ext_int_info_hwc) != 0)
680 /* enable service-signal external interruptions,
681 * Control Register 0 bit 22 := 1
682 * (besides PSW bit 7 must be set to 1 sometimes for external
687 init_timer(&retry_timer);
688 /* do the initial write event mask */
689 rc = sclp_init_mask();
691 /* Ok, now everything is setup right. */
692 set_bit(SCLP_INIT, &sclp_status);
696 /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
698 unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
705 * Register the SCLP event listener identified by REG. Return 0 on success.
706 * Some error codes and their meaning:
708 * -ENODEV = SCLP interface is not supported on this machine
709 * -EBUSY = there is already a listener registered for the requested
711 * -EIO = SCLP interface is currently not operational
714 sclp_register(struct sclp_register *reg)
718 struct sclp_register *t;
720 if (!MACHINE_HAS_SCLP)
723 if (!test_bit(SCLP_INIT, &sclp_status))
725 spin_lock_irqsave(&sclp_lock, flags);
726 /* check already registered event masks for collisions */
727 list_for_each(l, &sclp_reg_list) {
728 t = list_entry(l, struct sclp_register, list);
729 if (t->receive_mask & reg->receive_mask ||
730 t->send_mask & reg->send_mask) {
731 spin_unlock_irqrestore(&sclp_lock, flags);
736 * set present mask to 0 to trigger state change
737 * callback in sclp_init_mask
739 reg->sclp_receive_mask = 0;
740 reg->sclp_send_mask = 0;
741 list_add(®->list, &sclp_reg_list);
742 spin_unlock_irqrestore(&sclp_lock, flags);
748 * Unregister the SCLP event listener identified by REG.
751 sclp_unregister(struct sclp_register *reg)
755 spin_lock_irqsave(&sclp_lock, flags);
756 list_del(®->list);
757 spin_unlock_irqrestore(&sclp_lock, flags);
761 #define SCLP_EVBUF_PROCESSED 0x80
764 * Traverse array of event buffers contained in SCCB and remove all buffers
765 * with a set "processed" flag. Return the number of unprocessed buffers.
768 sclp_remove_processed(struct sccb_header *sccb)
770 struct evbuf_header *evbuf;
774 evbuf = (struct evbuf_header *) (sccb + 1);
776 remaining = sccb->length - sizeof(struct sccb_header);
777 while (remaining > 0) {
778 remaining -= evbuf->length;
779 if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
780 sccb->length -= evbuf->length;
781 memcpy((void *) evbuf,
782 (void *) ((addr_t) evbuf + evbuf->length),
786 evbuf = (struct evbuf_header *)
787 ((addr_t) evbuf + evbuf->length);
794 module_init(sclp_init);
796 EXPORT_SYMBOL(sclp_add_request);
797 EXPORT_SYMBOL(sclp_sync_wait);
798 EXPORT_SYMBOL(sclp_register);
799 EXPORT_SYMBOL(sclp_unregister);
800 EXPORT_SYMBOL(sclp_error_message);