/*
 * drivers/s390/char/sclp.c
 *	core function to access sclp interface
 *
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/s390_ext.h>
#include <asm/processor.h>

#include "sclp.h"
#define SCLP_CORE_PRINT_HEADER "sclp low level driver: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* spinlock to protect global variables of sclp_core */
static spinlock_t sclp_lock;

/* Mask of valid sclp events */
static sccb_mask_t sclp_receive_mask;
static sccb_mask_t sclp_send_mask;

/* List of registered event types */
static struct list_head sclp_reg_list;

/* List of queued SCLP requests */
static struct list_head sclp_req_queue;

/* sccb for unconditional read */
static struct sclp_req sclp_read_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* sccb for the write event mask command */
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for init mask retries. */
static struct timer_list retry_timer;

/* Timer for busy retries. */
static struct timer_list sclp_busy_timer;

static volatile unsigned long sclp_status = 0;
/* some status flags */
#define SCLP_INIT		0
#define SCLP_RUNNING		1
#define SCLP_READING		2

/* Retry intervals in seconds */
#define SCLP_INIT_POLL_INTERVAL	1
#define SCLP_BUSY_POLL_INTERVAL	1

/* Condition codes of the Service Call instruction */
#define SCLP_COMMAND_INITIATED	0
#define SCLP_BUSY		2
#define SCLP_NOT_OPERATIONAL	3
/*
 * Assembler instruction wrapper for the Service Call.
 */
static int
__service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	/*
	 * Mnemonic: SERVC Rx, Ry [RRE]
	 * Rx: SCLP command word, Ry: real address of the SCCB
	 */
	__asm__ __volatile__(
		"   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"   ipm	  %0\n"			   /* get condition code */
		"   srl	  %0,28"
		: "=&d" (cc)
		: "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	/*
	 * cc == 0: Service Call successfully initiated
	 * cc == 2: SCLP busy, new Service Call not initiated,
	 *	    new SCCB unchanged
	 * cc == 3: SCLP function not operational
	 */
	if (cc == SCLP_NOT_OPERATIONAL)
		return -EIO;
	if (cc == SCLP_BUSY)
		return -EBUSY;
	return 0;
}
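/*
 * Note (an assumption, not from the original source): 0xb2200000 is the
 * RRE-format opcode of SERVC; the raw .insn directive is used because
 * older assemblers do not know the servc mnemonic. The condition code
 * is extracted with ipm/srl and mapped onto 0/-EBUSY/-EIO for callers.
 */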
static void
sclp_start_request(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	/* quick exit if sclp is already in use */
	if (test_bit(SCLP_RUNNING, &sclp_status)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	/* Try to start requests from the request queue. */
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __service_call(req->command, req->sccb);
		if (rc == 0) {
			/* Successfully started request. */
			req->status = SCLP_REQ_RUNNING;
			/* Request active. Set running indication. */
			set_bit(SCLP_RUNNING, &sclp_status);
			break;
		}
		if (rc == -EBUSY) {
			/*
			 * SCLP is busy but no request is running.
			 * Try again later.
			 */
			if (!timer_pending(&sclp_busy_timer) ||
			    !mod_timer(&sclp_busy_timer,
				       jiffies + SCLP_BUSY_POLL_INTERVAL*HZ)) {
				sclp_busy_timer.function =
					(void *) sclp_start_request;
				sclp_busy_timer.expires =
					jiffies + SCLP_BUSY_POLL_INTERVAL*HZ;
				add_timer(&sclp_busy_timer);
			}
			break;
		}
		/* Request failed. */
		req->status = SCLP_REQ_FAILED;
		list_del(&req->list);
		if (req->callback != NULL) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
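/*
 * The timer idiom above arms exactly one busy retry: if no busy timer
 * is pending, one is set up to call sclp_start_request() again after
 * SCLP_BUSY_POLL_INTERVAL seconds; if one is already pending, the
 * mod_timer() call merely pushes its expiry back.
 */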
/*
 * Dispatch the event buffers contained in an SCCB to the registered
 * listeners. Returns -ENOSYS if an unrequested event buffer was found.
 */
static int
sclp_process_evbufs(struct sccb_header *sccb)
{
	int result;
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *t;

	spin_lock_irqsave(&sclp_lock, flags);
	evbuf = (struct evbuf_header *) (sccb + 1);
	result = 0;
	while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
		/* check registered event */
		t = NULL;
		list_for_each(l, &sclp_reg_list) {
			t = list_entry(l, struct sclp_register, list);
			if (t->receive_mask & (1 << (32 - evbuf->type))) {
				if (t->receiver_fn != NULL) {
					spin_unlock_irqrestore(&sclp_lock,
							       flags);
					t->receiver_fn(evbuf);
					spin_lock_irqsave(&sclp_lock, flags);
				}
				break;
			} else
				t = NULL;
		}
		/* Check for unrequested event buffer */
		if (t == NULL)
			result = -ENOSYS;
		evbuf = (struct evbuf_header *)
			((addr_t) evbuf + evbuf->length);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return result;
}
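/*
 * Note on the mask test above: event type t occupies bit 1 << (32 - t)
 * of an sccb_mask_t, i.e. types are numbered from the most significant
 * bit starting with 1. Assuming the EvTyp_* values from sclp.h, the
 * state change type 0x08 maps to 1 << 24 = 0x01000000, which is the
 * EvTyp_StateChange_Mask registered further below.
 */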
/*
 * Map an SCLP response code to a textual message.
 */
char *
sclp_error_message(u16 rc)
{
	static struct {
		u16 code; char *msg;
	} sclp_errors[] = {
		{ 0x0000, "No response code stored (machine malfunction)" },
		{ 0x0020, "Normal Completion" },
		{ 0x0040, "SCLP equipment check" },
		{ 0x0100, "SCCB boundary violation" },
		{ 0x01f0, "Invalid command" },
		{ 0x0220, "Normal Completion; suppressed buffers pending" },
		{ 0x0300, "Insufficient SCCB length" },
		{ 0x0340, "Contained SCLP equipment check" },
		{ 0x05f0, "Target resource in improper state" },
		{ 0x40f0, "Invalid function code/not installed" },
		{ 0x60f0, "No buffers stored" },
		{ 0x62f0, "No buffers stored; suppressed buffers pending" },
		{ 0x70f0, "Invalid selection mask" },
		{ 0x71f0, "Event buffer exceeds available space" },
		{ 0x72f0, "Inconsistent lengths" },
		{ 0x73f0, "Event buffer syntax error" }
	};
	int i;

	for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
		if (rc == sclp_errors[i].code)
			return sclp_errors[i].msg;
	return "Invalid response code";
}
/*
 * Postprocessing of an unconditional read service call.
 */
static void
sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
{
	struct sccb_header *sccb;

	sccb = read_req->sccb;
	if (sccb->response_code == 0x0020 ||
	    sccb->response_code == 0x0220) {
		if (sclp_process_evbufs(sccb) != 0)
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "unconditional read: "
			       "unrequested event buffer received.\n");
	}
	if (sccb->response_code != 0x0020)
		printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
		       "unconditional read: %s (response code=0x%x).\n",
		       sclp_error_message(sccb->response_code),
		       sccb->response_code);
	clear_bit(SCLP_READING, &sclp_status);
}
/*
 * Function to queue Read Event Data/Unconditional Read
 */
static void
__sclp_unconditional_read(void)
{
	struct sccb_header *sccb;
	struct sclp_req *read_req;

	/*
	 * Don't try to initiate Unconditional Read if we are not able to
	 * receive anything
	 */
	if (sclp_receive_mask == 0)
		return;
	/* Don't try reading if a read is already outstanding */
	if (test_and_set_bit(SCLP_READING, &sclp_status))
		return;
	/* Initialize read sccb */
	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;	/* unconditional read */
	sccb->control_mask[2] = 0x80;	/* variable length response */
	/* Initialize request structure */
	read_req = &sclp_read_req;
	read_req->command = SCLP_CMDW_READDATA;
	read_req->status = SCLP_REQ_QUEUED;
	read_req->callback = sclp_unconditional_read_cb;
	read_req->sccb = sccb;
	/* Add read request to the head of queue */
	list_add(&read_req->list, &sclp_req_queue);
}
/* Bit masks to interpret external interruption parameter contents. */
#define EXT_INT_SCCB_MASK		0xfffffff8
#define EXT_INT_STATECHANGE_PENDING	0x00000002
#define EXT_INT_EVBUF_PENDING		0x00000001
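/*
 * Example decode (hypothetical parameter value): ext_params == 0x12345679
 * yields finished_sccb == 0x12345678 (the address of the SCCB whose
 * request just completed) with EXT_INT_EVBUF_PENDING set, i.e. event
 * buffers are also waiting to be read.
 */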
/*
 * Handler for service-signal external interruptions
 */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
	u32 ext_int_param, finished_sccb, evbuf_pending;
	struct list_head *l;
	struct sclp_req *req, *tmp;

	spin_lock(&sclp_lock);
	/*
	 * Only process interrupt if sclp is initialized.
	 * This avoids strange effects for a pending request
	 * from before the last re-ipl.
	 */
	if (!test_bit(SCLP_INIT, &sclp_status)) {
		/* Now clear the running bit */
		clear_bit(SCLP_RUNNING, &sclp_status);
		spin_unlock(&sclp_lock);
		return;
	}
	ext_int_param = S390_lowcore.ext_params;
	finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
	evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
					 EXT_INT_STATECHANGE_PENDING);
	req = NULL;
	if (finished_sccb != 0U) {
		list_for_each(l, &sclp_req_queue) {
			tmp = list_entry(l, struct sclp_req, list);
			if (finished_sccb == (u32)(addr_t) tmp->sccb) {
				list_del(&tmp->list);
				req = tmp;
				break;
			}
		}
	}
	spin_unlock(&sclp_lock);
	/* Perform callback */
	if (req != NULL) {
		req->status = SCLP_REQ_DONE;
		if (req->callback != NULL)
			req->callback(req, req->callback_data);
	}
	spin_lock(&sclp_lock);
	/* Head queue a read sccb if an event buffer is pending */
	if (evbuf_pending)
		__sclp_unconditional_read();
	/* Now clear the running bit if SCLP indicated a finished SCCB */
	if (finished_sccb != 0U)
		clear_bit(SCLP_RUNNING, &sclp_status);
	spin_unlock(&sclp_lock);
	/* and start next request on the queue */
	sclp_start_request();
}
/*
 * Wait synchronously for external interrupt of sclp. We may not receive
 * any other external interrupt, so we disable all other external interrupts
 * in control register 0.
 */
void
sclp_sync_wait(void)
{
	unsigned long psw_mask;
	unsigned long cr0, cr0_sync;

	/* Prevent BH from executing. */
	local_bh_disable();
	/*
	 * save cr0,
	 * enable service signal external interruption (cr0.22),
	 * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31,
	 * don't touch any other bit in cr0.
	 */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);

	/* enable external interruptions (PSW-mask bit 7) */
	asm volatile ("STOSM 0(%1),0x01"
		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");

	/* wait until ISR signals receipt of interrupt */
	while (test_bit(SCLP_RUNNING, &sclp_status)) {
		barrier();
		cpu_relax();
	}

	/* disable external interruptions */
	asm volatile ("SSM 0(%0)"
		      : : "a" (&psw_mask) : "memory");

	/* restore control register 0 */
	__ctl_load(cr0, 0, 0);
	__local_bh_enable();
}
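/*
 * On the cr0 constants above: control register bits are numbered from
 * the most significant bit (bit 0), so bit n corresponds to the value
 * 1 << (31 - n). Bit 22 (service signal) is therefore 1 << 9 =
 * 0x00000200, and bits 20-21, 25, 27 and 30-31 together are 0x00000C53,
 * whose complement is the clearing mask 0xFFFFF3AC.
 */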
/*
 * Queue an SCLP request. Request will immediately be processed if queue is
 * empty.
 */
void
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;

	if (!test_bit(SCLP_INIT, &sclp_status)) {
		req->status = SCLP_REQ_FAILED;
		if (req->callback != NULL)
			req->callback(req, req->callback_data);
		return;
	}
	spin_lock_irqsave(&sclp_lock, flags);
	/* queue the request */
	req->status = SCLP_REQ_QUEUED;
	list_add_tail(&req->list, &sclp_req_queue);
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* try to start the first request on the queue */
	sclp_start_request();
}
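/*
 * Minimal usage sketch (illustrative only; my_sccb, my_callback and
 * my_data are hypothetical): the caller supplies an SCCB and a callback
 * that the interrupt handler invokes once the SCCB address shows up in
 * the interruption parameter.
 *
 *	req->command = SCLP_CMDW_READDATA;
 *	req->sccb = my_sccb;
 *	req->callback = my_callback;
 *	req->callback_data = my_data;
 *	sclp_add_request(req);
 */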
/* state change notification */
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8	validity_sclp_active_facility_mask : 1;
	u8	validity_sclp_receive_mask : 1;
	u8	validity_sclp_send_mask : 1;
	u8	validity_read_data_function_mask : 1;
	u16	_zeros : 12;
	u16	mask_length;
	u64	sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32	read_data_function_mask;
} __attribute__((packed));
static inline void
__sclp_notify_state_change(void)
{
	struct list_head *l;
	struct sclp_register *t;
	sccb_mask_t receive_mask, send_mask;

	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		receive_mask = t->receive_mask & sclp_receive_mask;
		send_mask = t->send_mask & sclp_send_mask;
		if (t->sclp_receive_mask != receive_mask ||
		    t->sclp_send_mask != send_mask) {
			t->sclp_receive_mask = receive_mask;
			t->sclp_send_mask = send_mask;
			if (t->state_change_fn != NULL)
				t->state_change_fn(t);
		}
	}
}
static void
sclp_state_change(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	spin_lock_irqsave(&sclp_lock, flags);
	scbuf = (struct sclp_statechangebuf *) evbuf;

	if (scbuf->validity_sclp_receive_mask) {
		if (scbuf->mask_length != sizeof(sccb_mask_t))
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "state change event with mask length %i\n",
			       scbuf->mask_length);
		else
			/* set new receive mask */
			sclp_receive_mask = scbuf->sclp_receive_mask;
	}
	if (scbuf->validity_sclp_send_mask) {
		if (scbuf->mask_length != sizeof(sccb_mask_t))
			printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
			       "state change event with mask length %i\n",
			       scbuf->mask_length);
		else
			/* set new send mask */
			sclp_send_mask = scbuf->sclp_send_mask;
	}
	__sclp_notify_state_change();
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change
};
/*
 * SCLP quiesce event handler
 */
#ifdef CONFIG_SMP
static void
do_load_quiesce_psw(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);
	psw_t quiesce_psw;
	__u32 status;
	int i;

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);
	/* Wait for all other cpus to enter stopped state */
	i = 1;
	while (i < NR_CPUS) {
		if (!cpu_online(i)) {
			i++;
			continue;
		}
		switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
		case sigp_order_code_accepted:
		case sigp_status_stored:
			/* Check for stopped and check stop state */
			if (status & 0x50)
				i++;
			break;
		case sigp_not_operational:
			i++;
			break;
		default:
			break;
		}
	}
	/* Quiesce the last cpu with the special psw */
	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}

static void
do_machine_quiesce(void)
{
	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
do_machine_quiesce(void)
{
	psw_t quiesce_psw;

	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}
#endif
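/*
 * In both variants the surviving CPU ends up in a wait PSW with address
 * 0xfff: the special bit pattern ("quiesce PSW") that the handler
 * comment below refers to.
 */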
extern void ctrl_alt_del(void);

static void
sclp_quiesce(struct evbuf_header *evbuf)
{
	/*
	 * We got a "shutdown" request.
	 * Add a call to an appropriate "shutdown" routine here. This
	 * routine should set all PSWs to 'disabled-wait', 'stopped'
	 * or 'check-stopped' - except 1 PSW which needs to carry a
	 * special bit pattern called 'quiesce PSW'.
	 */
	_machine_restart = (void *) do_machine_quiesce;
	_machine_halt = do_machine_quiesce;
	_machine_power_off = do_machine_quiesce;
	ctrl_alt_del();
}

static struct sclp_register sclp_quiesce_event = {
	.receive_mask = EvTyp_SigQuiesce_Mask,
	.receiver_fn = sclp_quiesce
};
/* initialisation of SCLP */
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

static void sclp_init_mask_retry(unsigned long);
static int
sclp_init_mask(void)
{
	unsigned long flags;
	struct init_sccb *sccb;
	struct sclp_req *req;
	struct list_head *l;
	struct sclp_register *t;
	int rc;

	sccb = (struct init_sccb *) sclp_init_sccb;
	/* stick the request structure to the end of the init sccb page */
	req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;

	/* SCLP setup concerning receiving and sending Event Buffers */
	req->command = SCLP_CMDW_WRITEMASK;
	req->status = SCLP_REQ_QUEUED;
	req->callback = NULL;
	req->sccb = sccb;
	/* setup sccb for writemask command */
	memset(sccb, 0, sizeof(struct init_sccb));
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	/* copy in the sccb mask of the registered event types */
	spin_lock_irqsave(&sclp_lock, flags);
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		sccb->receive_mask |= t->receive_mask;
		sccb->send_mask |= t->send_mask;
	}
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
	if (test_bit(SCLP_INIT, &sclp_status)) {
		/* add request to sclp queue */
		list_add_tail(&req->list, &sclp_req_queue);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* and start if SCLP is idle */
		sclp_start_request();
		/* now wait for completion */
		while (req->status != SCLP_REQ_DONE &&
		       req->status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
	} else {
		/*
		 * Special case for the very first write mask command.
		 * The interrupt handler is not removing requests from
		 * the request queue and doesn't call callbacks yet
		 * because there might be a pending old interrupt
		 * after a Re-IPL. We have to receive and ignore it.
		 */
		do {
			rc = __service_call(req->command, req->sccb);
			if (rc == 0)
				set_bit(SCLP_RUNNING, &sclp_status);
			spin_unlock_irqrestore(&sclp_lock, flags);
			if (rc == -EIO)
				return -ENOSYS;
			sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
		} while (rc == -EBUSY);
	}
	if (sccb->header.response_code != 0x0020) {
		/* WRITEMASK failed - we cannot rely on receiving a state
		   change event, so initially, polling is the only alternative
		   for us to ever become operational. */
		if (!timer_pending(&retry_timer) ||
		    !mod_timer(&retry_timer,
			       jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) {
			retry_timer.function = sclp_init_mask_retry;
			retry_timer.data = 0;
			retry_timer.expires = jiffies +
				SCLP_INIT_POLL_INTERVAL*HZ;
			add_timer(&retry_timer);
		}
	} else {
		sclp_receive_mask = sccb->sclp_receive_mask;
		sclp_send_mask = sccb->sclp_send_mask;
		__sclp_notify_state_change();
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return 0;
}
static void
sclp_init_mask_retry(unsigned long data)
{
	sclp_init_mask();
}
/*
 * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
 */
static int
sclp_init(void)
{
	int rc;

	if (test_bit(SCLP_INIT, &sclp_status))
		/* Already initialized. */
		return 0;

	spin_lock_init(&sclp_lock);
	INIT_LIST_HEAD(&sclp_req_queue);

	/* init event list */
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	list_add(&sclp_quiesce_event.list, &sclp_reg_list);

	/*
	 * request the 0x2401 external interrupt
	 * The sclp driver is initialized early (before kmalloc works). We
	 * need to use register_early_external_interrupt.
	 */
	if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					      &ext_int_info_hwc) != 0)
		return -EBUSY;

	/* enable service-signal external interruptions,
	 * Control Register 0 bit 22 := 1
	 * (besides, PSW bit 7 must be set to 1 sometimes for external
	 *  interruptions)
	 */
	ctl_set_bit(0, 9);

	init_timer(&retry_timer);
	init_timer(&sclp_busy_timer);
	/* do the initial write event mask */
	rc = sclp_init_mask();
	if (rc == 0) {
		/* Ok, now everything is setup right. */
		set_bit(SCLP_INIT, &sclp_status);
		return 0;
	}

	/* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
	ctl_clear_bit(0, 9);
	unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
					    &ext_int_info_hwc);
	return rc;
}
/*
 * Register the SCLP event listener identified by REG. Return 0 on success.
 * Some error codes and their meaning:
 *
 * -ENODEV = SCLP interface is not supported on this machine
 *  -EBUSY = there is already a listener registered for the requested
 *	     event type
 *    -EIO = SCLP interface is currently not operational
 */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_register *t;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;

	if (!test_bit(SCLP_INIT, &sclp_status))
		sclp_init();
	spin_lock_irqsave(&sclp_lock, flags);
	/* check already registered event masks for collisions */
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		if (t->receive_mask & reg->receive_mask ||
		    t->send_mask & reg->send_mask) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			return -EBUSY;
		}
	}
	/*
	 * set present mask to 0 to trigger state change
	 * callback in sclp_init_mask
	 */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask();
	return 0;
}
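/*
 * Illustrative registration sketch (not part of this file; the listener
 * and its callbacks are hypothetical, EvTyp_Msg_Mask is assumed to be
 * defined in sclp.h):
 *
 *	static struct sclp_register my_sclp_event = {
 *		.receive_mask = EvTyp_Msg_Mask,
 *		.receiver_fn = my_receiver_fn,
 *		.state_change_fn = my_state_change_fn
 *	};
 *
 *	rc = sclp_register(&my_sclp_event);   returns 0, or -ENODEV,
 *					      -EBUSY, -EIO as documented
 */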
/*
 * Unregister the SCLP event listener identified by REG.
 */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask();
}
#define SCLP_EVBUF_PROCESSED	0x80

/*
 * Traverse array of event buffers contained in SCCB and remove all buffers
 * with a set "processed" flag. Return the number of unprocessed buffers.
 */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
			/* Move the remaining buffers over this one */
			sccb->length -= evbuf->length;
			memcpy((void *) evbuf,
			       (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
				((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}
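/*
 * Worked example: for an SCCB holding three event buffers A, B, C where
 * only B carries the 0x80 "processed" flag, C is copied over B, the SCCB
 * length shrinks by B's length, and the function returns 2 since A and C
 * remain unprocessed.
 */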
module_init(sclp_init);

EXPORT_SYMBOL(sclp_add_request);
EXPORT_SYMBOL(sclp_sync_wait);
EXPORT_SYMBOL(sclp_register);
EXPORT_SYMBOL(sclp_unregister);
EXPORT_SYMBOL(sclp_error_message);