/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/s390_ext.h>
#include <asm/processor.h>

#include "sclp.h"

#define SCLP_CORE_PRINT_HEADER "sclp low level driver: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* spinlock to protect global variables of sclp_core */
static spinlock_t sclp_lock;

/* Mask of valid sclp events */
static sccb_mask_t sclp_receive_mask;
static sccb_mask_t sclp_send_mask;

/* List of registered event types */
static struct list_head sclp_reg_list;

/* sccb queue */
static struct list_head sclp_req_queue;

/* sccb for unconditional read */
static struct sclp_req sclp_read_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* sccb for the write mask command */
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for init mask retries. */
static struct timer_list retry_timer;

static volatile unsigned long sclp_status = 0;
/* some status flags */
#define SCLP_INIT               0   /* set once the interface is initialized */
#define SCLP_RUNNING            1   /* a service call is in progress */
#define SCLP_READING            2   /* an unconditional read is queued or running */

/* retry interval (in seconds) for the initial write event mask */
#define SCLP_INIT_POLL_INTERVAL 1

/* condition codes returned by the service call instruction */
#define SCLP_COMMAND_INITIATED  0
#define SCLP_BUSY               2
#define SCLP_NOT_OPERATIONAL    3

/*
 * assembler instruction for Service Call
 */
static int
__service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        /*
         *  Mnemonic:   SERVC   Rx, Ry  [RRE]
         *
         *  Rx: SCLP command word
         *  Ry: address of SCCB
         */
        __asm__ __volatile__(
                "   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "   ipm   %0\n"
                "   srl   %0,28"
                : "=&d" (cc)
                : "d" (command), "a" (__pa(sccb))
                : "cc", "memory" );
        /*
         * cc == 0:   Service Call successfully initiated
         * cc == 2:   SCLP busy, new Service Call not initiated,
         *            new SCCB unchanged
         * cc == 3:   SCLP function not operational
         */
        if (cc == SCLP_NOT_OPERATIONAL)
                return -EIO;
        /*
         * We set the SCLP_RUNNING bit for cc 2 as well because if
         * service_call returns cc 2 some old request is running
         * that has to complete first
         */
        set_bit(SCLP_RUNNING, &sclp_status);
        if (cc == SCLP_BUSY)
                return -EBUSY;
        return 0;
}

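/*
 * Try to start the first request on the queue. Returns 0 if a request
 * was started, -EBUSY if the SCLP interface is already in use, -EINVAL
 * if the queue is empty and -EIO if the SCLP is not operational. If the
 * service call fails, the request is dequeued and marked SCLP_REQ_FAILED;
 * for -EIO its callback is invoked as well.
 */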
static int
sclp_start_request(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        /* quick exit if sclp is already in use */
        if (test_bit(SCLP_RUNNING, &sclp_status))
                return -EBUSY;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Get first request on queue if available */
        req = NULL;
        if (!list_empty(&sclp_req_queue))
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
        if (req) {
                rc = __service_call(req->command, req->sccb);
                if (rc) {
                        req->status = SCLP_REQ_FAILED;
                        list_del(&req->list);
                } else
                        req->status = SCLP_REQ_RUNNING;
        } else
                rc = -EINVAL;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (rc == -EIO && req->callback != NULL)
                req->callback(req, req->callback_data);
        return rc;
}

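/*
 * Walk through all event buffers contained in the SCCB and hand each one
 * to the receiver function of the matching registered event type.
 * Returns 0 if every buffer matched a registered type, -ENOSYS if at
 * least one buffer was of an unrequested event type.
 */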
static int
sclp_process_evbufs(struct sccb_header *sccb)
{
        int result;
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *t;

        spin_lock_irqsave(&sclp_lock, flags);
        evbuf = (struct evbuf_header *) (sccb + 1);
        result = 0;
        while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
                /* check registered event */
                t = NULL;
                list_for_each(l, &sclp_reg_list) {
                        t = list_entry(l, struct sclp_register, list);
                        if (t->receive_mask & (1 << (32 - evbuf->type))) {
                                if (t->receiver_fn != NULL) {
                                        spin_unlock_irqrestore(&sclp_lock,
                                                               flags);
                                        t->receiver_fn(evbuf);
                                        spin_lock_irqsave(&sclp_lock, flags);
                                }
                                break;
                        }
                        else
                                t = NULL;
                }
                /* Check for unrequested event buffer */
                if (t == NULL)
                        result = -ENOSYS;
                evbuf = (struct evbuf_header *)
                                ((addr_t) evbuf + evbuf->length);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return result;
}

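/*
 * Translate an SCLP response code into a human-readable message.
 */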
char *
sclp_error_message(u16 rc)
{
        static struct {
                u16 code; char *msg;
        } sclp_errors[] = {
                { 0x0000, "No response code stored (machine malfunction)" },
                { 0x0020, "Normal Completion" },
                { 0x0040, "SCLP equipment check" },
                { 0x0100, "SCCB boundary violation" },
                { 0x01f0, "Invalid command" },
                { 0x0220, "Normal Completion; suppressed buffers pending" },
                { 0x0300, "Insufficient SCCB length" },
                { 0x0340, "Contained SCLP equipment check" },
                { 0x05f0, "Target resource in improper state" },
                { 0x40f0, "Invalid function code/not installed" },
                { 0x60f0, "No buffers stored" },
                { 0x62f0, "No buffers stored; suppressed buffers pending" },
                { 0x70f0, "Invalid selection mask" },
                { 0x71f0, "Event buffer exceeds available space" },
                { 0x72f0, "Inconsistent lengths" },
                { 0x73f0, "Event buffer syntax error" }
        };
        int i;
        for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
                if (rc == sclp_errors[i].code)
                        return sclp_errors[i].msg;
        return "Invalid response code";
}

/*
 * postprocessing of unconditional read service call
 */
static void
sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
{
        struct sccb_header *sccb;

        sccb = read_req->sccb;
        if (sccb->response_code == 0x0020 ||
            sccb->response_code == 0x0220) {
                if (sclp_process_evbufs(sccb) != 0)
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "unconditional read: "
                               "unrequested event buffer received.\n");
        }

        if (sccb->response_code != 0x0020)
                printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                       "unconditional read: %s (response code=0x%x).\n",
                       sclp_error_message(sccb->response_code),
                       sccb->response_code);

        clear_bit(SCLP_READING, &sclp_status);
}

/*
 * Function to queue Read Event Data/Unconditional Read
 */
static void
__sclp_unconditional_read(void)
{
        struct sccb_header *sccb;
        struct sclp_req *read_req;

        /*
         * Don't try to initiate Unconditional Read if we are not able to
         * receive anything
         */
        if (sclp_receive_mask == 0)
                return;
        /* Don't try reading if a read is already outstanding */
        if (test_and_set_bit(SCLP_READING, &sclp_status))
                return;
        /* Initialize read sccb */
        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;        /* unconditional read */
        sccb->control_mask[2] = 0x80;   /* variable length response */
        /* Initialize request structure */
        read_req = &sclp_read_req;
        read_req->command = SCLP_CMDW_READDATA;
        read_req->status = SCLP_REQ_QUEUED;
        read_req->callback = sclp_unconditional_read_cb;
        read_req->sccb = sccb;
        /* Add read request to the head of queue */
        list_add(&read_req->list, &sclp_req_queue);
}

/* Bit masks to interpret external interruption parameter contents. */
#define EXT_INT_SCCB_MASK               0xfffffff8
#define EXT_INT_STATECHANGE_PENDING     0x00000002
#define EXT_INT_EVBUF_PENDING           0x00000001

/*
 * Handler for service-signal external interruptions
 */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
        u32 ext_int_param, finished_sccb, evbuf_pending;
        struct list_head *l;
        struct sclp_req *req, *tmp;

        spin_lock(&sclp_lock);
        /*
         * Only process interrupt if sclp is initialized.
         * This avoids strange effects for a pending request
         * from before the last re-ipl.
         */
        if (!test_bit(SCLP_INIT, &sclp_status)) {
                /* Now clear the running bit */
                clear_bit(SCLP_RUNNING, &sclp_status);
                spin_unlock(&sclp_lock);
                return;
        }
        ext_int_param = S390_lowcore.ext_params;
        finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
        evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
                                         EXT_INT_STATECHANGE_PENDING);
        req = NULL;
        if (finished_sccb != 0U) {
                list_for_each(l, &sclp_req_queue) {
                        tmp = list_entry(l, struct sclp_req, list);
                        if (finished_sccb == (u32)(addr_t) tmp->sccb) {
                                list_del(&tmp->list);
                                req = tmp;
                                break;
                        }
                }
        }
        spin_unlock(&sclp_lock);
        /* Perform callback */
        if (req != NULL) {
                req->status = SCLP_REQ_DONE;
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
        }
        spin_lock(&sclp_lock);
        /* Queue a read sccb at the head of the queue if an event buffer is pending */
        if (evbuf_pending)
                __sclp_unconditional_read();
        /* Now clear the running bit if SCLP indicated a finished SCCB */
        if (finished_sccb != 0U)
                clear_bit(SCLP_RUNNING, &sclp_status);
        spin_unlock(&sclp_lock);
        /* and start next request on the queue */
        sclp_start_request();
}

/*
 * Wait synchronously for the sclp external interrupt. We must not receive
 * any other external interrupt, so we disable all other external interrupts
 * in control register 0.
 */
void
sclp_sync_wait(void)
{
        unsigned long psw_mask;
        unsigned long cr0, cr0_sync;

        /* Prevent BH from executing. */
        local_bh_disable();
        /*
         * save cr0
         * enable service signal external interruption (cr0.22)
         * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31
         * don't touch any other bit in cr0
         */
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync |= 0x00000200;
        cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);

        /* enable external interruptions (PSW-mask.7) */
        asm volatile ("STOSM 0(%1),0x01"
                      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");

        /* wait until ISR signals receipt of interrupt */
        while (test_bit(SCLP_RUNNING, &sclp_status)) {
                barrier();
                cpu_relax();
        }

        /* disable external interruptions */
        asm volatile ("SSM 0(%0)"
                      : : "a" (&psw_mask) : "memory");

        /* restore cr0 */
        __ctl_load(cr0, 0, 0);
        __local_bh_enable();
}

/*
 * Queue an SCLP request. Request will immediately be processed if queue is
 * empty.
 */
void
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;

        if (!test_bit(SCLP_INIT, &sclp_status)) {
                req->status = SCLP_REQ_FAILED;
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
                return;
        }
        spin_lock_irqsave(&sclp_lock, flags);
        /* queue the request */
        req->status = SCLP_REQ_QUEUED;
        list_add_tail(&req->list, &sclp_req_queue);
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* try to start the first request on the queue */
        sclp_start_request();
}

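/*
 * A minimal usage sketch (illustrative only, not taken from this driver;
 * my_callback, my_data and the choice of SCLP_CMDW_WRITEDATA as command
 * word are assumptions). The caller owns both the page-aligned SCCB and
 * the sclp_req structure and learns about completion through the callback
 * or by polling the request status:
 *
 *        req->command = SCLP_CMDW_WRITEDATA;
 *        req->sccb = sccb;
 *        req->callback = my_callback;
 *        req->callback_data = my_data;
 *        sclp_add_request(req);
 *        while (req->status != SCLP_REQ_DONE && req->status != SCLP_REQ_FAILED)
 *                sclp_sync_wait();
 *
 * The busy wait with sclp_sync_wait() mirrors what sclp_init_mask() below
 * does for its own write mask request.
 */
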
/* state change notification */
struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));

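/*
 * Recompute the effective receive and send masks of every registered
 * event type and call its state_change_fn if they have changed. The
 * callers hold sclp_lock.
 */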
static inline void
__sclp_notify_state_change(void)
{
        struct list_head *l;
        struct sclp_register *t;
        sccb_mask_t receive_mask, send_mask;

        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                receive_mask = t->receive_mask & sclp_receive_mask;
                send_mask = t->send_mask & sclp_send_mask;
                if (t->sclp_receive_mask != receive_mask ||
                    t->sclp_send_mask != send_mask) {
                        t->sclp_receive_mask = receive_mask;
                        t->sclp_send_mask = send_mask;
                        if (t->state_change_fn != NULL)
                                t->state_change_fn(t);
                }
        }
}

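/*
 * Receiver function for state change event buffers: update the global
 * receive and send masks and notify all registered event types.
 */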
static void
sclp_state_change(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        spin_lock_irqsave(&sclp_lock, flags);
        scbuf = (struct sclp_statechangebuf *) evbuf;

        if (scbuf->validity_sclp_receive_mask) {
                if (scbuf->mask_length != sizeof(sccb_mask_t))
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "state change event with mask length %i\n",
                               scbuf->mask_length);
                else
                        /* set new receive mask */
                        sclp_receive_mask = scbuf->sclp_receive_mask;
        }

        if (scbuf->validity_sclp_send_mask) {
                if (scbuf->mask_length != sizeof(sccb_mask_t))
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "state change event with mask length %i\n",
                               scbuf->mask_length);
                else
                        /* set new send mask */
                        sclp_send_mask = scbuf->sclp_send_mask;
        }

        __sclp_notify_state_change();
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EvTyp_StateChange_Mask,
        .receiver_fn = sclp_state_change
};


/*
 * SCLP quiesce event handler
 */
#ifdef CONFIG_SMP
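/*
 * Executed on every cpu by do_machine_quiesce(): all cpus except cpu 0
 * stop themselves; cpu 0 waits until the other cpus are stopped or
 * check-stopped and then loads the quiesce PSW.
 */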
static void
do_load_quiesce_psw(void * __unused)
{
        psw_t quiesce_psw;
        unsigned long status;
        int i;

        if (smp_processor_id() != 0)
                signal_processor(smp_processor_id(), sigp_stop);
        /* Wait for all other cpus to enter stopped state */
        i = 1;
        while (i < NR_CPUS) {
                if (!cpu_online(i)) {
                        i++;
                        continue;
                }
                switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
                case sigp_order_code_accepted:
                case sigp_status_stored:
                        /* Check for stopped and check stop state */
                        if (test_bit(6, &status) || test_bit(4, &status))
                                i++;
                        break;
                case sigp_busy:
                        break;
                case sigp_not_operational:
                        i++;
                        break;
                }
        }
        /* Quiesce the last cpu with the special psw */
        quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
        quiesce_psw.addr = 0xfff;
        __load_psw(quiesce_psw);
}

static void
do_machine_quiesce(void)
{
        on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
do_machine_quiesce(void)
{
        psw_t quiesce_psw;

        quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
        quiesce_psw.addr = 0xfff;
        __load_psw(quiesce_psw);
}
#endif

extern void ctrl_alt_del(void);

static void
sclp_quiesce(struct evbuf_header *evbuf)
{
        /*
         * We got a "shutdown" request.
         * Add a call to an appropriate "shutdown" routine here. This
         * routine should set all PSWs to 'disabled-wait', 'stopped'
         * or 'check-stopped' - except 1 PSW which needs to carry a
         * special bit pattern called 'quiesce PSW'.
         */
        _machine_restart = (void *) do_machine_quiesce;
        _machine_halt = do_machine_quiesce;
        _machine_power_off = do_machine_quiesce;
        ctrl_alt_del();
}

static struct sclp_register sclp_quiesce_event = {
        .receive_mask = EvTyp_SigQuiesce_Mask,
        .receiver_fn = sclp_quiesce
};

/* initialisation of SCLP */
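/*
 * SCCB layout for the write event mask command: receive_mask and
 * send_mask are filled in by the driver, the sclp_* masks are returned
 * by the SCLP.
 */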
struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

static void sclp_init_mask_retry(unsigned long);

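/*
 * Announce the combined receive and send masks of all registered event
 * types to the SCLP with a write event mask command and store the masks
 * granted by the SCLP in return. If the command fails, a timer is set up
 * to retry the mask initialization later.
 */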
static int
sclp_init_mask(void)
{
        unsigned long flags;
        struct init_sccb *sccb;
        struct sclp_req *req;
        struct list_head *l;
        struct sclp_register *t;
        int rc;

        sccb = (struct init_sccb *) sclp_init_sccb;
        /* stick the request structure to the end of the init sccb page */
        req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;

        /* SCLP setup concerning receiving and sending Event Buffers */
        req->command = SCLP_CMDW_WRITEMASK;
        req->status = SCLP_REQ_QUEUED;
        req->callback = NULL;
        req->sccb = sccb;
        /* setup sccb for writemask command */
        memset(sccb, 0, sizeof(struct init_sccb));
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        /* copy in the sccb mask of the registered event types */
        spin_lock_irqsave(&sclp_lock, flags);
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                sccb->receive_mask |= t->receive_mask;
                sccb->send_mask |= t->send_mask;
        }
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
        if (test_bit(SCLP_INIT, &sclp_status)) {
                /* add request to sclp queue */
                list_add_tail(&req->list, &sclp_req_queue);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* and start if SCLP is idle */
                sclp_start_request();
                /* now wait for completion */
                while (req->status != SCLP_REQ_DONE &&
                       req->status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
        } else {
                /*
                 * Special case for the very first write mask command.
                 * The interrupt handler is not removing the request from
                 * the request queue and doesn't call callbacks yet,
                 * because there might be a pending old interrupt
                 * after a Re-IPL. We have to receive and ignore it.
                 */
                do {
                        rc = __service_call(req->command, req->sccb);
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        if (rc == -EIO)
                                return -ENOSYS;
                        sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                } while (rc == -EBUSY);
        }
        if (sccb->header.response_code != 0x0020) {
                /* WRITEMASK failed - we cannot rely on receiving a state
                   change event, so initially, polling is the only alternative
                   for us to ever become operational. */
                if (!timer_pending(&retry_timer) ||
                    !mod_timer(&retry_timer,
                               jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) {
                        retry_timer.function = sclp_init_mask_retry;
                        retry_timer.data = 0;
                        retry_timer.expires = jiffies +
                                SCLP_INIT_POLL_INTERVAL*HZ;
                        add_timer(&retry_timer);
                }
        } else {
                sclp_receive_mask = sccb->sclp_receive_mask;
                sclp_send_mask = sccb->sclp_send_mask;
                __sclp_notify_state_change();
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return 0;
}

static void
sclp_init_mask_retry(unsigned long data)
{
        sclp_init_mask();
}

/*
 * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
 */
static int
sclp_init(void)
{
        int rc;

        if (test_bit(SCLP_INIT, &sclp_status))
                /* Already initialized. */
                return 0;

        spin_lock_init(&sclp_lock);
        INIT_LIST_HEAD(&sclp_req_queue);

        /* init event list */
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        list_add(&sclp_quiesce_event.list, &sclp_reg_list);

        /*
         * request the 0x2401 external interrupt
         * The sclp driver is initialized early (before kmalloc works). We
         * need to use register_early_external_interrupt.
         */
        if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                              &ext_int_info_hwc) != 0)
                return -EBUSY;

        /* Enable service-signal external interruptions:
         * set Control Register 0 bit 22 to 1.
         * (In addition, PSW bit 7 must be set to 1 for external
         * interruptions to be delivered at all.)
         */
        ctl_set_bit(0, 9);

        init_timer(&retry_timer);
        /* do the initial write event mask */
        rc = sclp_init_mask();
        if (rc == 0) {
                /* Ok, now everything is setup right. */
                set_bit(SCLP_INIT, &sclp_status);
                return 0;
        }

        /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
        ctl_clear_bit(0, 9);
        unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                            &ext_int_info_hwc);

        return rc;
}

/*
 * Register the SCLP event listener identified by REG. Return 0 on success.
 * Some error codes and their meaning:
 *
 *  -ENODEV = SCLP interface is not supported on this machine
 *   -EBUSY = there is already a listener registered for the requested
 *            event type
 *     -EIO = SCLP interface is currently not operational
 */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        struct list_head *l;
        struct sclp_register *t;

        if (!MACHINE_HAS_SCLP)
                return -ENODEV;

        if (!test_bit(SCLP_INIT, &sclp_status))
                sclp_init();
        spin_lock_irqsave(&sclp_lock, flags);
        /* check already registered event masks for collisions */
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                if (t->receive_mask & reg->receive_mask ||
                    t->send_mask & reg->send_mask) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        return -EBUSY;
                }
        }
        /*
         * set present mask to 0 to trigger state change
         * callback in sclp_init_mask
         */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask();
        return 0;
}

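/*
 * Registration sketch (illustrative only; my_receiver_fn, my_state_change_fn
 * and the use of EvTyp_Msg_Mask are assumptions, the mask is expected to be
 * one of the event type masks from sclp.h):
 *
 *        static struct sclp_register my_listener = {
 *                .receive_mask = EvTyp_Msg_Mask,
 *                .receiver_fn = my_receiver_fn,
 *                .state_change_fn = my_state_change_fn,
 *        };
 *
 *        rc = sclp_register(&my_listener);
 *
 * receiver_fn is called from sclp_process_evbufs() for every incoming event
 * buffer whose type is covered by receive_mask; state_change_fn is called
 * whenever the effective receive or send mask of the listener changes.
 */
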
/*
 * Unregister the SCLP event listener identified by REG.
 */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask();
}

#define SCLP_EVBUF_PROCESSED    0x80

/*
 * Traverse array of event buffers contained in SCCB and remove all buffers
 * with a set "processed" flag. Return the number of unprocessed buffers.
 */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
                        sccb->length -= evbuf->length;
                        memcpy((void *) evbuf,
                               (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }

        return unprocessed;
}

module_init(sclp_init);

EXPORT_SYMBOL(sclp_add_request);
EXPORT_SYMBOL(sclp_sync_wait);
EXPORT_SYMBOL(sclp_register);
EXPORT_SYMBOL(sclp_unregister);
EXPORT_SYMBOL(sclp_error_message);