/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/s390_ext.h>
#include <asm/processor.h>

#include "sclp.h"

#define SCLP_CORE_PRINT_HEADER "sclp low level driver: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* spinlock to protect global variables of sclp_core */
static spinlock_t sclp_lock;

/* Mask of valid sclp events */
static sccb_mask_t sclp_receive_mask;
static sccb_mask_t sclp_send_mask;

/* List of registered event types */
static struct list_head sclp_reg_list;

/* sccb queue */
static struct list_head sclp_req_queue;

/* sccb for unconditional read */
static struct sclp_req sclp_read_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
/* sccb for write mask sccb */
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for init mask retries. */
static struct timer_list retry_timer;

/* Timer for busy retries. */
static struct timer_list sclp_busy_timer;

static volatile unsigned long sclp_status = 0;
/* some status flags */
#define SCLP_INIT               0
#define SCLP_RUNNING            1
#define SCLP_READING            2

#define SCLP_INIT_POLL_INTERVAL 1
#define SCLP_BUSY_POLL_INTERVAL 1

#define SCLP_COMMAND_INITIATED  0
#define SCLP_BUSY               2
#define SCLP_NOT_OPERATIONAL    3

/*
 * assembler instruction for Service Call
 */
static int
__service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        /*
         *  Mnemonic:   SERVC   Rx, Ry  [RRE]
         *
         *  Rx: SCLP command word
         *  Ry: address of SCCB
         */
        __asm__ __volatile__(
                "   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "   ipm   %0\n"
                "   srl   %0,28"
                : "=&d" (cc)
                : "d" (command), "a" (__pa(sccb))
                : "cc", "memory" );
        /*
         * cc == 0:   Service Call successfully initiated
         * cc == 2:   SCLP busy, new Service Call not initiated,
         *            new SCCB unchanged
         * cc == 3:   SCLP function not operational
         */
        if (cc == SCLP_NOT_OPERATIONAL)
                return -EIO;
        if (cc == SCLP_BUSY)
                return -EBUSY;
        return 0;
}

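/*
 * Try to start the first request on the request queue: set the
 * SCLP_RUNNING bit once the service element accepts a request, retry
 * later via sclp_busy_timer on -EBUSY, and complete failed requests
 * through their callback.
 */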
static void
sclp_start_request(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        /* quick exit if sclp is already in use */
        if (test_bit(SCLP_RUNNING, &sclp_status)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        /* Try to start requests from the request queue. */
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                rc = __service_call(req->command, req->sccb);
                if (rc == 0) {
                        /* Successfully started request. */
                        req->status = SCLP_REQ_RUNNING;
                        /* Request active. Set running indication. */
                        set_bit(SCLP_RUNNING, &sclp_status);
                        break;
                }
                if (rc == -EBUSY) {
                        /*
                         * SCLP is busy but no request is running.
                         * Try again later.
                         */
                        if (!timer_pending(&sclp_busy_timer) ||
                            !mod_timer(&sclp_busy_timer,
                                       jiffies + SCLP_BUSY_POLL_INTERVAL*HZ)) {
                                sclp_busy_timer.function =
                                        (void *) sclp_start_request;
                                sclp_busy_timer.expires =
                                        jiffies + SCLP_BUSY_POLL_INTERVAL*HZ;
                                add_timer(&sclp_busy_timer);
                        }
                        break;
                }
                /* Request failed. */
                req->status = SCLP_REQ_FAILED;
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

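/*
 * Walk the event buffers contained in the SCCB and hand each one to
 * the listener registered for its event type. Returns -ENOSYS if a
 * buffer arrives for which no listener is registered.
 */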
static int
sclp_process_evbufs(struct sccb_header *sccb)
{
        int result;
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *t;

        spin_lock_irqsave(&sclp_lock, flags);
        evbuf = (struct evbuf_header *) (sccb + 1);
        result = 0;
        while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
                /* check registered event */
                t = NULL;
                list_for_each(l, &sclp_reg_list) {
                        t = list_entry(l, struct sclp_register, list);
                        if (t->receive_mask & (1 << (32 - evbuf->type))) {
                                if (t->receiver_fn != NULL) {
                                        spin_unlock_irqrestore(&sclp_lock,
                                                               flags);
                                        t->receiver_fn(evbuf);
                                        spin_lock_irqsave(&sclp_lock, flags);
                                }
                                break;
                        }
                        else
                                t = NULL;
                }
                /* Check for unrequested event buffer */
                if (t == NULL)
                        result = -ENOSYS;
                evbuf = (struct evbuf_header *)
                                ((addr_t) evbuf + evbuf->length);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return result;
}

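/* Return a human-readable message for an SCLP response code. */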
char *
sclp_error_message(u16 rc)
{
        static struct {
                u16 code; char *msg;
        } sclp_errors[] = {
                { 0x0000, "No response code stored (machine malfunction)" },
                { 0x0020, "Normal Completion" },
                { 0x0040, "SCLP equipment check" },
                { 0x0100, "SCCB boundary violation" },
                { 0x01f0, "Invalid command" },
                { 0x0220, "Normal Completion; suppressed buffers pending" },
                { 0x0300, "Insufficient SCCB length" },
                { 0x0340, "Contained SCLP equipment check" },
                { 0x05f0, "Target resource in improper state" },
                { 0x40f0, "Invalid function code/not installed" },
                { 0x60f0, "No buffers stored" },
                { 0x62f0, "No buffers stored; suppressed buffers pending" },
                { 0x70f0, "Invalid selection mask" },
                { 0x71f0, "Event buffer exceeds available space" },
                { 0x72f0, "Inconsistent lengths" },
                { 0x73f0, "Event buffer syntax error" }
        };
        int i;
        for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
                if (rc == sclp_errors[i].code)
                        return sclp_errors[i].msg;
        return "Invalid response code";
}

/*
 * postprocessing of unconditional read service call
 */
static void
sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
{
        struct sccb_header *sccb;

        sccb = read_req->sccb;
        if (sccb->response_code == 0x0020 ||
            sccb->response_code == 0x0220) {
                if (sclp_process_evbufs(sccb) != 0)
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "unconditional read: "
                               "unrequested event buffer received.\n");
        }

        if (sccb->response_code != 0x0020)
                printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                       "unconditional read: %s (response code=0x%x).\n",
                       sclp_error_message(sccb->response_code),
                       sccb->response_code);

        clear_bit(SCLP_READING, &sclp_status);
}

/*
 * Function to queue Read Event Data/Unconditional Read
 */
static void
__sclp_unconditional_read(void)
{
        struct sccb_header *sccb;
        struct sclp_req *read_req;

        /*
         * Don't try to initiate Unconditional Read if we are not able to
         * receive anything
         */
        if (sclp_receive_mask == 0)
                return;
        /* Don't try reading if a read is already outstanding */
        if (test_and_set_bit(SCLP_READING, &sclp_status))
                return;
        /* Initialize read sccb */
        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;        /* unconditional read */
        sccb->control_mask[2] = 0x80;   /* variable length response */
        /* Initialize request structure */
        read_req = &sclp_read_req;
        read_req->command = SCLP_CMDW_READDATA;
        read_req->status = SCLP_REQ_QUEUED;
        read_req->callback = sclp_unconditional_read_cb;
        read_req->sccb = sccb;
        /* Add read request to the head of queue */
        list_add(&read_req->list, &sclp_req_queue);
}

/* Bit masks to interpret external interruption parameter contents. */
#define EXT_INT_SCCB_MASK               0xfffffff8
#define EXT_INT_STATECHANGE_PENDING     0x00000002
#define EXT_INT_EVBUF_PENDING           0x00000001

/*
 * Handler for service-signal external interruptions
 */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
        u32 ext_int_param, finished_sccb, evbuf_pending;
        struct list_head *l;
        struct sclp_req *req, *tmp;

        spin_lock(&sclp_lock);
        /*
         * Only process interrupt if sclp is initialized.
         * This avoids strange effects for a pending request
         * from before the last re-ipl.
         */
        if (!test_bit(SCLP_INIT, &sclp_status)) {
                /* Now clear the running bit */
                clear_bit(SCLP_RUNNING, &sclp_status);
                spin_unlock(&sclp_lock);
                return;
        }
        ext_int_param = S390_lowcore.ext_params;
        finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
        evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
                                         EXT_INT_STATECHANGE_PENDING);
        req = NULL;
        if (finished_sccb != 0U) {
                list_for_each(l, &sclp_req_queue) {
                        tmp = list_entry(l, struct sclp_req, list);
                        if (finished_sccb == (u32)(addr_t) tmp->sccb) {
                                list_del(&tmp->list);
                                req = tmp;
                                break;
                        }
                }
        }
        spin_unlock(&sclp_lock);
        /* Perform callback */
        if (req != NULL) {
                req->status = SCLP_REQ_DONE;
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
        }
        spin_lock(&sclp_lock);
        /* Queue a read sccb at the head if an event buffer is pending */
        if (evbuf_pending)
                __sclp_unconditional_read();
        /* Now clear the running bit if SCLP indicated a finished SCCB */
        if (finished_sccb != 0U)
                clear_bit(SCLP_RUNNING, &sclp_status);
        spin_unlock(&sclp_lock);
        /* and start next request on the queue */
        sclp_start_request();
}

/*
 * Wait synchronously for external interrupt of sclp. We may not receive
 * any other external interrupt, so we disable all other external interrupts
 * in control register 0.
 */
void
sclp_sync_wait(void)
{
        unsigned long psw_mask;
        unsigned long cr0, cr0_sync;

        /* Prevent BH from executing. */
        local_bh_disable();
        /*
         * save cr0
         * enable service signal external interruption (cr0.22)
         * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31
         * don't touch any other bit in cr0
         */
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync |= 0x00000200;
        cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);

        /* enable external interruptions (PSW-mask.7) */
        asm volatile ("STOSM 0(%1),0x01"
                      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");

        /* wait until ISR signals receipt of interrupt */
        while (test_bit(SCLP_RUNNING, &sclp_status)) {
                barrier();
                cpu_relax();
        }

        /* disable external interruptions */
        asm volatile ("SSM 0(%0)"
                      : : "a" (&psw_mask) : "memory");

        /* restore cr0 */
        __ctl_load(cr0, 0, 0);
        __local_bh_enable();
}

/*
 * Queue an SCLP request. Request will immediately be processed if queue is
 * empty.
 */
void
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;

        if (!test_bit(SCLP_INIT, &sclp_status)) {
                req->status = SCLP_REQ_FAILED;
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
                return;
        }
        spin_lock_irqsave(&sclp_lock, flags);
        /* queue the request */
        req->status = SCLP_REQ_QUEUED;
        list_add_tail(&req->list, &sclp_req_queue);
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* try to start the first request on the queue */
        sclp_start_request();
}

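/*
 * Illustrative sketch of a typical caller (my_callback and done are
 * hypothetical names, not part of this driver; SCLP_CMDW_WRITEDATA and
 * SCLP_REQ_FILLED are expected to come from sclp.h):
 *
 *      static void my_callback(struct sclp_req *req, void *data)
 *      {
 *              complete((struct completion *) data);
 *      }
 *
 *      req->command = SCLP_CMDW_WRITEDATA;
 *      req->sccb = sccb;
 *      req->status = SCLP_REQ_FILLED;
 *      req->callback = my_callback;
 *      req->callback_data = &done;
 *      sclp_add_request(req);
 */
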
/* state change notification */
struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));

static inline void
__sclp_notify_state_change(void)
{
        struct list_head *l;
        struct sclp_register *t;
        sccb_mask_t receive_mask, send_mask;

        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                receive_mask = t->receive_mask & sclp_receive_mask;
                send_mask = t->send_mask & sclp_send_mask;
                if (t->sclp_receive_mask != receive_mask ||
                    t->sclp_send_mask != send_mask) {
                        t->sclp_receive_mask = receive_mask;
                        t->sclp_send_mask = send_mask;
                        if (t->state_change_fn != NULL)
                                t->state_change_fn(t);
                }
        }
}

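/*
 * Receiver for state change event buffers: pick up the new receive and
 * send masks announced by the service element and notify all
 * registered listeners.
 */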
static void
sclp_state_change(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        spin_lock_irqsave(&sclp_lock, flags);
        scbuf = (struct sclp_statechangebuf *) evbuf;

        if (scbuf->validity_sclp_receive_mask) {
                if (scbuf->mask_length != sizeof(sccb_mask_t))
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "state change event with mask length %i\n",
                               scbuf->mask_length);
                else
                        /* set new receive mask */
                        sclp_receive_mask = scbuf->sclp_receive_mask;
        }

        if (scbuf->validity_sclp_send_mask) {
                if (scbuf->mask_length != sizeof(sccb_mask_t))
                        printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
                               "state change event with mask length %i\n",
                               scbuf->mask_length);
                else
                        /* set new send mask */
                        sclp_send_mask = scbuf->sclp_send_mask;
        }

        __sclp_notify_state_change();
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EvTyp_StateChange_Mask,
        .receiver_fn = sclp_state_change
};


/*
 * SCLP quiesce event handler
 */
#ifdef CONFIG_SMP
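/*
 * Executed on every cpu: all cpus except the first caller stop
 * themselves; the remaining cpu waits for the others to stop and then
 * loads the quiesce PSW.
 */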
static void
do_load_quiesce_psw(void * __unused)
{
        static atomic_t cpuid = ATOMIC_INIT(-1);
        psw_t quiesce_psw;
        __u32 status;
        int i;

        if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
                signal_processor(smp_processor_id(), sigp_stop);
        /* Wait for all other cpus to enter stopped state */
        i = 1;
        while (i < NR_CPUS) {
                if (!cpu_online(i)) {
                        i++;
                        continue;
                }
                switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
                case sigp_order_code_accepted:
                case sigp_status_stored:
                        /* Check for stopped and check stop state */
                        if (status & 0x50)
                                i++;
                        break;
                case sigp_busy:
                        break;
                case sigp_not_operational:
                        i++;
                        break;
                }
        }
        /* Quiesce the last cpu with the special psw */
        quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
        quiesce_psw.addr = 0xfff;
        __load_psw(quiesce_psw);
}

static void
do_machine_quiesce(void)
{
        on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
static void
do_machine_quiesce(void)
{
        psw_t quiesce_psw;

        quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
        quiesce_psw.addr = 0xfff;
        __load_psw(quiesce_psw);
}
#endif

extern void ctrl_alt_del(void);

static void
sclp_quiesce(struct evbuf_header *evbuf)
{
        /*
         * We got a "shutdown" request.
         * Add a call to an appropriate "shutdown" routine here. This
         * routine should set all PSWs to 'disabled-wait', 'stopped'
         * or 'check-stopped' - except 1 PSW which needs to carry a
         * special bit pattern called 'quiesce PSW'.
         */
        _machine_restart = (void *) do_machine_quiesce;
        _machine_halt = do_machine_quiesce;
        _machine_power_off = do_machine_quiesce;
        ctrl_alt_del();
}

static struct sclp_register sclp_quiesce_event = {
        .receive_mask = EvTyp_SigQuiesce_Mask,
        .receiver_fn = sclp_quiesce
};

/* initialisation of SCLP */
struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

static void sclp_init_mask_retry(unsigned long);

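/*
 * Announce the combined receive and send masks of all registered
 * listeners to the service element with a write event mask request.
 * If the request fails, arm retry_timer and poll until the interface
 * becomes operational.
 */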
static int
sclp_init_mask(void)
{
        unsigned long flags;
        struct init_sccb *sccb;
        struct sclp_req *req;
        struct list_head *l;
        struct sclp_register *t;
        int rc;

        sccb = (struct init_sccb *) sclp_init_sccb;
        /* stick the request structure to the end of the init sccb page */
        req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;

        /* SCLP setup concerning receiving and sending Event Buffers */
        req->command = SCLP_CMDW_WRITEMASK;
        req->status = SCLP_REQ_QUEUED;
        req->callback = NULL;
        req->sccb = sccb;
        /* setup sccb for writemask command */
        memset(sccb, 0, sizeof(struct init_sccb));
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        /* copy in the sccb mask of the registered event types */
        spin_lock_irqsave(&sclp_lock, flags);
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                sccb->receive_mask |= t->receive_mask;
                sccb->send_mask |= t->send_mask;
        }
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
        if (test_bit(SCLP_INIT, &sclp_status)) {
                /* add request to sclp queue */
                list_add_tail(&req->list, &sclp_req_queue);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* and start if SCLP is idle */
                sclp_start_request();
                /* now wait for completion */
                while (req->status != SCLP_REQ_DONE &&
                       req->status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
        } else {
                /*
                 * Special case for the very first write mask command.
                 * The interrupt handler does not remove the request from
                 * the request queue and does not call callbacks yet,
                 * because there might be a pending old interrupt after
                 * a re-IPL. We have to receive and ignore it.
                 */
                do {
                        rc = __service_call(req->command, req->sccb);
                        if (rc == 0)
                                set_bit(SCLP_RUNNING, &sclp_status);
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        if (rc == -EIO)
                                return -ENOSYS;
                        sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                } while (rc == -EBUSY);
        }
        if (sccb->header.response_code != 0x0020) {
                /* WRITEMASK failed - we cannot rely on receiving a state
                   change event, so initially, polling is the only alternative
                   for us to ever become operational. */
                if (!timer_pending(&retry_timer) ||
                    !mod_timer(&retry_timer,
                               jiffies + SCLP_INIT_POLL_INTERVAL*HZ)) {
                        retry_timer.function = sclp_init_mask_retry;
                        retry_timer.data = 0;
                        retry_timer.expires = jiffies +
                                SCLP_INIT_POLL_INTERVAL*HZ;
                        add_timer(&retry_timer);
                }
        } else {
                sclp_receive_mask = sccb->sclp_receive_mask;
                sclp_send_mask = sccb->sclp_send_mask;
                __sclp_notify_state_change();
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return 0;
}

static void
sclp_init_mask_retry(unsigned long data)
{
        sclp_init_mask();
}

/*
 * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
 */
static int
sclp_init(void)
{
        int rc;

        if (test_bit(SCLP_INIT, &sclp_status))
                /* Already initialized. */
                return 0;

        spin_lock_init(&sclp_lock);
        INIT_LIST_HEAD(&sclp_req_queue);

        /* init event list */
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        list_add(&sclp_quiesce_event.list, &sclp_reg_list);

        /*
         * request the 0x2401 external interrupt
         * The sclp driver is initialized early (before kmalloc works). We
         * need to use register_early_external_interrupt.
         */
        if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                              &ext_int_info_hwc) != 0)
                return -EBUSY;

        /* enable service-signal external interruptions,
         * Control Register 0 bit 22 := 1
         * (in addition, PSW bit 7 must be set for external
         * interruptions to be delivered)
         */
        ctl_set_bit(0, 9);

        init_timer(&retry_timer);
        init_timer(&sclp_busy_timer);
        /* do the initial write event mask */
        rc = sclp_init_mask();
        if (rc == 0) {
                /* Ok, now everything is set up right. */
                set_bit(SCLP_INIT, &sclp_status);
                return 0;
        }

        /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
        ctl_clear_bit(0, 9);
        unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                            &ext_int_info_hwc);

        return rc;
}

/*
 * Register the SCLP event listener identified by REG. Return 0 on success.
 * Some error codes and their meaning:
 *
 *  -ENODEV = SCLP interface is not supported on this machine
 *   -EBUSY = there is already a listener registered for the requested
 *            event type
 *     -EIO = SCLP interface is currently not operational
 */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        struct list_head *l;
        struct sclp_register *t;

        if (!MACHINE_HAS_SCLP)
                return -ENODEV;

        if (!test_bit(SCLP_INIT, &sclp_status))
                sclp_init();
        spin_lock_irqsave(&sclp_lock, flags);
        /* check already registered event masks for collisions */
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                if (t->receive_mask & reg->receive_mask ||
                    t->send_mask & reg->send_mask) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        return -EBUSY;
                }
        }
        /*
         * set present mask to 0 to trigger state change
         * callback in sclp_init_mask
         */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask();
        return 0;
}

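/*
 * Illustrative registration sketch (my_receiver and my_listener are
 * hypothetical names; EvTyp_Msg_Mask is one of the event type masks
 * from sclp.h, and 0x80 marks a buffer processed, see
 * SCLP_EVBUF_PROCESSED below):
 *
 *      static void my_receiver(struct evbuf_header *evbuf)
 *      {
 *              evbuf->flags |= 0x80;
 *      }
 *
 *      static struct sclp_register my_listener = {
 *              .receive_mask = EvTyp_Msg_Mask,
 *              .receiver_fn = my_receiver,
 *      };
 *
 *      if (sclp_register(&my_listener) != 0)
 *              printk(KERN_WARNING "sclp listener registration failed\n");
 */
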
/*
 * Unregister the SCLP event listener identified by REG.
 */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask();
}

#define SCLP_EVBUF_PROCESSED    0x80

/*
 * Traverse array of event buffers contained in SCCB and remove all buffers
 * with a set "processed" flag. Return the number of unprocessed buffers.
 */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
                        sccb->length -= evbuf->length;
                        memcpy((void *) evbuf,
                               (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }

        return unprocessed;
}

module_init(sclp_init);

EXPORT_SYMBOL(sclp_add_request);
EXPORT_SYMBOL(sclp_sync_wait);
EXPORT_SYMBOL(sclp_register);
EXPORT_SYMBOL(sclp_unregister);
EXPORT_SYMBOL(sclp_error_message);