/* -*- linux-c -*-
 *  arch/ppc64/kernel/viopath.c
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2003 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software;  you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/iSeries/HvTypes.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
        int isOpen;             /* Did we open the path?            */
        int isActive;           /* Do we have a mon msg outstanding */
        int users[VIO_MAX_SUBTYPES];
        HvLpInstanceId mSourceInst;
        HvLpInstanceId mTargetInst;
        int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static DEFINE_SPINLOCK(statuslock);

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;

static void handleMonitorEvent(struct HvLpEvent *event);

/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if system_state is not SYSTEM_RUNNING we cannot sleep, so wait_atomic
 * is used instead and the caller spins until the callback clears it.
 */
struct alloc_parms {
        struct semaphore sem;
        int number;
        atomic_t wait_atomic;
        int used_wait_atomic;
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is OK.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_ourLp);

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];

#define VIOPATH_KERN_WARN       KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO       KERN_INFO "viopath: "

static int proc_viopath_show(struct seq_file *m, void *v)
{
        char *buf;
        u16 vlanMap;
        dma_addr_t handle;
        HvLpEvent_Rc hvrc;
        DECLARE_MUTEX_LOCKED(Semaphore);

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;
        memset(buf, 0, PAGE_SIZE);

        handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
                                DMA_FROM_DEVICE);

        hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
                        HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_config | vioconfigget,
                        HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
                        viopath_sourceinst(viopath_hostLp),
                        viopath_targetinst(viopath_hostLp),
                        (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
                        ((u64)handle) << 32, PAGE_SIZE, 0, 0);

        if (hvrc != HvLpEvent_Rc_Good)
                printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

        down(&Semaphore);

        vlanMap = HvLpConfig_getVirtualLanIndexMap();

        buf[PAGE_SIZE-1] = '\0';
        seq_printf(m, "%s", buf);
        seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
        seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
                   e2a(xItExtVpdPanel.mfgID[2]),
                   e2a(xItExtVpdPanel.mfgID[3]),
                   e2a(xItExtVpdPanel.systemSerial[1]),
                   e2a(xItExtVpdPanel.systemSerial[2]),
                   e2a(xItExtVpdPanel.systemSerial[3]),
                   e2a(xItExtVpdPanel.systemSerial[4]),
                   e2a(xItExtVpdPanel.systemSerial[5]));

        dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
        kfree(buf);

        return 0;
}

static int proc_viopath_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
        .open           = proc_viopath_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

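/*
 * (Editorial note.)  The entry below appears as /proc/iSeries/config;
 * reading it triggers the config request in proc_viopath_show() above,
 * so its output is the OS/400-supplied data followed by the
 * AVAILABLE_VETH and SRLNBR lines.
 */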
static int __init vio_proc_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/config", 0, NULL);
        if (e)
                e->proc_fops = &proc_viopath_operations;

        return 0;
}
__initcall(vio_proc_init);

/* See if a given LP is active.  Allow for invalid LPs to be passed in
 * and just treat them as inactive.
 */
int viopath_isactive(HvLpIndex lp)
{
        if (lp == HvLpIndexInvalid)
                return 0;
        if (lp < HVMAXARCHITECTEDLPS)
                return viopathStatus[lp].isActive;
        else
                return 0;
}
EXPORT_SYMBOL(viopath_isactive);

/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
        return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
        return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);

/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages, so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
        HvLpEvent_Rc hvrc;

        viopathStatus[remoteLp].mSourceInst =
                HvCallEvent_getSourceLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);
        viopathStatus[remoteLp].mTargetInst =
                HvCallEvent_getTargetLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);

        /*
         * Deliberately ignore the return code here.  If we call this
         * more than once, we don't care.
         */
        vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

        hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
                        HvLpEvent_AckType_DeferredAck,
                        viopathStatus[remoteLp].mSourceInst,
                        viopathStatus[remoteLp].mTargetInst,
                        viomonseq++, 0, 0, 0, 0, 0);

        if (hvrc == HvLpEvent_Rc_Good)
                viopathStatus[remoteLp].isActive = 1;
        else {
                printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
                                remoteLp);
                viopathStatus[remoteLp].isActive = 0;
        }
}

static void handleMonitorEvent(struct HvLpEvent *event)
{
        HvLpIndex remoteLp;
        int i;

        /*
         * This handler is _also_ called as part of the loop
         * at the end of this routine, so it must be able to
         * ignore NULL events...
         */
        if (!event)
                return;

        /*
         * First see if this is just a normal monitor message from the
         * other partition
         */
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                if (!viopathStatus[remoteLp].isActive)
                        sendMonMsg(remoteLp);
                return;
        }

        /*
         * This path is for an acknowledgement; the other partition
         * died
         */
        remoteLp = event->xTargetLp;
        if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
            (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
                printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
                return;
        }

        printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);

        viopathStatus[remoteLp].isActive = 0;

        /*
         * For each active handler, pass them a NULL
         * message to indicate that the other partition
         * died
         */
        for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
                if (vio_handler[i] != NULL)
                        (*vio_handler[i])(NULL);
        }
}

int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] != NULL)
                return -EBUSY;
        vio_handler[subtype] = beh;
        return 0;
}
EXPORT_SYMBOL(vio_setHandler);
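
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original driver).  "viodasd_handle_event" is a hypothetical client
 * function and viomajorsubtype_blockio is assumed from vio.h; handlers
 * must tolerate a NULL event, which is how handleMonitorEvent() above
 * reports that the other partition died:
 *
 *      static void viodasd_handle_event(struct HvLpEvent *event)
 *      {
 *              if (event == NULL)
 *                      return;         (partner partition went away)
 *              (dispatch on the minor subtype of event->xSubtype)
 *      }
 *
 *      rc = vio_setHandler(viomajorsubtype_blockio, viodasd_handle_event);
 *      ...
 *      vio_clearHandler(viomajorsubtype_blockio);
 */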

int vio_clearHandler(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] == NULL)
                return -EAGAIN;
        vio_handler[subtype] = NULL;
        return 0;
}
EXPORT_SYMBOL(vio_clearHandler);

static void handleConfig(struct HvLpEvent *event)
{
        if (!event)
                return;
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                printk(VIOPATH_KERN_WARN
                       "unexpected config request from partition %d\n",
                       event->xSourceLp);

                if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        up((struct semaphore *)event->xCorrelationToken);
}

/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
        /*
         * If this has already been set then we DON'T want to either change
         * it or re-register the proc file system
         */
        if (viopath_hostLp != HvLpIndexInvalid)
                return;

        /*
         * Figure out our hosting partition.  This isn't allowed to change
         * while we're active
         */
        viopath_ourLp = HvLpConfig_getLpIndex();
        viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

        if (viopath_hostLp != HvLpIndexInvalid)
                vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);

static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
        HvLpIndex remoteLp;
        int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
                >> VIOMAJOR_SUBTYPE_SHIFT;

        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                /*
                 * The isActive is checked because if the hosting partition
                 * went down and came back up it would not be active but it
                 * would have different source and target instances, in which
                 * case we'd want to reset them.  This case really protects
                 * against an unauthorized active partition sending interrupts
                 * or acks to this linux partition.
                 */
                if (viopathStatus[remoteLp].isActive
                    && (event->xSourceInstanceId !=
                        viopathStatus[remoteLp].mTargetInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (viopathStatus[remoteLp].isActive
                    && (event->xTargetInstanceId !=
                        viopathStatus[remoteLp].mSourceInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xTargetInstanceId);
                        return;
                }
        } else {
                remoteLp = event->xTargetLp;
                if (event->xSourceInstanceId !=
                    viopathStatus[remoteLp].mSourceInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (event->xTargetInstanceId !=
                    viopathStatus[remoteLp].mTargetInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xTargetInstanceId);
                        return;
                }
        }

        if (vio_handler[subtype] == NULL) {
                printk(VIOPATH_KERN_WARN
                       "unexpected virtual io event subtype %d from partition %d\n",
                       event->xSubtype, remoteLp);
                /* No handler.  Ack if necessary */
                if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
                    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        /* This innocuous little line is where all the real work happens */
        (*vio_handler[subtype])(event);
}

static void viopath_donealloc(void *parm, int number)
{
        struct alloc_parms *parmsp = parm;

        parmsp->number = number;
        if (parmsp->used_wait_atomic)
                atomic_set(&parmsp->wait_atomic, 0);
        else
                up(&parmsp->sem);
}

static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
        struct alloc_parms parms;

        if (system_state != SYSTEM_RUNNING) {
                parms.used_wait_atomic = 1;
                atomic_set(&parms.wait_atomic, 1);
        } else {
                parms.used_wait_atomic = 0;
                init_MUTEX_LOCKED(&parms.sem);
        }
        mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250,  /* It would be nice to put a real number here! */
                            numEvents, &viopath_donealloc, &parms);
        if (system_state != SYSTEM_RUNNING) {
                while (atomic_read(&parms.wait_atomic))
                        mb();
        } else
                down(&parms.sem);
        return parms.number;
}

int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
        int i;
        unsigned long flags;
        int tempNumAllocated;

        if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);

        if (!event_buffer_initialised) {
                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 1);
                event_buffer_initialised = 1;
        }

        viopathStatus[remoteLp].users[subtype]++;

        if (!viopathStatus[remoteLp].isOpen) {
                viopathStatus[remoteLp].isOpen = 1;
                HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

                /*
                 * Don't hold the spinlock during an operation that
                 * can sleep.
                 */
                spin_unlock_irqrestore(&statuslock, flags);
                tempNumAllocated = allocateEvents(remoteLp, 1);
                spin_lock_irqsave(&statuslock, flags);

                viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

                if (viopathStatus[remoteLp].numberAllocated == 0) {
                        HvCallEvent_closeLpEventPath(remoteLp,
                                        HvLpEvent_Type_VirtualIo);

                        spin_unlock_irqrestore(&statuslock, flags);
                        return -ENOMEM;
                }

                viopathStatus[remoteLp].mSourceInst =
                        HvCallEvent_getSourceLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].mTargetInst =
                        HvCallEvent_getTargetLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                          &vio_handleEvent);
                sendMonMsg(remoteLp);
                printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
                                "setting sinst %d, tinst %d\n",
                                remoteLp, viopathStatus[remoteLp].mSourceInst,
                                viopathStatus[remoteLp].mTargetInst);
        }

        spin_unlock_irqrestore(&statuslock, flags);
        tempNumAllocated = allocateEvents(remoteLp, numReq);
        spin_lock_irqsave(&statuslock, flags);
        viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
        spin_unlock_irqrestore(&statuslock, flags);

        return 0;
}
EXPORT_SYMBOL(viopath_open);
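
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original driver) of the usual client open sequence.
 * VIODASD_MAX_EVENTS is a hypothetical client-side constant for the
 * desired number of outstanding requests:
 *
 *      if (viopath_hostLp == HvLpIndexInvalid)
 *              vio_set_hostlp();
 *      rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
 *                        VIODASD_MAX_EVENTS);
 *
 * A successful open must eventually be balanced by a viopath_close()
 * with the same subtype and event count.
 */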

int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
        unsigned long flags;
        int i;
        int numOpen;
        struct alloc_parms parms;

        if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);
        /*
         * If viopath_close somehow gets called before a
         * viopath_open it could decrement to -1, which is a
         * non-recoverable state, so we'll prevent this from
         * happening.
         */
        if (viopathStatus[remoteLp].users[subtype] > 0)
                viopathStatus[remoteLp].users[subtype]--;

        spin_unlock_irqrestore(&statuslock, flags);

        parms.used_wait_atomic = 0;
        init_MUTEX_LOCKED(&parms.sem);
        mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
                              numReq, &viopath_donealloc, &parms);
        down(&parms.sem);

        spin_lock_irqsave(&statuslock, flags);
        for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
                numOpen += viopathStatus[remoteLp].users[i];

        if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
                printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
                                remoteLp);

                HvCallEvent_closeLpEventPath(remoteLp,
                                             HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].isOpen = 0;
                viopathStatus[remoteLp].isActive = 0;

                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 0);
                event_buffer_initialised = 0;
        }
        spin_unlock_irqrestore(&statuslock, flags);
        return 0;
}
EXPORT_SYMBOL(viopath_close);

void *vio_get_event_buffer(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return NULL;

        if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
                return &event_buffer[subtype * 256];
        else
                return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);

void vio_free_event_buffer(int subtype, void *buffer)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
                printk(VIOPATH_KERN_WARN
                       "unexpected subtype %d freeing event buffer\n", subtype);
                return;
        }

        if (atomic_read(&event_buffer_available[subtype]) != 0) {
                printk(VIOPATH_KERN_WARN
                       "freeing unallocated event buffer, subtype %d\n",
                       subtype);
                return;
        }

        if (buffer != &event_buffer[subtype * 256]) {
                printk(VIOPATH_KERN_WARN
                       "freeing invalid event buffer, subtype %d\n", subtype);
                return;
        }

        atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
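
/*
 * Illustrative usage sketch (editorial addition): there is exactly one
 * 256-byte buffer per major subtype, so get and free must be strictly
 * paired:
 *
 *      void *buf = vio_get_event_buffer(viomajorsubtype_blockio);
 *      if (buf == NULL)
 *              return -EBUSY;          (buffer already in use)
 *      (... build the outgoing event in buf and signal it ...)
 *      vio_free_event_buffer(viomajorsubtype_blockio, buf);
 */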

static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
        {0x0001, EIO, "No Connection"},
        {0x0002, EIO, "No Receiver"},
        {0x0003, EIO, "No Buffer Available"},
        {0x0004, EBADRQC, "Invalid Message Type"},
        {0x0000, 0, NULL},
};

const struct vio_error_entry *vio_lookup_rc(
                const struct vio_error_entry *local_table, u16 rc)
{
        const struct vio_error_entry *cur;

        if (!rc)
                return &vio_no_error;
        if (local_table)
                for (cur = local_table; cur->rc; ++cur)
                        if (cur->rc == rc)
                                return cur;
        for (cur = vio_default_errors; cur->rc; ++cur)
                if (cur->rc == rc)
                        return cur;
        return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
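
/*
 * Illustrative usage sketch (editorial addition): a driver translates a
 * VIO return code, with its own table searched before the defaults
 * above.  "viodasd_err_table" is hypothetical, and the errno/msg member
 * names are assumed from the vio.h declaration of vio_error_entry:
 *
 *      const struct vio_error_entry *err =
 *              vio_lookup_rc(viodasd_err_table, rc);
 *      printk(KERN_WARNING "request failed: %s (errno %d)\n",
 *             err->msg, err->errno);
 */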