/* -*- linux-c -*-
 *  arch/ppc64/kernel/viopath.c
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2003 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
        int isOpen:1;           /* Did we open the path?            */
        int isActive:1;         /* Do we have a mon msg outstanding */
        int users[VIO_MAX_SUBTYPES];
        HvLpInstanceId mSourceInst;
        HvLpInstanceId mTargetInst;
        int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;

static void handleMonitorEvent(struct HvLpEvent *event);

/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if in_atomic() is true in the caller, then wait_atomic is used instead
 * of the semaphore.
 */
struct doneAllocParms_t {
        struct semaphore *sem;
        int number;
        atomic_t *wait_atomic;
        int used_wait_atomic;
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is ok.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;        /* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];

#define VIOPATH_KERN_WARN       KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO       KERN_INFO "viopath: "

static unsigned char e2a(unsigned char x)
{
        switch (x) {
        case 0xF0:
                return '0';
        case 0xF1:
                return '1';
        case 0xF2:
                return '2';
        case 0xF3:
                return '3';
        case 0xF4:
                return '4';
        case 0xF5:
                return '5';
        case 0xF6:
                return '6';
        case 0xF7:
                return '7';
        case 0xF8:
                return '8';
        case 0xF9:
                return '9';
        case 0xC1:
                return 'A';
        case 0xC2:
                return 'B';
        case 0xC3:
                return 'C';
        case 0xC4:
                return 'D';
        case 0xC5:
                return 'E';
        case 0xC6:
                return 'F';
        case 0xC7:
                return 'G';
        case 0xC8:
                return 'H';
        case 0xC9:
                return 'I';
        case 0xD1:
                return 'J';
        case 0xD2:
                return 'K';
        case 0xD3:
                return 'L';
        case 0xD4:
                return 'M';
        case 0xD5:
                return 'N';
        case 0xD6:
                return 'O';
        case 0xD7:
                return 'P';
        case 0xD8:
                return 'Q';
        case 0xD9:
                return 'R';
        case 0xE2:
                return 'S';
        case 0xE3:
                return 'T';
        case 0xE4:
                return 'U';
        case 0xE5:
                return 'V';
        case 0xE6:
                return 'W';
        case 0xE7:
                return 'X';
        case 0xE8:
                return 'Y';
        case 0xE9:
                return 'Z';
        }
        return ' ';
}
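
/*
 * Illustration (not called anywhere): e2a() maps the subset of EBCDIC
 * code points that appear in the VPD fields to ASCII, so, for example:
 *
 *      e2a(0xF1) == '1'
 *      e2a(0xC1) == 'A'
 *      e2a(0x6D) == ' '    (any unrecognized code point becomes a blank)
 */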

static int proc_viopath_show(struct seq_file *m, void *v)
{
        char *buf;
        dma_addr_t handle;
        HvLpEvent_Rc hvrc;
        DECLARE_MUTEX_LOCKED(Semaphore);

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;
        memset(buf, 0, PAGE_SIZE);

        handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
                                DMA_FROM_DEVICE);

        hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
                        HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_config | vioconfigget,
                        HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
                        viopath_sourceinst(viopath_hostLp),
                        viopath_targetinst(viopath_hostLp),
                        (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
                        ((u64)handle) << 32, PAGE_SIZE, 0, 0);

        if (hvrc != HvLpEvent_Rc_Good)
                printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

        down(&Semaphore);

        dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);

        /* NUL terminate within the buffer, and print it before freeing */
        buf[PAGE_SIZE - 1] = '\0';
        seq_printf(m, "%s", buf);
        kfree(buf);

        seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
                   e2a(xItExtVpdPanel.mfgID[2]),
                   e2a(xItExtVpdPanel.mfgID[3]),
                   e2a(xItExtVpdPanel.systemSerial[1]),
                   e2a(xItExtVpdPanel.systemSerial[2]),
                   e2a(xItExtVpdPanel.systemSerial[3]),
                   e2a(xItExtVpdPanel.systemSerial[4]),
                   e2a(xItExtVpdPanel.systemSerial[5]));

        return 0;
}

static int proc_viopath_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
        .open           = proc_viopath_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init vio_proc_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/config", 0, NULL);
        if (e)
                e->proc_fops = &proc_viopath_operations;

        return 0;
}
__initcall(vio_proc_init);
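
/*
 * A sketch of what the resulting /proc file looks like (output shown is
 * a placeholder, not captured from a real system): the first part is the
 * raw config buffer returned by the hosting partition, followed by the
 * SRLNBR= line built from the VPD in proc_viopath_show() above.
 *
 *      $ cat /proc/iSeries/config
 *      ...
 *      SRLNBR=XXXXXXX
 */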

/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid
 */
int viopath_isactive(HvLpIndex lp)
{
        if (lp == HvLpIndexInvalid)
                return 0;
        if (lp < HVMAXARCHITECTEDLPS)
                return viopathStatus[lp].isActive;
        else
                return 0;
}
EXPORT_SYMBOL(viopath_isactive);

/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
        return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
        return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);

/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages, so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
        HvLpEvent_Rc hvrc;

        viopathStatus[remoteLp].mSourceInst =
                HvCallEvent_getSourceLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);
        viopathStatus[remoteLp].mTargetInst =
                HvCallEvent_getTargetLpInstanceId(remoteLp,
                                HvLpEvent_Type_VirtualIo);

        /*
         * Deliberately ignore the return code here.  If we call this
         * more than once, we don't care.
         */
        vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

        hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
                        viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
                        HvLpEvent_AckType_DeferredAck,
                        viopathStatus[remoteLp].mSourceInst,
                        viopathStatus[remoteLp].mTargetInst,
                        viomonseq++, 0, 0, 0, 0, 0);

        if (hvrc == HvLpEvent_Rc_Good)
                viopathStatus[remoteLp].isActive = 1;
        else {
                printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
                                remoteLp);
                viopathStatus[remoteLp].isActive = 0;
        }
}

static void handleMonitorEvent(struct HvLpEvent *event)
{
        HvLpIndex remoteLp;
        int i;

        /*
         * This handler is _also_ called as part of the loop
         * at the end of this routine, so it must be able to
         * ignore NULL events...
         */
        if (!event)
                return;

        /*
         * First see if this is just a normal monitor message from the
         * other partition
         */
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                if (!viopathStatus[remoteLp].isActive)
                        sendMonMsg(remoteLp);
                return;
        }

        /*
         * This path is for an acknowledgement; the other partition
         * died
         */
        remoteLp = event->xTargetLp;
        if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
            (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
                printk(VIOPATH_KERN_WARN "ignoring ack: mismatched instances\n");
                return;
        }

        printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);

        viopathStatus[remoteLp].isActive = 0;

        /*
         * For each active handler, pass them a NULL
         * message to indicate that the other partition
         * died
         */
        for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
                if (vio_handler[i] != NULL)
                        (*vio_handler[i])(NULL);
        }
}

int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] != NULL)
                return -EBUSY;
        vio_handler[subtype] = beh;
        return 0;
}
EXPORT_SYMBOL(vio_setHandler);

int vio_clearHandler(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;
        if (vio_handler[subtype] == NULL)
                return -EAGAIN;
        vio_handler[subtype] = NULL;
        return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
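
/*
 * A minimal sketch of how a client driver uses the two calls above;
 * viomajorsubtype_mydrv and mydrv_handler are hypothetical names, not
 * part of this file:
 *
 *      static void mydrv_handler(struct HvLpEvent *event)
 *      {
 *              if (!event)
 *                      return;  // NULL means the remote partition died
 *              // ... handle or ack the event ...
 *      }
 *
 *      // at init time (-EINVAL: subtype out of range, -EBUSY: claimed):
 *      rc = vio_setHandler(viomajorsubtype_mydrv, mydrv_handler);
 *
 *      // at teardown:
 *      vio_clearHandler(viomajorsubtype_mydrv);
 */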

static void handleConfig(struct HvLpEvent *event)
{
        if (!event)
                return;
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                printk(VIOPATH_KERN_WARN
                       "unexpected config request from partition %d\n",
                       event->xSourceLp);

                if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        up((struct semaphore *)event->xCorrelationToken);
}

/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
        /*
         * If this has already been set then we DON'T want to either change
         * it or re-register the proc file system
         */
        if (viopath_hostLp != HvLpIndexInvalid)
                return;

        /*
         * Figure out our hosting partition.  This isn't allowed to change
         * while we're active
         */
        viopath_ourLp = HvLpConfig_getLpIndex();
        viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

        if (viopath_hostLp != HvLpIndexInvalid)
                vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);

static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
        HvLpIndex remoteLp;
        int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
                >> VIOMAJOR_SUBTYPE_SHIFT;

        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                /*
                 * The isActive is checked because if the hosting partition
                 * went down and came back up it would not be active but it
                 * would have different source and target instances, in which
                 * case we'd want to reset them.  This case really protects
                 * against an unauthorized active partition sending interrupts
                 * or acks to this linux partition.
                 */
                if (viopathStatus[remoteLp].isActive
                    && (event->xSourceInstanceId !=
                        viopathStatus[remoteLp].mTargetInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (viopathStatus[remoteLp].isActive
                    && (event->xTargetInstanceId !=
                        viopathStatus[remoteLp].mSourceInst)) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "int msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xTargetInstanceId);
                        return;
                }
        } else {
                remoteLp = event->xTargetLp;
                if (event->xSourceInstanceId !=
                    viopathStatus[remoteLp].mSourceInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (event->xTargetInstanceId !=
                    viopathStatus[remoteLp].mTargetInst) {
                        printk(VIOPATH_KERN_WARN
                               "message from invalid partition. "
                               "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xTargetInstanceId);
                        return;
                }
        }

        if (vio_handler[subtype] == NULL) {
                printk(VIOPATH_KERN_WARN
                       "unexpected virtual io event subtype %d from partition %d\n",
                       event->xSubtype, remoteLp);
                /* No handler.  Ack if necessary */
                if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
                    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        /* This innocuous little line is where all the real work happens */
        (*vio_handler[subtype])(event);
}

static void viopath_donealloc(void *parm, int number)
{
        struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;

        parmsp->number = number;
        if (parmsp->used_wait_atomic)
                atomic_set(parmsp->wait_atomic, 0);
        else
                up(parmsp->sem);
}

static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
        struct doneAllocParms_t parms;
        DECLARE_MUTEX_LOCKED(Semaphore);
        atomic_t wait_atomic;

        if (in_atomic()) {
                parms.used_wait_atomic = 1;
                atomic_set(&wait_atomic, 1);
                parms.wait_atomic = &wait_atomic;
        } else {
                parms.used_wait_atomic = 0;
                parms.sem = &Semaphore;
        }
        mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
                            250, /* It would be nice to put a real number here! */
                            numEvents, &viopath_donealloc, &parms);
        if (in_atomic()) {
                while (atomic_read(&wait_atomic))
                        mb();
        } else
                down(&Semaphore);
        return parms.number;
}

int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
        int i;
        unsigned long flags;
        int tempNumAllocated;

        if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);

        if (!event_buffer_initialised) {
                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 1);
                event_buffer_initialised = 1;
        }

        viopathStatus[remoteLp].users[subtype]++;

        if (!viopathStatus[remoteLp].isOpen) {
                viopathStatus[remoteLp].isOpen = 1;
                HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

                /*
                 * Don't hold the spinlock during an operation that
                 * can sleep.
                 */
                spin_unlock_irqrestore(&statuslock, flags);
                tempNumAllocated = allocateEvents(remoteLp, 1);
                spin_lock_irqsave(&statuslock, flags);

                viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

                if (viopathStatus[remoteLp].numberAllocated == 0) {
                        HvCallEvent_closeLpEventPath(remoteLp,
                                        HvLpEvent_Type_VirtualIo);

                        spin_unlock_irqrestore(&statuslock, flags);
                        return -ENOMEM;
                }

                viopathStatus[remoteLp].mSourceInst =
                        HvCallEvent_getSourceLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].mTargetInst =
                        HvCallEvent_getTargetLpInstanceId(remoteLp,
                                        HvLpEvent_Type_VirtualIo);
                HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                          &vio_handleEvent);
                sendMonMsg(remoteLp);
                printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
                                "setting sinst %d, tinst %d\n",
                                remoteLp, viopathStatus[remoteLp].mSourceInst,
                                viopathStatus[remoteLp].mTargetInst);
        }

        spin_unlock_irqrestore(&statuslock, flags);
        tempNumAllocated = allocateEvents(remoteLp, numReq);
        spin_lock_irqsave(&statuslock, flags);
        viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
        spin_unlock_irqrestore(&statuslock, flags);

        return 0;
}
EXPORT_SYMBOL(viopath_open);

int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
        unsigned long flags;
        int i;
        int numOpen;
        struct doneAllocParms_t doneAllocParms;
        DECLARE_MUTEX_LOCKED(Semaphore);

        if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);
        /*
         * If viopath_close somehow gets called before a
         * viopath_open it could decrement to -1, which is a
         * non-recoverable state, so we'll prevent this from
         * happening.
         */
        if (viopathStatus[remoteLp].users[subtype] > 0)
                viopathStatus[remoteLp].users[subtype]--;

        spin_unlock_irqrestore(&statuslock, flags);

        doneAllocParms.used_wait_atomic = 0;
        doneAllocParms.sem = &Semaphore;
        mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
                              numReq, &viopath_donealloc, &doneAllocParms);
        down(&Semaphore);

        spin_lock_irqsave(&statuslock, flags);
        for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
                numOpen += viopathStatus[remoteLp].users[i];

        if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
                printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
                                remoteLp);

                HvCallEvent_closeLpEventPath(remoteLp,
                                             HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].isOpen = 0;
                viopathStatus[remoteLp].isActive = 0;

                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 0);
                event_buffer_initialised = 0;
        }
        spin_unlock_irqrestore(&statuslock, flags);
        return 0;
}
EXPORT_SYMBOL(viopath_close);
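
/*
 * Sketch of the expected open/use/close pairing for a client (the
 * subtype name and event count are made up for illustration):
 *
 *      if (viopath_open(viopath_hostLp, viomajorsubtype_mydrv, 4) == 0) {
 *              // path is up; viopath_sourceinst(viopath_hostLp) and
 *              // viopath_targetinst(viopath_hostLp) supply the instance
 *              // ids needed to signal events to the hosting partition
 *              ...
 *              viopath_close(viopath_hostLp, viomajorsubtype_mydrv, 4);
 *      }
 */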

void *vio_get_event_buffer(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return NULL;

        if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
                return &event_buffer[subtype * 256];
        else
                return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);

void vio_free_event_buffer(int subtype, void *buffer)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
                printk(VIOPATH_KERN_WARN
                       "unexpected subtype %d freeing event buffer\n", subtype);
                return;
        }

        if (atomic_read(&event_buffer_available[subtype]) != 0) {
                printk(VIOPATH_KERN_WARN
                       "freeing unallocated event buffer, subtype %d\n",
                       subtype);
                return;
        }

        if (buffer != &event_buffer[subtype * 256]) {
                printk(VIOPATH_KERN_WARN
                       "freeing invalid event buffer, subtype %d\n", subtype);
        }

        atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
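
/*
 * Sketch of the get/free discipline (names are illustrative).  There is
 * exactly one 256-byte buffer per major subtype, so a caller must free
 * it promptly or further vio_get_event_buffer() calls return NULL:
 *
 *      void *buf = vio_get_event_buffer(viomajorsubtype_mydrv);
 *      if (buf) {
 *              // ... stage at most 256 bytes of event data in buf ...
 *              vio_free_event_buffer(viomajorsubtype_mydrv, buf);
 *      }
 */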

static const struct vio_error_entry vio_no_error =
        { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
        { 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
        {0x0001, EIO, "No Connection"},
        {0x0002, EIO, "No Receiver"},
        {0x0003, EIO, "No Buffer Available"},
        {0x0004, EBADRQC, "Invalid Message Type"},
        {0x0000, 0, NULL},
};

const struct vio_error_entry *vio_lookup_rc(
                const struct vio_error_entry *local_table, u16 rc)
{
        const struct vio_error_entry *cur;

        if (!rc)
                return &vio_no_error;
        if (local_table)
                for (cur = local_table; cur->rc; ++cur)
                        if (cur->rc == rc)
                                return cur;
        for (cur = vio_default_errors; cur->rc; ++cur)
                if (cur->rc == rc)
                        return cur;
        return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
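
/*
 * Sketch of a client-side lookup (this error table's contents are
 * hypothetical).  Entries in a local table take precedence over
 * vio_default_errors, and the terminating {0x0000, 0, NULL} entry is
 * required to stop the scan:
 *
 *      static const struct vio_error_entry mydrv_errors[] = {
 *              {0x0101, EIO, "Device Unavailable"},
 *              {0x0000, 0, NULL},
 *      };
 *
 *      const struct vio_error_entry *err = vio_lookup_rc(mydrv_errors, rc);
 *      printk(KERN_WARNING "mydrv: op failed: %s\n", err->msg);
 */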