/*
 * arch/ppc64/kernel/viopath.c
 *
 * iSeries Virtual I/O Message Path code
 *
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2003 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path? */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
static void handleMonitorEvent(struct HvLpEvent *event);
/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if in_atomic() is true in the caller, then wait_atomic is used ...
 */
struct doneAllocParms_t {
	struct semaphore *sem;
	int number;
	atomic_t *wait_atomic;
	int used_wait_atomic;
};
/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping this is ok.
 */
static u8 viomonseq = 22;
/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);
/* For each kind of incoming event we set a pointer to a
 * handler routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
#define VIOPATH_KERN_WARN	KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO	KERN_INFO "viopath: "
static unsigned char e2a(unsigned char x)
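{
	/*
	 * The original conversion table was elided from this listing;
	 * below is a minimal sketch, assuming the standard EBCDIC code
	 * points for digits and uppercase letters (all that the VPD
	 * fields printed in proc_viopath_show() require).  Unknown code
	 * points fall back to a space; the original table may well have
	 * covered more characters.
	 */
	if ((x >= 0xF0) && (x <= 0xF9))		/* digits 0-9 */
		return '0' + (x - 0xF0);
	if ((x >= 0xC1) && (x <= 0xC9))		/* letters A-I */
		return 'A' + (x - 0xC1);
	if ((x >= 0xD1) && (x <= 0xD9))		/* letters J-R */
		return 'J' + (x - 0xD1);
	if ((x >= 0xE2) && (x <= 0xE9))		/* letters S-Z */
		return 'S' + (x - 0xE2);
	return ' ';
}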
static int proc_viopath_show(struct seq_file *m, void *v)
{
	char *buf;
	u16 vlanMap;
	int vlanIndex;
	dma_addr_t handle;
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return 0;
	memset(buf, 0, PAGE_SIZE);

	handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
				DMA_FROM_DEVICE);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
			((u64)handle) << 32, PAGE_SIZE, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good)
		printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

	/* The config handler will up() this when the response arrives */
	down(&Semaphore);

	/* Count the available virtual LANs in the 16-bit index map */
	vlanIndex = 0;
	vlanMap = HvLpConfig_getVirtualLanIndexMap();
	while (vlanMap != 0) {
		if (vlanMap & 0x8000)
			vlanIndex++;
		vlanMap = vlanMap << 1;
	}

	buf[PAGE_SIZE - 1] = '\0';
	seq_printf(m, "%s", buf);
	seq_printf(m, "AVAILABLE_VETH=%d\n", vlanIndex);
	seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
		   e2a(xItExtVpdPanel.mfgID[2]),
		   e2a(xItExtVpdPanel.mfgID[3]),
		   e2a(xItExtVpdPanel.systemSerial[1]),
		   e2a(xItExtVpdPanel.systemSerial[2]),
		   e2a(xItExtVpdPanel.systemSerial[3]),
		   e2a(xItExtVpdPanel.systemSerial[4]),
		   e2a(xItExtVpdPanel.systemSerial[5]));

	dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	kfree(buf);

	return 0;
}
static int proc_viopath_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
	.open		= proc_viopath_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init vio_proc_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/config", 0, NULL);
	if (e)
		e->proc_fops = &proc_viopath_operations;
	return 0;
}
__initcall(vio_proc_init);
/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	else
		return 0;
}
EXPORT_SYMBOL(viopath_isactive);
/* We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);
/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
		HvCallEvent_getSourceLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
		HvCallEvent_getTargetLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);

	/*
	 * Deliberately ignore the return code here.  If we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good)
		viopathStatus[remoteLp].isActive = 1;
	else {
		printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
				remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/* This handler is _also_ called from the loop at the end of this
	 * routine, so it must be able to ignore NULL events...
	 */
	if (!event)
		return;

	/* First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/* This path is for an acknowledgement; the other partition died */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
		printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
		return;
	}

	printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
	viopathStatus[remoteLp].isActive = 0;

	/* For each active handler, pass them a NULL message to
	 * indicate that the other partition died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i])(NULL);
	}
}
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] != NULL)
		return -EBUSY;
	vio_handler[subtype] = beh;
	return 0;
}
EXPORT_SYMBOL(vio_setHandler);

int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
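/*
 * Usage sketch (hypothetical driver, not from the original source):
 * a subtype driver registers one handler at init time and clears it
 * on exit.  A NULL event tells the handler the remote partition died:
 *
 *	static void mydrv_handler(struct HvLpEvent *event)
 *	{
 *		if (event == NULL)
 *			return;		// partition ended; clean up
 *		// ... process the event ...
 *	}
 *
 *	rc = vio_setHandler(viomajorsubtype_monitor, mydrv_handler);
 *	...
 *	vio_clearHandler(viomajorsubtype_monitor);
 */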
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(VIOPATH_KERN_WARN
		       "unexpected config request from partition %d\n",
		       event->xSourceLp);
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *)event->xCorrelationToken);
}
/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
	/* If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/* Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	if (viopath_hostLp != HvLpIndexInvalid)
		vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/*
		 * The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(VIOPATH_KERN_WARN
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens */
	(*vio_handler[subtype])(event);
}
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;

	parmsp->number = number;
	if (parmsp->used_wait_atomic)
		atomic_set(parmsp->wait_atomic, 0);
	else
		up(parmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t parms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	atomic_t wait_atomic;

	if (in_atomic()) {
		parms.used_wait_atomic = 1;
		atomic_set(&wait_atomic, 1);
		parms.wait_atomic = &wait_atomic;
	} else {
		parms.used_wait_atomic = 0;
		parms.sem = &Semaphore;
	}
	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents, &viopath_donealloc, &parms);
	if (in_atomic()) {
		while (atomic_read(&wait_atomic))
			mb();
	} else
		down(&Semaphore);
	return parms.number;
}
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	if (!event_buffer_initialised) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 1);
		event_buffer_initialised = 1;
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		spin_unlock_irqrestore(&statuslock, flags);
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
					HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
			HvCallEvent_getSourceLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
			HvCallEvent_getTargetLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);
		sendMonMsg(remoteLp);
		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
				"setting sinst %d, tinst %d\n",
				remoteLp, viopathStatus[remoteLp].mSourceInst,
				viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
EXPORT_SYMBOL(viopath_open);
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0)
		viopathStatus[remoteLp].users[subtype]--;

	spin_unlock_irqrestore(&statuslock, flags);

	doneAllocParms.used_wait_atomic = 0;
	doneAllocParms.sem = &Semaphore;
	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
		numOpen += viopathStatus[remoteLp].users[i];

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
				remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
				HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 0);
		event_buffer_initialised = 0;
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
EXPORT_SYMBOL(viopath_close);
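/*
 * Usage sketch (hypothetical, not from the original source): clients
 * such as the virtual console open a path to the hosting partition,
 * reserving event slots for their outstanding requests, and close it
 * with matching arguments on teardown:
 *
 *	if (viopath_open(viopath_hostLp, viomajorsubtype_config, 2) == 0) {
 *		// ... exchange events with the host ...
 *		viopath_close(viopath_hostLp, viomajorsubtype_config, 2);
 *	}
 */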
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return &event_buffer[subtype * 256];
	else
		return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(VIOPATH_KERN_WARN
		       "unexpected subtype %d freeing event buffer\n", subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(VIOPATH_KERN_WARN
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != &event_buffer[subtype * 256]) {
		printk(VIOPATH_KERN_WARN
		       "freeing invalid event buffer, subtype %d\n", subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
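/*
 * Usage sketch (hypothetical, not from the original source): there is
 * exactly one 256-byte buffer per subtype, guarded by an atomic flag,
 * so a client must pair every get with a free:
 *
 *	void *ev = vio_get_event_buffer(viomajorsubtype_config);
 *	if (ev != NULL) {
 *		// ... build an event in ev and signal it ...
 *		vio_free_event_buffer(viomajorsubtype_config, ev);
 *	}
 */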
static const struct vio_error_entry vio_no_error =
	{ 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
	{ 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};

const struct vio_error_entry *vio_lookup_rc(
		const struct vio_error_entry *local_table, u16 rc)
{
	const struct vio_error_entry *cur;

	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
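/*
 * Usage sketch (hypothetical, not from the original source; field names
 * assumed from the initializer order in the tables above): a driver maps
 * a VIO return code to an errno and message, consulting its own table
 * first and falling back to the defaults:
 *
 *	const struct vio_error_entry *err =
 *		vio_lookup_rc(my_error_table, event->xRc);
 *	printk(KERN_WARNING "op failed: %s\n", err->msg);
 *	return -err->errno;
 */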