/*
 * arch/ppc64/kernel/viopath.c
 *
 * iSeries Virtual I/O Message Path code
 *
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2003 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path?            */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;	/* LP events allocated for this path */
} viopathStatus[HVMAXARCHITECTEDLPS];
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary.
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
static void handleMonitorEvent(struct HvLpEvent *event);
/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if in_atomic() is true in the caller, then wait_atomic is used instead
 * and the caller spins until the handler clears it.
 */
struct doneAllocParms_t {
	struct semaphore *sem;
	int number;		/* Number of events actually allocated */
	atomic_t *wait_atomic;
	int used_wait_atomic;
};
/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping this is ok.
 */
static u8 viomonseq = 22;
/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);
/* For each kind of incoming event we set a pointer to a
 * routine that will handle the event.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
#define VIOPATH_KERN_WARN	KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO	KERN_INFO "viopath: "
/*
 * Convert a single EBCDIC character (as found in the VPD fields used
 * below) to its ASCII equivalent.
 */
static unsigned char e2a(unsigned char x)
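{
	/*
	 * A minimal sketch of the translation (the original lookup body
	 * is not reproduced here), assuming only the characters that can
	 * appear in the manufacturing ID and system serial number need
	 * to be handled: EBCDIC digits and upper-case letters.  Anything
	 * else maps to a space.
	 */
	if (x >= 0xF0 && x <= 0xF9)	/* EBCDIC '0'..'9' */
		return '0' + (x - 0xF0);
	if (x >= 0xC1 && x <= 0xC9)	/* EBCDIC 'A'..'I' */
		return 'A' + (x - 0xC1);
	if (x >= 0xD1 && x <= 0xD9)	/* EBCDIC 'J'..'R' */
		return 'J' + (x - 0xD1);
	if (x >= 0xE2 && x <= 0xE9)	/* EBCDIC 'S'..'Z' */
		return 'S' + (x - 0xE2);
	return ' ';
}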
static int proc_viopath_show(struct seq_file *m, void *v)
{
	char *buf;
	u16 vlanMap;
	dma_addr_t handle;
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return 0;
	memset(buf, 0, PAGE_SIZE);

	handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
				DMA_FROM_DEVICE);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
			((u64)handle) << 32, PAGE_SIZE, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good)
		printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

	/* handleConfig() posts the semaphore when the response arrives */
	down(&Semaphore);

	vlanMap = HvLpConfig_getVirtualLanIndexMap();

	buf[PAGE_SIZE - 1] = '\0';
	seq_printf(m, "%s", buf);
	seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
	seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
		   e2a(xItExtVpdPanel.mfgID[2]),
		   e2a(xItExtVpdPanel.mfgID[3]),
		   e2a(xItExtVpdPanel.systemSerial[1]),
		   e2a(xItExtVpdPanel.systemSerial[2]),
		   e2a(xItExtVpdPanel.systemSerial[3]),
		   e2a(xItExtVpdPanel.systemSerial[4]),
		   e2a(xItExtVpdPanel.systemSerial[5]));

	dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	kfree(buf);

	return 0;
}
static int proc_viopath_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
	.open		= proc_viopath_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init vio_proc_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/config", 0, NULL);
	if (e)
		e->proc_fops = &proc_viopath_operations;

	return 0;
}
__initcall(vio_proc_init);
/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return inactive.
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	else
		return 0;
}
EXPORT_SYMBOL(viopath_isactive);
/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);
/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
		HvCallEvent_getSourceLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
		HvCallEvent_getTargetLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);

	/*
	 * Deliberately ignore the return code here.  If we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good)
		viopathStatus[remoteLp].isActive = 1;
	else {
		printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
				remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/*
	 * This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/*
	 * First see if this is just a normal monitor message from the
	 * other partition.
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/*
	 * This path is for an acknowledgement; the other partition died.
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
		printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
		return;
	}

	printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
	viopathStatus[remoteLp].isActive = 0;

	/*
	 * For each active handler, pass them a NULL
	 * message to indicate that the other partition died.
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i])(NULL);
	}
}
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] != NULL)
		return -EBUSY;
	vio_handler[subtype] = beh;
	return 0;
}
EXPORT_SYMBOL(vio_setHandler);
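/*
 * Usage sketch (illustrative only, not code in this file): a client
 * driver claims one major subtype and releases it on exit.  The handler
 * must tolerate a NULL event, which signals that the other partition
 * died.  "viomajorsubtype_example" and "example_handler" are
 * hypothetical names.
 *
 *	static void example_handler(struct HvLpEvent *event)
 *	{
 *		if (event == NULL)
 *			return;		// partner partition went away
 *		// ... process the event, ack it if xAckInd says so ...
 *	}
 *
 *	rc = vio_setHandler(viomajorsubtype_example, example_handler);
 *	if (rc)
 *		return rc;	// -EBUSY if the subtype is already claimed
 *	...
 *	vio_clearHandler(viomajorsubtype_example);
 */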
int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(VIOPATH_KERN_WARN
		       "unexpected config request from partition %d\n",
		       event->xSourceLp);
		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *)event->xCorrelationToken);
}
/*
 * Initialization of the hosting partition.
 */
void vio_set_hostlp(void)
{
	/*
	 * If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system.
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/*
	 * Figure out our hosting partition.  This isn't allowed to change
	 * while we are active.
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	if (viopath_hostLp != HvLpIndexInvalid)
		vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/*
		 * The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(VIOPATH_KERN_WARN
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens */
	(*vio_handler[subtype])(event);
}
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;

	parmsp->number = number;
	if (parmsp->used_wait_atomic)
		atomic_set(parmsp->wait_atomic, 0);
	else
		up(parmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t parms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	atomic_t wait_atomic;

	if (in_atomic()) {
		parms.used_wait_atomic = 1;
		atomic_set(&wait_atomic, 1);
		parms.wait_atomic = &wait_atomic;
	} else {
		parms.used_wait_atomic = 0;
		parms.sem = &Semaphore;
	}
	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			numEvents, &viopath_donealloc, &parms);
	if (parms.used_wait_atomic) {
		while (atomic_read(&wait_atomic))
			mb();
	} else
		down(&Semaphore);
	return parms.number;
}
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	if (!event_buffer_initialised) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 1);
		event_buffer_initialised = 1;
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		spin_unlock_irqrestore(&statuslock, flags);
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
					HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
			HvCallEvent_getSourceLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
			HvCallEvent_getTargetLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
				&vio_handleEvent);
		sendMonMsg(remoteLp);
		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
				"setting sinst %d, tinst %d\n",
				remoteLp, viopathStatus[remoteLp].mSourceInst,
				viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
EXPORT_SYMBOL(viopath_open);
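/*
 * Usage sketch (illustrative only): a typical client opens the path to
 * the hosting partition once at init time and closes it symmetrically
 * on exit.  The subtype name and event count below are hypothetical.
 *
 *	vio_set_hostlp();
 *	if (viopath_hostLp == HvLpIndexInvalid)
 *		return -ENODEV;
 *	rc = viopath_open(viopath_hostLp, viomajorsubtype_example, 4);
 *	if (rc)
 *		return rc;
 *	...
 *	viopath_close(viopath_hostLp, viomajorsubtype_example, 4);
 */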
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0)
		viopathStatus[remoteLp].users[subtype]--;

	spin_unlock_irqrestore(&statuslock, flags);

	doneAllocParms.used_wait_atomic = 0;
	doneAllocParms.sem = &Semaphore;
	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
		numOpen += viopathStatus[remoteLp].users[i];

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
				remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
				HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 0);
		event_buffer_initialised = 0;
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
EXPORT_SYMBOL(viopath_close);
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return &event_buffer[subtype * 256];
	else
		return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(VIOPATH_KERN_WARN
		       "unexpected subtype %d freeing event buffer\n", subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(VIOPATH_KERN_WARN
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != &event_buffer[subtype * 256]) {
		printk(VIOPATH_KERN_WARN
		       "freeing invalid event buffer, subtype %d\n", subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
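/*
 * Usage sketch (illustrative only): each subtype owns a single 256-byte
 * buffer that never crosses a page boundary, so it can be handed to the
 * hypervisor directly; get/free must be strictly paired and the caller
 * must tolerate NULL when the buffer is already in use.  The subtype
 * name is hypothetical.
 *
 *	void *ev = vio_get_event_buffer(viomajorsubtype_example);
 *	if (ev == NULL)
 *		return -ENOMEM;		// buffer already in use
 *	... build the event in ev and signal it ...
 *	vio_free_event_buffer(viomajorsubtype_example, ev);
 */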
static const struct vio_error_entry vio_no_error =
	{ 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
	{ 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};
const struct vio_error_entry *vio_lookup_rc(
		const struct vio_error_entry *local_table, u16 rc)
{
	const struct vio_error_entry *cur;

	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);
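/*
 * Usage sketch (illustrative only): a driver may pass its own
 * subtype-specific table, which is searched before the defaults above.
 * The table below is hypothetical; like vio_default_errors, it must end
 * with a zero rc entry so the lookup loop terminates.
 *
 *	static const struct vio_error_entry my_errors[] = {
 *		{0x0101, EIO, "Device Unavailable"},
 *		{0x0000, 0, NULL},
 *	};
 *
 *	const struct vio_error_entry *err = vio_lookup_rc(my_errors, rc);
 *	printk(KERN_WARNING "op failed: %s\n", err->msg);
 */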