/*
 *  arch/ppc64/kernel/viopath.c
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2003 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path?            */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];
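
/* statuslock protects updates to the viopathStatus array */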
static DEFINE_SPINLOCK(statuslock);
/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
static void handleMonitorEvent(struct HvLpEvent *event);
/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if in_atomic() is true in the caller, then wait_atomic is used ...
 */
struct doneAllocParms_t {
	struct semaphore *sem;
	int number;
	atomic_t *wait_atomic;
	int used_wait_atomic;
};
/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  wrapping this is ok.
 */
static u8 viomonseq = 22;
/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = 0xff;
EXPORT_SYMBOL(viopath_ourLp);
/* For each kind of incoming event we set a pointer to a
 * handler routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
#define VIOPATH_KERN_WARN	KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO	KERN_INFO "viopath: "
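
/*
 * Show the iSeries configuration in /proc.  We ask the hosting
 * partition for its config data with a vioconfigget LP event, DMA the
 * reply into a locally allocated page, and emit it through seq_file
 * along with the virtual LAN map and the system serial number.
 */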
static int proc_viopath_show(struct seq_file *m, void *v)
{
	char *buf;
	u16 vlanMap;
	dma_addr_t handle;
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return 0;
	memset(buf, 0, PAGE_SIZE);

	handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
				DMA_FROM_DEVICE);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
			((u64)handle) << 32, PAGE_SIZE, 0, 0);

	if (hvrc != HvLpEvent_Rc_Good)
		printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

	down(&Semaphore);

	vlanMap = HvLpConfig_getVirtualLanIndexMap();

	buf[PAGE_SIZE - 1] = '\0';
	seq_printf(m, "%s", buf);
	seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
	seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
		   e2a(xItExtVpdPanel.mfgID[2]),
		   e2a(xItExtVpdPanel.mfgID[3]),
		   e2a(xItExtVpdPanel.systemSerial[1]),
		   e2a(xItExtVpdPanel.systemSerial[2]),
		   e2a(xItExtVpdPanel.systemSerial[3]),
		   e2a(xItExtVpdPanel.systemSerial[4]),
		   e2a(xItExtVpdPanel.systemSerial[5]));

	dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	kfree(buf);

	return 0;
}
static int proc_viopath_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
	.open		= proc_viopath_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init vio_proc_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/config", 0, NULL);
	if (e)
		e->proc_fops = &proc_viopath_operations;

	return 0;
}
__initcall(vio_proc_init);
/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	else
		return 0;
}
EXPORT_SYMBOL(viopath_isactive);
/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);
/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
		HvCallEvent_getSourceLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
		HvCallEvent_getTargetLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);

	/*
	 * Deliberately ignore the return code here.  If we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good)
		viopathStatus[remoteLp].isActive = 1;
	else {
		printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
				remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/*
	 * This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/*
	 * First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/*
	 * This path is for an acknowledgement; the other partition died
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
		printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
		return;
	}

	printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
	viopathStatus[remoteLp].isActive = 0;

	/*
	 * For each active handler, pass them a NULL
	 * message to indicate that the other partition died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i])(NULL);
	}
}
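
/*
 * Drivers install a handler for their major subtype with
 * vio_setHandler() and remove it with vio_clearHandler(); the subtype
 * argument is shifted down by VIOMAJOR_SUBTYPE_SHIFT to index the
 * vio_handler table.
 */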
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] != NULL)
		return -EBUSY;
	vio_handler[subtype] = beh;
	return 0;
}
EXPORT_SYMBOL(vio_setHandler);

int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}
EXPORT_SYMBOL(vio_clearHandler);
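
/*
 * Handler for config subtype events.  Inbound config requests from
 * another partition are unexpected, so they are logged and acked with
 * HvLpEvent_Rc_InvalidSubtype; an ack for our own request carries the
 * semaphore we stashed in the correlation token, which is posted to
 * wake up proc_viopath_show().
 */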
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(VIOPATH_KERN_WARN
		       "unexpected config request from partition %d",
		       event->xSourceLp);

		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *)event->xCorrelationToken);
}
/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
	/*
	 * If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/*
	 * Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	if (viopath_hostLp != HvLpIndexInvalid)
		vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);
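
/*
 * Main demultiplexing routine for inbound virtual I/O events.  The
 * source/target instance ids in the event are checked against the
 * cached values for that partition before the event is dispatched to
 * the registered subtype handler.
 */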
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/*
		 * The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(VIOPATH_KERN_WARN
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens */
	(*vio_handler[subtype])(event);
}
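
/*
 * Completion callback passed to mf_allocate_lp_events() and
 * mf_deallocate_lp_events().  It records how many events were actually
 * allocated and then wakes the waiter, either by clearing the atomic
 * flag or by posting the semaphore in the doneAllocParms_t.
 */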
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;

	parmsp->number = number;
	if (parmsp->used_wait_atomic)
		atomic_set(parmsp->wait_atomic, 0);
	else
		up(parmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t parms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	atomic_t wait_atomic;

	if (in_atomic()) {
		parms.used_wait_atomic = 1;
		atomic_set(&wait_atomic, 1);
		parms.wait_atomic = &wait_atomic;
	} else {
		parms.used_wait_atomic = 0;
		parms.sem = &Semaphore;
	}
	mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			numEvents, &viopath_donealloc, &parms);
	if (in_atomic()) {
		while (atomic_read(&wait_atomic))
			mb();
	} else
		down(&Semaphore);
	return parms.number;
}
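
/*
 * Open the path to a remote partition for a given subtype.  The first
 * opener opens the LP event path itself, caches the source/target
 * instance ids, registers vio_handleEvent() and sends the initial
 * monitor message; every caller then gets numReq additional event
 * slots allocated on its behalf.
 *
 * A rough usage sketch for a client driver (the names below, such as
 * viomajorsubtype_example and example_handler, and the event count of
 * 4 are hypothetical, not part of this file):
 *
 *	static void example_handler(struct HvLpEvent *event)
 *	{
 *		if (event == NULL)
 *			return;		(partition died; clean up)
 *		(process the inbound event or the ack for one we sent)
 *	}
 *
 *	vio_setHandler(viomajorsubtype_example, example_handler);
 *	viopath_open(viopath_hostLp, viomajorsubtype_example, 4);
 *	...
 *	viopath_close(viopath_hostLp, viomajorsubtype_example, 4);
 *	vio_clearHandler(viomajorsubtype_example);
 */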
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	if (!event_buffer_initialised) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 1);
		event_buffer_initialised = 1;
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;

		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		spin_unlock_irqrestore(&statuslock, flags);
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
					HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
			HvCallEvent_getSourceLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
			HvCallEvent_getTargetLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
				&vio_handleEvent);
		sendMonMsg(remoteLp);
		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
				"setting sinst %d, tinst %d\n",
				remoteLp, viopathStatus[remoteLp].mSourceInst,
				viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
EXPORT_SYMBOL(viopath_open);
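
/*
 * Undo one viopath_open() for this subtype: drop the user count, give
 * numReq event slots back via mf_deallocate_lp_events(), and close the
 * LP event path once no subtype has any remaining users.
 */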
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);

	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0)
		viopathStatus[remoteLp].users[subtype]--;

	spin_unlock_irqrestore(&statuslock, flags);

	doneAllocParms.used_wait_atomic = 0;
	doneAllocParms.sem = &Semaphore;
	mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
			numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
		numOpen += viopathStatus[remoteLp].users[i];

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(VIOPATH_KERN_INFO "closing connection to partition %d",
				remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
				HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 0);
		event_buffer_initialised = 0;
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
EXPORT_SYMBOL(viopath_close);
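
/*
 * Each subtype gets exactly one 256-byte slot in event_buffer; the
 * atomic event_buffer_available flag for that subtype acts as a simple
 * one-deep reservation, so a caller must free the buffer before it can
 * be handed out again.
 */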
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return &event_buffer[subtype * 256];
	else
		return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(VIOPATH_KERN_WARN
		       "unexpected subtype %d freeing event buffer\n", subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(VIOPATH_KERN_WARN
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != &event_buffer[subtype * 256]) {
		printk(VIOPATH_KERN_WARN
		       "freeing invalid event buffer, subtype %d\n", subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);
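
/*
 * Map a 16-bit VIO return code to an errno value and a printable
 * description.  A driver-specific table is searched first, then the
 * defaults below; rc 0 means "not a VIO error" and anything unknown
 * falls back to EIO / "Unknown Error".
 */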
static const struct vio_error_entry vio_no_error =
	{ 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
	{ 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};

const struct vio_error_entry *vio_lookup_rc(
		const struct vio_error_entry *local_table, u16 rc)
{
	const struct vio_error_entry *cur;

	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);