/* iSeries Virtual I/O Message Path code
 *
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/wait.h>

#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>
EXPORT_SYMBOL(viopath_hostLp);
EXPORT_SYMBOL(viopath_ourLp);
EXPORT_SYMBOL(vio_set_hostlp);
EXPORT_SYMBOL(vio_lookup_rc);
EXPORT_SYMBOL(viopath_open);
EXPORT_SYMBOL(viopath_close);
EXPORT_SYMBOL(viopath_isactive);
EXPORT_SYMBOL(viopath_sourceinst);
EXPORT_SYMBOL(viopath_targetinst);
EXPORT_SYMBOL(vio_setHandler);
EXPORT_SYMBOL(vio_clearHandler);
EXPORT_SYMBOL(vio_get_event_buffer);
EXPORT_SYMBOL(vio_free_event_buffer);
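/*
 * Sketch of how a virtual device driver typically uses this exported
 * interface (illustrative only; the handler name and event count here
 * are hypothetical, not from this file):
 *
 *	vio_setHandler(viomajorsubtype_blockio, mydriver_handle_event);
 *	if (viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2) != 0)
 *		... fail initialization ...
 *	... send events with vio_get_event_buffer()/signalLpEventFast ...
 *	viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
 *	vio_clearHandler(viomajorsubtype_blockio);
 */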
extern struct pci_dev *iSeries_vio_dev;
/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path?            */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;	/* Number of LP events allocated    */
} viopathStatus[HVMAXARCHITECTEDLPS];
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary.
 */
static void *event_buffer[VIO_MAX_SUBTYPES] = { };
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES] = { };
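/*
 * Layout note (worked from the code in viopath_open() below): the
 * largest LP event is 256 bytes, so one 4096-byte page holds
 * 4096 / 256 = 16 buffers, one per subtype, and the buffer for
 * subtype i is simply:
 *
 *	event_buffer[i] = event_buffer[0] + (i * 256);
 */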
static void handleMonitorEvent(struct HvLpEvent *event);
/* We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.
 */
struct doneAllocParms_t {
	struct semaphore *sem;
	int number;
};
/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping this is OK.
 */
static u8 viomonseq = 22;
/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
HvLpIndex viopath_ourLp = 0xff;
/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
/* Translate a single EBCDIC character to ASCII.  Only the
 * alphanumeric characters used in the VPD fields read below are
 * handled; anything else maps to a blank.
 */
static unsigned char e2a(unsigned char x)
{
	if (x >= 0xF0 && x <= 0xF9)	/* EBCDIC '0'..'9' */
		return '0' + (x - 0xF0);
	if (x >= 0xC1 && x <= 0xC9)	/* EBCDIC 'A'..'I' */
		return 'A' + (x - 0xC1);
	if (x >= 0xD1 && x <= 0xD9)	/* EBCDIC 'J'..'R' */
		return 'J' + (x - 0xD1);
	if (x >= 0xE2 && x <= 0xE9)	/* EBCDIC 'S'..'Z' */
		return 'S' + (x - 0xE2);
	return ' ';
}
/* Handle reads from the proc file system
 */
static int proc_read(char *buf, char **start, off_t offset,
		     int blen, int *eof, void *data)
{
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);
	dma_addr_t dmaa =
	    pci_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
			   PCI_DMA_FROMDEVICE);
	int len = PAGE_SIZE;

	if (len > blen)
		len = blen;

	memset(buf, 0x00, len);
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
					     HvLpEvent_Type_VirtualIo,
					     viomajorsubtype_config |
					     vioconfigget,
					     HvLpEvent_AckInd_DoAck,
					     HvLpEvent_AckType_ImmediateAck,
					     viopath_sourceinst(viopath_hostLp),
					     viopath_targetinst(viopath_hostLp),
					     (u64) (unsigned long)
					     &Semaphore, VIOVERSION << 16,
					     ((u64) dmaa) << 32, len, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good)
		printk("viopath hv error on op %d\n", (int) hvrc);

	/* Wait for handleConfig() to post the semaphore when the
	 * response event arrives.
	 */
	down(&Semaphore);

	pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);

	sprintf(buf + strlen(buf), "SRLNBR=");
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[1]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[4]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[5]);
	buf[strlen(buf)] = '\n';

	*eof = 1;
	return strlen(buf);
}
/* Handle writes to our proc file system
 */
static int proc_write(struct file *file, const char *buffer,
		      unsigned long count, void *data)
{
	/* Doesn't do anything today!!!
	 */
	return count;
}
/* Setup our proc file system entries
 */
static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
{
	struct proc_dir_entry *ent;
	ent = create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);
	if (!ent)
		return;
	ent->nlink = 1;
	ent->data = NULL;
	ent->read_proc = proc_read;
	ent->write_proc = proc_write;
}
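/*
 * Reading the resulting proc file then yields one line of VPD data,
 * along these lines (the serial number shown is made up, and the path
 * assumes the iSeries proc directory registered elsewhere is
 * /proc/iSeries):
 *
 *	$ cat /proc/iSeries/config
 *	SRLNBR=0612345
 */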
/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid.
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	else
		return 0;
}
/* We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
/* Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
	    HvCallEvent_getSourceLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
	    HvCallEvent_getTargetLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);

	/* Deliberately ignore the return code here.  If we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp,
					     HvLpEvent_Type_VirtualIo,
					     viomajorsubtype_monitor,
					     HvLpEvent_AckInd_DoAck,
					     HvLpEvent_AckType_DeferredAck,
					     viopathStatus[remoteLp].
					     mSourceInst,
					     viopathStatus[remoteLp].
					     mTargetInst, viomonseq++,
					     0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good) {
		viopathStatus[remoteLp].isActive = 1;
	} else {
		printk(KERN_WARNING_VIO
		       "could not connect to partition %d\n", remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/* This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/* First see if this is just a normal monitor message from the
	 * other partition.
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/* This path is for an acknowledgement; the other partition
	 * died.
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId !=
	     viopathStatus[remoteLp].mSourceInst)
	    || (event->xTargetInstanceId !=
		viopathStatus[remoteLp].mTargetInst)) {
		printk(KERN_WARNING_VIO
		       "ignoring ack....mismatched instances\n");
		return;
	}

	printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);

	viopathStatus[remoteLp].isActive = 0;

	/* For each active handler, pass them a NULL
	 * message to indicate that the other partition
	 * died.
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i]) (NULL);
	}
}
int vio_setHandler(int subtype, vio_event_handler_t * beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	if (vio_handler[subtype] != NULL)
		return -EBUSY;

	vio_handler[subtype] = beh;
	return 0;
}

int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	if (vio_handler[subtype] == NULL)
		return -EAGAIN;

	vio_handler[subtype] = NULL;
	return 0;
}
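/*
 * Worked example of the subtype encoding (constants from vio.h): a
 * major subtype such as viomajorsubtype_monitor lives in the bits
 * above VIOMAJOR_SUBTYPE_SHIFT, so registration here and dispatch in
 * vio_handleEvent() reduce to the same table slot:
 *
 *	slot = subtype >> VIOMAJOR_SUBTYPE_SHIFT;              (here)
 *	slot = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
 *	       >> VIOMAJOR_SUBTYPE_SHIFT;         (vio_handleEvent)
 */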
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(KERN_WARNING_VIO
		       "unexpected config request from partition %d\n",
		       event->xSourceLp);

		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This is the response to our own config request; wake up the
	 * waiter in proc_read().
	 */
	up((struct semaphore *) event->xCorrelationToken);
}
/* Initialization of the hosting partition.
 */
void vio_set_hostlp(void)
{
	/* If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system.
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/* Figure out our hosting partition.  This isn't allowed to change
	 * while we're active.
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	/* If we have a valid hosting LP, create a proc file system entry
	 * for config information.
	 */
	if (viopath_hostLp != HvLpIndexInvalid) {
		iSeries_proc_callback(&vio_proc_init);
		vio_setHandler(viomajorsubtype_config, handleConfig);
	}
}
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
	    >> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/* The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it would have
		 * different source and target instances, in which case we'd want to
		 * reset them.  This case really protects against an unauthorized
		 * active partition sending interrupts or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(KERN_WARNING_VIO
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary.
		 */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens.
	 */
	(*vio_handler[subtype]) (event);
}
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *doneAllocParmsp =
	    (struct doneAllocParms_t *) parm;
	doneAllocParmsp->number = number;
	up(doneAllocParmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents,
			    &viopath_donealloc, &doneAllocParms);

	down(&Semaphore);

	return doneAllocParms.number;
}
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	void *tempEventBuffer = NULL;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	/*
	 * NOTE: If VIO_MAX_SUBTYPES exceeds 16 then we need
	 * to allocate more than one page for the event_buffer.
	 */
	if (event_buffer[0] == NULL) {
		if (VIO_MAX_SUBTYPES <= 16) {
			tempEventBuffer =
			    (void *) get_free_page(GFP_KERNEL);
			if (tempEventBuffer == NULL)
				return -ENOMEM;
		} else {
			printk(KERN_WARNING_VIO
			       "VIO_MAX_SUBTYPES > 16. Need more space.");
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&statuslock, flags);

	/*
	 * OK...we can fit 16 maximum-sized events (256 bytes) in
	 * each page (4096 bytes).
	 */
	if (event_buffer[0] == NULL) {
		event_buffer[0] = tempEventBuffer;
		atomic_set(&event_buffer_available[0], 1);
		/*
		 * Start at the second element because we've already
		 * set the pointer for the first element and set the
		 * pointers for every 256 bytes in the page we
		 * allocated.
		 */
		for (i = 1; i < VIO_MAX_SUBTYPES; i++) {
			event_buffer[i] = event_buffer[i - 1] + 256;
			atomic_set(&event_buffer_available[i], 1);
		}
	} else if (tempEventBuffer != NULL) {
		/*
		 * While we were fetching the page, which shouldn't
		 * be done in a spin lock, another call to viopath_open
		 * decided to do the same thing and allocated storage
		 * and set the event_buffer before we could, so we'll
		 * free the one that we allocated and continue with our
		 * viopath_open operation.
		 */
		free_page((unsigned long) tempEventBuffer);
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp,
					    HvLpEvent_Type_VirtualIo);

		spin_unlock_irqrestore(&statuslock, flags);
		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated +=
		    tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
						     HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
		    HvCallEvent_getSourceLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
		    HvCallEvent_getTargetLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);

		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);

		sendMonMsg(remoteLp);

		printk(KERN_INFO_VIO
		       "Opening connection to partition %d, setting sinst %d, tinst %d\n",
		       remoteLp,
		       viopathStatus[remoteLp].mSourceInst,
		       viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0) {
		viopathStatus[remoteLp].users[subtype]--;
	}
	spin_unlock_irqrestore(&statuslock, flags);

	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) {
		numOpen += viopathStatus[remoteLp].users[i];
	}

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(KERN_INFO_VIO
		       "Closing connection to partition %d\n", remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
					     HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
			atomic_set(&event_buffer_available[i], 0);
		}

		/*
		 * Precautionary check to make sure we don't
		 * erroneously try to free a page that wasn't
		 * allocated.
		 */
		if (event_buffer[0] != NULL) {
			free_page((unsigned long) event_buffer[0]);
			for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
				event_buffer[i] = NULL;
			}
		}
	}

	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return event_buffer[subtype];
	else
		return NULL;
}
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(KERN_WARNING_VIO
		       "unexpected subtype %d freeing event buffer\n",
		       subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(KERN_WARNING_VIO
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != event_buffer[subtype]) {
		printk(KERN_WARNING_VIO
		       "freeing invalid event buffer, subtype %d\n",
		       subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
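/*
 * Illustrative get/use/free pairing for the per-subtype buffers above
 * (sketch only; the "build and send" step stands in for a real
 * driver's event construction):
 *
 *	void *ev = vio_get_event_buffer(viomajorsubtype_config);
 *	if (ev != NULL) {
 *		... build and send the event from ev ...
 *		vio_free_event_buffer(viomajorsubtype_config, ev);
 *	}
 */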
static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};
const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry
					    *local_table, u16 rc)
{
	const struct vio_error_entry *cur;
	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
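/*
 * Example use by a driver (illustrative; "my_errors" is a
 * hypothetical driver-local table, and the errno/msg field names
 * assume the vio_error_entry definition in vio.h):
 *
 *	const struct vio_error_entry *err =
 *		vio_lookup_rc(my_errors, event->xRc);
 *	printk("operation failed: %s (errno %d)\n",
 *	       err->msg, err->errno);
 */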