/*
 * iSeries Virtual I/O Message Path code
 *
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/wait.h>

#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/HvCallEvent.h>	/* HvCallEvent_* hypervisor calls */
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/iSeries/vio.h>	/* VIO_MAX_SUBTYPES, viomajorsubtype_*, etc. */
EXPORT_SYMBOL(viopath_hostLp);
EXPORT_SYMBOL(vio_set_hostlp);
EXPORT_SYMBOL(viopath_open);
EXPORT_SYMBOL(viopath_close);
EXPORT_SYMBOL(viopath_isactive);
EXPORT_SYMBOL(viopath_sourceinst);
EXPORT_SYMBOL(viopath_targetinst);
EXPORT_SYMBOL(vio_setHandler);
EXPORT_SYMBOL(vio_clearHandler);
EXPORT_SYMBOL(vio_get_event_buffer);
EXPORT_SYMBOL(vio_free_event_buffer);
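
/* Usage sketch (added commentary, not from the original source): a client
 * such as the virtual disk driver registers a handler, opens the path to
 * the hosting partition, and tears both down in reverse order.  The
 * subtype and handler names here are illustrative only.
 *
 *	vio_setHandler(viomajorsubtype_blockio, handleBlockIoEvent);
 *	if (viopath_open(viopath_hostLp, viomajorsubtype_blockio, 16) == 0) {
 *		... signal I/O events to viopath_hostLp ...
 *		viopath_close(viopath_hostLp, viomajorsubtype_blockio, 16);
 *	}
 *	vio_clearHandler(viomajorsubtype_blockio);
 */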
extern struct pci_dev *iSeries_vio_dev;
/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path? */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;	/* Number of LP events allocated to this path */
} viopathStatus[HVMAXARCHITECTEDLPS];
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;
/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static void *event_buffer[VIO_MAX_SUBTYPES];
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
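
/* Layout note (added commentary): events are at most 256 bytes and a page
 * is 4096 bytes, so viopath_open() carves four buffers from each page:
 *
 *	event_buffer[0] = page0 + 0
 *	event_buffer[1] = page0 + 256
 *	event_buffer[2] = page0 + 512
 *	event_buffer[3] = page0 + 768
 *	event_buffer[4] = page1 + 0, and so on.
 *
 * There is exactly one buffer per subtype, guarded by the _available
 * flag, so a caller must pair every vio_get_event_buffer() with a
 * vio_free_event_buffer() before another event of the same subtype can
 * claim the buffer.  A hypothetical caller:
 *
 *	void *buf = vio_get_event_buffer(viomajorsubtype_monitor);
 *	if (buf != NULL) {
 *		... fill in the event payload ...
 *		vio_free_event_buffer(viomajorsubtype_monitor, buf);
 *	}
 */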
static void handleMonitorEvent(struct HvLpEvent *event);
/* We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.
 */
struct doneAllocParms_t {
	struct semaphore *sem;
	int number;
};
/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is OK.
 */
static u8 viomonseq = 22;
/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
/* For each kind of incoming event we set a pointer to a
 * handler function.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
/* Handle reads from the proc file system
 */
static int proc_read(char *buf, char **start, off_t offset,
		     int blen, int *eof, void *data)
{
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);
	dma_addr_t dmaa = pci_map_single(iSeries_vio_dev, buf,
			PAGE_SIZE, PCI_DMA_FROMDEVICE);
	int len = PAGE_SIZE;

	if (len > blen)
		len = blen;

	memset(buf, 0x00, len);
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_ImmediateAck,
			viopathStatus[viopath_hostLp].mSourceInst,
			viopathStatus[viopath_hostLp].mTargetInst,
			(u64) (unsigned long) &Semaphore,
			VIOVERSION << 16, ((u64) dmaa) << 32, len, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good)
		printk("viopath hv error on op %d\n", (int) hvrc);

	down(&Semaphore);

	pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
	*eof = 1;
	return strlen(buf);
}
/* Handle writes to our proc file system
 */
static int proc_write(struct file *file, const char *buffer,
		      unsigned long count, void *data)
{
	/* Doesn't do anything today!!!
	 */
	return count;
}
/* setup our proc file system entries
 */
static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
{
	struct proc_dir_entry *ent;
	ent = create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);
	if (!ent)
		return;
	ent->read_proc = proc_read;
	ent->write_proc = proc_write;
}
/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	return 0;
}
/* We cache the source and target instance ids for each partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
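
/* Illustrative call (added commentary, hypothetical caller): drivers feed
 * these cached ids back to the hypervisor when signalling events, along
 * the lines of
 *
 *	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
 *			HvLpEvent_Type_VirtualIo, subtype,
 *			HvLpEvent_AckInd_DoAck,
 *			HvLpEvent_AckType_ImmediateAck,
 *			viopath_sourceinst(viopath_hostLp),
 *			viopath_targetinst(viopath_hostLp),
 *			correlation, data1, data2, data3, data4, data5);
 *
 * where subtype, correlation and data1..data5 are the caller's values.
 */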
/* Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
	    HvCallEvent_getSourceLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
	    HvCallEvent_getTargetLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);

	/* Deliberately ignore the return code here.  if we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor,
			HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good) {
		viopathStatus[remoteLp].isActive = 1;
	} else {
		printk(KERN_WARNING_VIO
		       "could not connect to partition %d\n", remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/* First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/* This path is for an acknowledgement; the other partition
	 * died
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId !=
	     viopathStatus[remoteLp].mSourceInst)
	    || (event->xTargetInstanceId !=
		viopathStatus[remoteLp].mTargetInst)) {
		printk(KERN_WARNING_VIO
		       "ignoring ack....mismatched instances\n");
		return;
	}

	printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);

	viopathStatus[remoteLp].isActive = 0;

	/* For each active handler, pass them a NULL
	 * message to indicate that the other partition
	 * died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i]) (NULL);
	}
}
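
/* Added note: because every registered handler can be called with a NULL
 * event when the remote partition dies, a typical (hypothetical) handler
 * begins with a NULL check:
 *
 *	static void handleBlockIoEvent(struct HvLpEvent *event)
 *	{
 *		if (event == NULL) {
 *			... partition died: fail outstanding requests ...
 *			return;
 *		}
 *		... normal event processing ...
 *	}
 */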
int vio_setHandler(int subtype, vio_event_handler_t * beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] != NULL)
		return -EBUSY;
	vio_handler[subtype] = beh;
	return 0;
}
int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}
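
/* Added note on the encoding assumed by the shifts above: the major
 * subtype lives in the bits selected by VIOMAJOR_SUBTYPE_MASK, so
 * shifting right by VIOMAJOR_SUBTYPE_SHIFT yields a small index into
 * vio_handler[].  vio_handleEvent() below computes the same index from
 * an incoming event:
 *
 *	index = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
 *			>> VIOMAJOR_SUBTYPE_SHIFT;
 */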
static void handleConfig(struct HvLpEvent *event)
{
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(KERN_WARNING_VIO
		       "unexpected config request from partition %d",
		       event->xSourceLp);

		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *) (unsigned long) event->xCorrelationToken);
}
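
/* Added note: this up() is the completion half of the config request in
 * proc_read().  The requester hides a semaphore pointer in the event's
 * correlation token and blocks; when OS/400 acks the event, the token
 * comes back here and the semaphore is posted:
 *
 *	DECLARE_MUTEX_LOCKED(Semaphore);
 *	... signal the event, passing (u64) (unsigned long) &Semaphore
 *	    as the correlation token ...
 *	down(&Semaphore);
 *	... woken here by handleConfig() calling up() ...
 */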
/* Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
	/* If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/* Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_hostLp =
	    HvCallCfg_getHostingLpIndex(HvLpConfig_getLpIndex());

	/* If we have a valid hosting LP, create a proc file system entry
	 * for config information
	 */
	if (viopath_hostLp != HvLpIndexInvalid) {
		iSeries_proc_callback(&vio_proc_init);
		vio_setHandler(viomajorsubtype_config, handleConfig);
	}
}
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
			>> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(KERN_WARNING_VIO
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary
		 */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens
	 */
	(*vio_handler[subtype]) (event);
}
static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *doneAllocParmsp =
	    (struct doneAllocParms_t *) parm;
	doneAllocParmsp->number = number;
	up(doneAllocParmsp->sem);
}
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents,
			    &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);
	return doneAllocParms.number;
}
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	/* OK...we can fit 4 maximum-sized events (256 bytes) in
	 * each page (4096).  Get a new page every 4
	 */
	if (event_buffer[0] == NULL) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
			if ((i % 4) == 0) {
				event_buffer[i] =
				    (void *) get_free_page(GFP_KERNEL);
				if (event_buffer[i] == NULL) {
					spin_unlock_irqrestore(&statuslock,
							       flags);
					return -ENOMEM;
				}
			} else {
				event_buffer[i] =
				    event_buffer[i - 1] + 256;
			}
			atomic_set(&event_buffer_available[i], 1);
		}
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		HvCallEvent_openLpEventPath(remoteLp,
					    HvLpEvent_Type_VirtualIo);

		viopathStatus[remoteLp].numberAllocated +=
		    allocateEvents(remoteLp, 1);

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
						     HvLpEvent_Type_VirtualIo);
			viopathStatus[remoteLp].users[subtype]--;
			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
		    HvCallEvent_getSourceLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
		    HvCallEvent_getTargetLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);

		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);

		viopathStatus[remoteLp].isOpen = 1;

		sendMonMsg(remoteLp);

		printk(KERN_INFO_VIO
		       "Opening connection to partition %d, setting sinst %d, tinst %d\n",
		       remoteLp,
		       viopathStatus[remoteLp].mSourceInst,
		       viopathStatus[remoteLp].mTargetInst);
	}

	viopathStatus[remoteLp].numberAllocated +=
	    allocateEvents(remoteLp, numReq);
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	viopathStatus[remoteLp].users[subtype]--;

	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) {
		numOpen += viopathStatus[remoteLp].users[i];
	}

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(KERN_INFO_VIO
		       "Closing connection to partition %d\n", remoteLp);
		HvCallEvent_closeLpEventPath(remoteLp,
					     HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
			atomic_set(&event_buffer_available[i], 0);
		}
		for (i = 0; i < VIO_MAX_SUBTYPES; i += 4) {
			free_page((unsigned long) event_buffer[i]);
			event_buffer[i] = NULL;
		}
	}

	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;
	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return event_buffer[subtype];
	return NULL;
}
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(KERN_WARNING_VIO
		       "unexpected subtype %d freeing event buffer\n",
		       subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(KERN_WARNING_VIO
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != event_buffer[subtype]) {
		printk(KERN_WARNING_VIO
		       "freeing invalid event buffer, subtype %d\n",
		       subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}