/*
 * Source: linux-2.4 tree, arch/ppc64/kernel/viopath.c
 */
1 /* -*- linux-c -*-
2  *  arch/ppc64/viopath.c
3  *
4  *  iSeries Virtual I/O Message Path code
5  *
6  *  Authors: Dave Boutcher <boutcher@us.ibm.com>
7  *           Ryan Arnold <ryanarn@us.ibm.com>
8  *           Colin Devilbiss <devilbis@us.ibm.com>
9  *
10  * (C) Copyright 2000 IBM Corporation
11  *
12  * This code is used by the iSeries virtual disk, cd,
13  * tape, and console to communicate with OS/400 in another
14  * partition.
15  *
16  * This program is free software;  you can redistribute it and/or
17  * modify it under the terms of the GNU General Public License as
18  * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful, but
22  * WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
24  * General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, write to the Free Software Foundation,
28  * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29  *
30  */
31 #include <linux/config.h>
32 #include <asm/uaccess.h>
33 #include <linux/module.h>
34 #include <linux/kernel.h>
35 #include <linux/errno.h>
36 #include <linux/vmalloc.h>
37 #include <linux/string.h>
38 #include <linux/proc_fs.h>
39 #include <linux/pci.h>
40 #include <linux/wait.h>
41
42 #include <asm/iSeries/LparData.h>
43 #include <asm/iSeries/HvLpEvent.h>
44 #include <asm/iSeries/HvLpConfig.h>
45 #include <asm/iSeries/HvCallCfg.h>
46 #include <asm/iSeries/mf.h>
47 #include <asm/iSeries/iSeries_proc.h>
48 #include <asm/iSeries/vio.h>
49
/* Symbols exported to the iSeries virtual device drivers (disk, cd,
 * tape, console).
 */
EXPORT_SYMBOL(viopath_hostLp);
EXPORT_SYMBOL(viopath_ourLp);
EXPORT_SYMBOL(vio_set_hostlp);
EXPORT_SYMBOL(vio_lookup_rc);
EXPORT_SYMBOL(viopath_open);
EXPORT_SYMBOL(viopath_close);
EXPORT_SYMBOL(viopath_isactive);
EXPORT_SYMBOL(viopath_sourceinst);
EXPORT_SYMBOL(viopath_targetinst);
EXPORT_SYMBOL(vio_setHandler);
EXPORT_SYMBOL(vio_clearHandler);
EXPORT_SYMBOL(vio_get_event_buffer);
EXPORT_SYMBOL(vio_free_event_buffer);

/* Device used for pci_map_single()/pci_unmap_single() of buffers
 * handed to the hypervisor; defined elsewhere in the iSeries code.
 */
extern struct pci_dev *iSeries_vio_dev;

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen:1;		/* Did we open the path?            */
	int isActive:1;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];	/* Open count per event subtype */
	HvLpInstanceId mSourceInst;	/* Cached source instance id */
	HvLpInstanceId mTargetInst;	/* Cached target instance id */
	int numberAllocated;	/* LP events reserved for this path */
} viopathStatus[HVMAXARCHITECTEDLPS];

/* Guards viopathStatus[] and the event_buffer pointers below. */
static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static void *event_buffer[VIO_MAX_SUBTYPES] = { };
/* Nonzero while the corresponding buffer is free; cleared when it is
 * handed out by vio_get_event_buffer().
 */
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES] = { };

static void handleMonitorEvent(struct HvLpEvent *event);

/* We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.
 */
struct doneAllocParms_t {
	struct semaphore *sem;	/* posted by viopath_donealloc() */
	int number;		/* events actually (de)allocated */
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  wrapping this is ok.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
HvLpIndex viopath_ourLp = 0xff;

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
115
/*
 * Convert one EBCDIC character to ASCII.  Only the digits 0-9 and the
 * uppercase letters A-Z are handled; every other input maps to a
 * space.  Note the EBCDIC alphabet is split into three non-contiguous
 * ranges (A-I, J-R, S-Z).
 */
static unsigned char e2a(unsigned char x)
{
	if ((x >= 0xF0) && (x <= 0xF9))		/* EBCDIC '0'..'9' */
		return '0' + (x - 0xF0);
	if ((x >= 0xC1) && (x <= 0xC9))		/* EBCDIC 'A'..'I' */
		return 'A' + (x - 0xC1);
	if ((x >= 0xD1) && (x <= 0xD9))		/* EBCDIC 'J'..'R' */
		return 'J' + (x - 0xD1);
	if ((x >= 0xE2) && (x <= 0xE9))		/* EBCDIC 'S'..'Z' */
		return 'S' + (x - 0xE2);
	return ' ';
}
194
/* Handle reads from the proc file system: ask the hosting partition
 * for its configuration data (DMA'd straight into the proc buffer),
 * then append the machine serial number taken from the VPD panel data.
 */
static int proc_read(char *buf, char **start, off_t offset,
		     int blen, int *eof, void *data)
{
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);
	/* Map the proc buffer so the hypervisor can write into it. */
	dma_addr_t dmaa =
	    pci_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
			   PCI_DMA_FROMDEVICE);
	int len = PAGE_SIZE;

	if (len > blen)
		len = blen;

	/* Zero-fill so the strlen()-based appends below always find a
	 * terminator even if the response does not fill the buffer.
	 */
	memset(buf, 0x00, len);
	/* Send a "config get" request; the semaphore's address rides in
	 * the correlation token so handleConfig() can wake us on ack.
	 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
					     HvLpEvent_Type_VirtualIo,
					     viomajorsubtype_config |
					     vioconfigget,
					     HvLpEvent_AckInd_DoAck,
					     HvLpEvent_AckType_ImmediateAck,
					     viopath_sourceinst
					     (viopath_hostLp),
					     viopath_targetinst
					     (viopath_hostLp),
					     (u64) (unsigned long)
					     &Semaphore, VIOVERSION << 16,
					     ((u64) dmaa) << 32, len, 0,
					     0);
	if (hvrc != HvLpEvent_Rc_Good) {
		printk("viopath hv error on op %d\n", (int) hvrc);
	}

	/* Block until handleConfig() posts the semaphore. */
	down(&Semaphore);

	pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);

	/* Append the serial number, converting each VPD byte from
	 * EBCDIC.  Each store overwrites the current NUL terminator;
	 * the byte after it is still zero from the memset above.
	 * NOTE(review): this assumes the hypervisor response leaves at
	 * least one NUL within the first len bytes -- verify.
	 */
	sprintf(buf + strlen(buf), "SRLNBR=");
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.mfgID[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[1]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[2]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[3]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[4]);
	buf[strlen(buf)] = e2a(xItExtVpdPanel.systemSerial[5]);
	buf[strlen(buf)] = '\n';
	*eof = 1;
	return strlen(buf);
}
246
/* Handle writes to our proc file system entry.  Writes are accepted
 * but ignored; we simply claim the whole buffer was consumed.
 */
static int proc_write(struct file *file, const char *buffer,
		      unsigned long count, void *data)
{
	/* Nothing to do today. */
	return count;
}
256
257 /* setup our proc file system entries
258  */
259 static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
260 {
261         struct proc_dir_entry *ent;
262         ent = create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);
263         if (!ent)
264                 return;
265         ent->nlink = 1;
266         ent->data = NULL;
267         ent->read_proc = proc_read;
268         ent->write_proc = proc_write;
269 }
270
271 /* See if a given LP is active.  Allow for invalid lps to be passed in
272  * and just return invalid
273  */
274 int viopath_isactive(HvLpIndex lp)
275 {
276         if (lp == HvLpIndexInvalid)
277                 return 0;
278         if (lp < HVMAXARCHITECTEDLPS)
279                 return viopathStatus[lp].isActive;
280         else
281                 return 0;
282 }
283
/* We cache the source and target instance ids for each
 * partition.  Return the cached source instance id for the path
 * to the given LP.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
291
/* Return the cached target instance id for the path to the given LP. */
HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
296
/* Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	/* Refresh the cached instance ids for this path before using
	 * them in the event below.
	 */
	viopathStatus[remoteLp].mSourceInst =
	    HvCallEvent_getSourceLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
	    HvCallEvent_getTargetLpInstanceId(remoteLp,
					      HvLpEvent_Type_VirtualIo);

	/* Deliberately ignore the return code here.  if we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	/* Deferred ack: the hypervisor only acks this event when the
	 * remote partition goes away, which is what handleMonitorEvent
	 * uses to detect its death.
	 */
	hvrc = HvCallEvent_signalLpEventFast(remoteLp,
					     HvLpEvent_Type_VirtualIo,
					     viomajorsubtype_monitor,
					     HvLpEvent_AckInd_DoAck,
					     HvLpEvent_AckType_DeferredAck,
					     viopathStatus[remoteLp].
					     mSourceInst,
					     viopathStatus[remoteLp].
					     mTargetInst, viomonseq++,
					     0, 0, 0, 0, 0);

	/* The path counts as active only while a monitor message is
	 * outstanding.
	 */
	if (hvrc == HvLpEvent_Rc_Good) {
		viopathStatus[remoteLp].isActive = 1;
	} else {
		printk(KERN_WARNING_VIO
		       "could not connect to partition %d\n", remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}
337
/* Handle a monitor-subtype event.  An interrupt-function event is a
 * monitor message from the other partition (answer with one of our own
 * if the path isn't active yet); an ack-function event is the
 * hypervisor completing our outstanding monitor message, meaning the
 * other partition died.
 */
static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/* This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/* First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/* This path is for an acknowledgement; the other partition
	 * died
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId !=
	     viopathStatus[remoteLp].mSourceInst)
	    || (event->xTargetInstanceId !=
		viopathStatus[remoteLp].mTargetInst)) {
		printk(KERN_WARNING_VIO
		       "ignoring ack....mismatched instances\n");
		return;
	}

	printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);

	viopathStatus[remoteLp].isActive = 0;

	/* For each active handler, pass them a NULL
	 * message to indicate that the other partition
	 * died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i]) (NULL);
	}
}
386
387 int vio_setHandler(int subtype, vio_event_handler_t * beh)
388 {
389         subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
390
391         if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
392                 return -EINVAL;
393
394         if (vio_handler[subtype] != NULL)
395                 return -EBUSY;
396
397         vio_handler[subtype] = beh;
398         return 0;
399 }
400
401 int vio_clearHandler(int subtype)
402 {
403         subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
404
405         if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
406                 return -EINVAL;
407
408         if (vio_handler[subtype] == NULL)
409                 return -EAGAIN;
410
411         vio_handler[subtype] = NULL;
412         return 0;
413 }
414
415 static void handleConfig(struct HvLpEvent *event)
416 {
417         if (!event)
418                 return;
419         if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
420                 printk(KERN_WARNING_VIO
421                        "unexpected config request from partition %d",
422                        event->xSourceLp);
423
424                 if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
425                     (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
426                         event->xRc = HvLpEvent_Rc_InvalidSubtype;
427                         HvCallEvent_ackLpEvent(event);
428                 }
429                 return;
430         }
431
432         up((struct semaphore *) event->xCorrelationToken);
433 }
434
/* Initialization of the hosting partition: record our own LP index and
 * the index of the partition hosting our virtual devices, then set up
 * the proc entry and the config-event handler.  Safe to call more than
 * once; only the first call has any effect.
 */
void vio_set_hostlp(void)
{
	/* If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/* Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	/* If we have a valid hosting LP, create a proc file system entry
	 * for config information
	 */
	if (viopath_hostLp != HvLpIndexInvalid) {
		iSeries_proc_callback(&vio_proc_init);
		vio_setHandler(viomajorsubtype_config, handleConfig);
	}
}
459
/* Dispatch routine registered with the hypervisor for all virtual io
 * LP events.  Validates the event's source/target instance ids against
 * the cached path state, then hands the event to the registered
 * per-subtype handler (acking unexpected subtypes when required).
 */
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype =
	    (event->
	     xSubtype & VIOMAJOR_SUBTYPE_MASK) >> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/* The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it would have
		 * different source and target instances, in which case we'd want to
		 * reset them.  This case really protects against an unauthorized
		 * active partition sending interrupts or acks to this linux partition.
		 *
		 * Note the cross-check: the remote's source instance must
		 * match our cached *target* instance, and vice versa.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		/* Ack path: an event we sent is coming back, so the
		 * instance ids match our cached values directly.
		 */
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(KERN_WARNING_VIO
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary
		 */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens
	 */
	(*vio_handler[subtype]) (event);
}
537
538 static void viopath_donealloc(void *parm, int number)
539 {
540         struct doneAllocParms_t *doneAllocParmsp =
541             (struct doneAllocParms_t *) parm;
542         doneAllocParmsp->number = number;
543         up(doneAllocParmsp->sem);
544 }
545
/* Synchronously reserve numEvents LP events for the path to remoteLp.
 * Returns the number actually allocated, as reported by the async
 * completion callback.  May sleep; don't call with locks held.
 */
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents,
			    &viopath_donealloc, &doneAllocParms);

	/* viopath_donealloc() fills in .number and posts the semaphore. */
	down(&Semaphore);

	return doneAllocParms.number;
}
560
/* Open the virtual io path to the given partition for one event
 * subtype and reserve numReq LP events for it.  The first opener of a
 * path allocates the per-subtype event buffers, opens the hypervisor
 * event path, registers vio_handleEvent, and starts the monitor
 * message exchange.  Returns 0 or a negative errno value.
 */
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	void *tempEventBuffer = NULL;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	/*
	 * NOTE: If VIO_MAX_SUBTYPES exceeds 16 then we need
	 * to allocate more than one page for the event_buffer.
	 * The page is fetched here, before taking the spinlock,
	 * because GFP_KERNEL allocations can sleep.
	 */
	if (event_buffer[0] == NULL) {
		if (VIO_MAX_SUBTYPES <= 16) {
			tempEventBuffer =
			    (void *) get_free_page(GFP_KERNEL);
			if (tempEventBuffer == NULL)
				return -ENOMEM;
		} else {
			printk(KERN_INFO_VIO
			       "VIO_MAX_SUBTYPES > 16. Need more space.");
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&statuslock, flags);

	/*
	 * OK...we can fit 16 maximum-sized events (256 bytes) in
	 * each page (4096).
	 */
	if (event_buffer[0] == NULL) {
		event_buffer[0] = tempEventBuffer;
		atomic_set(&event_buffer_available[0], 1);
		/*
		 * Start at the second element because we've already
		 * set the pointer for the first element and set the
		 * pointers for every 256 bytes in the page we
		 * allocated earlier.
		 */
		for (i = 1; i < VIO_MAX_SUBTYPES; i++) {
			event_buffer[i] = event_buffer[i - 1] + 256;
			atomic_set(&event_buffer_available[i], 1);
		}
	} else {
		/*
		 * While we were fetching the pages, which shouldn't
		 * be done in a spin lock, another call to viopath_open
		 * decided to do the same thing and allocated storage
		 * and set the event_buffer before we could so we'll
		 * free the one that we allocated and continue with our
		 * viopath_open operation.
		 */
		free_page((unsigned long) tempEventBuffer);
	}

	viopathStatus[remoteLp].users[subtype]++;

	/* First user of this path: do the one-time path setup. */
	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp,
					    HvLpEvent_Type_VirtualIo);

		spin_unlock_irqrestore(&statuslock, flags);
		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated +=
		    tempNumAllocated;

		/* Couldn't reserve even one event: undo the open. */
		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
						     HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
		    HvCallEvent_getSourceLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
		    HvCallEvent_getTargetLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);

		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);

		sendMonMsg(remoteLp);

		printk(KERN_INFO_VIO
		       "Opening connection to partition %d, setting sinst %d, tinst %d\n",
		       remoteLp,
		       viopathStatus[remoteLp].mSourceInst,
		       viopathStatus[remoteLp].mTargetInst);
	}

	/* Reserve the events this caller asked for; allocateEvents()
	 * sleeps, so drop the lock around it.
	 */
	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
677
/* Release one subtype's use of the path to the given partition and
 * return numReq LP events to the hypervisor.  When no subtype has the
 * path open any more, the hypervisor event path is closed and the
 * event-buffer page is freed.  Returns 0, or -EINVAL for a bad
 * partition index or subtype.
 */
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0) {
		viopathStatus[remoteLp].users[subtype]--;
	}
	spin_unlock_irqrestore(&statuslock, flags);

	/* Give the events back; wait for the async callback to run. */
	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	/* Count remaining users across all subtypes. */
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) {
		numOpen += viopathStatus[remoteLp].users[i];
	}

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		/* NOTE(review): message lacks a trailing newline. */
		printk(KERN_INFO_VIO
		       "Closing connection to partition %d", remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
					     HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		/* Mark every per-subtype buffer unavailable before the
		 * backing page is released below.
		 */
		for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
			atomic_set(&event_buffer_available[i], 0);
		}

		/*
		 * Precautionary check to make sure we don't
		 * erroneously try to free a page that wasn't
		 * allocated.
		 */
		if (event_buffer[0] != NULL) {
			free_page((unsigned long) event_buffer[0]);
			for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
				event_buffer[i] = NULL;
			}
		}

	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
745
746 void *vio_get_event_buffer(int subtype)
747 {
748         subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
749         if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
750                 return NULL;
751
752         if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
753                 return event_buffer[subtype];
754         else
755                 return NULL;
756 }
757
/* Return an event buffer obtained from vio_get_event_buffer().
 * Complains and does nothing for a bad subtype or a buffer that was
 * never checked out; a mismatched pointer is warned about but the
 * buffer is marked available anyway.
 */
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(KERN_WARNING_VIO
		       "unexpected subtype %d freeing event buffer\n",
		       subtype);
		return;
	}

	/* Nonzero "available" means the buffer was never handed out. */
	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(KERN_WARNING_VIO
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != event_buffer[subtype]) {
		printk(KERN_WARNING_VIO
		       "freeing invalid event buffer, subtype %d\n",
		       subtype);
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
783
/* Catch-all entries returned by vio_lookup_rc(). */
static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };

/* Default mapping of VIO return codes to errno values and message
 * text, terminated by an all-zero entry.
 */
static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};
796
797 const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry
798                                             *local_table, u16 rc)
799 {
800         const struct vio_error_entry *cur;
801         if (!rc)
802                 return &vio_no_error;
803         if (local_table)
804                 for (cur = local_table; cur->rc; ++cur)
805                         if (cur->rc == rc)
806                                 return cur;
807         for (cur = vio_default_errors; cur->rc; ++cur)
808                 if (cur->rc == rc)
809                         return cur;
810         return &vio_unknown_error;
811 }