/* -*- linux-c -*-
 *  drivers/iseries/viopath.c
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software;  you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/config.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/wait.h>

#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/iSeries_proc.h>

#include "vio.h"

EXPORT_SYMBOL(viopath_hostLp);
EXPORT_SYMBOL(vio_set_hostlp);
EXPORT_SYMBOL(viopath_open);
EXPORT_SYMBOL(viopath_close);
EXPORT_SYMBOL(viopath_isactive);
EXPORT_SYMBOL(viopath_sourceinst);
EXPORT_SYMBOL(viopath_targetinst);
EXPORT_SYMBOL(vio_setHandler);
EXPORT_SYMBOL(vio_clearHandler);
EXPORT_SYMBOL(vio_get_event_buffer);
EXPORT_SYMBOL(vio_free_event_buffer);

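/* Typical client usage, as a sketch only (my_handler and the event
 * counts are illustrative, not part of this file; the config subtype
 * stands in for whatever subtype a real client uses):
 *
 *      vio_setHandler(viomajorsubtype_config, my_handler);
 *      viopath_open(viopath_hostLp, viomajorsubtype_config, 2);
 *      ...signal events using viopath_sourceinst(viopath_hostLp) and
 *         viopath_targetinst(viopath_hostLp)...
 *      viopath_close(viopath_hostLp, viomajorsubtype_config, 2);
 *      vio_clearHandler(viomajorsubtype_config);
 */
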
extern struct pci_dev * iSeries_vio_dev;

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
        int isOpen:1;           /* Did we open the path?            */
        int isActive:1;         /* Do we have a mon msg outstanding */
        int users[VIO_MAX_SUBTYPES];
        HvLpInstanceId mSourceInst;
        HvLpInstanceId mTargetInst;
        int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static spinlock_t statuslock = SPIN_LOCK_UNLOCKED;

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static void *event_buffer[VIO_MAX_SUBTYPES];
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];

static void handleMonitorEvent(struct HvLpEvent *event);

/* We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.
 */
struct doneAllocParms_t {
        struct semaphore *sem;
        int number;
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is OK.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = 0xff;        /* HvLpIndexInvalid */

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];

/* Handle reads from the proc file system.  We send a config request
 * to the hosting partition and block until handleConfig() posts the
 * on-stack semaphore via the event's correlation token.
 */
static int proc_read(char *buf, char **start, off_t offset,
                     int blen, int *eof, void *data)
{
        HvLpEvent_Rc hvrc;
        DECLARE_MUTEX_LOCKED(Semaphore);
        dma_addr_t dmaa =
            pci_map_single(iSeries_vio_dev, buf, PAGE_SIZE, PCI_DMA_FROMDEVICE);
        int len = PAGE_SIZE;

        if (len > blen)
                len = blen;

        memset(buf, 0x00, len);
        hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
                                             HvLpEvent_Type_VirtualIo,
                                             viomajorsubtype_config |
                                             vioconfigget,
                                             HvLpEvent_AckInd_DoAck,
                                             HvLpEvent_AckType_ImmediateAck,
                                             viopath_sourceinst
                                             (viopath_hostLp),
                                             viopath_targetinst
                                             (viopath_hostLp),
                                             (u64) (unsigned long)
                                             &Semaphore, VIOVERSION << 16,
                                             ((u64) dmaa) << 32, len, 0,
                                             0);
        /* Only wait if the event was actually signalled; otherwise
         * nothing will ever post the semaphore.
         */
        if (hvrc == HvLpEvent_Rc_Good)
                down(&Semaphore);
        else
                printk(KERN_WARNING_VIO "hv error on op %d\n", (int) hvrc);

        pci_unmap_single(iSeries_vio_dev, dmaa, PAGE_SIZE, PCI_DMA_FROMDEVICE);

        *eof = 1;
        return strlen(buf);
}

/* Handle writes to our proc file system
 */
static int proc_write(struct file *file, const char *buffer,
                      unsigned long count, void *data)
{
        /* Doesn't do anything today!!!
         */
        return count;
}

/* setup our proc file system entries
 */
static void vio_proc_init(struct proc_dir_entry *iSeries_proc)
{
        struct proc_dir_entry *ent;
        ent = create_proc_entry("config", S_IFREG | S_IRUSR, iSeries_proc);
        if (!ent)
                return;
        ent->nlink = 1;
        ent->data = NULL;
        ent->read_proc = proc_read;
        ent->write_proc = proc_write;
}
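
/* Note: iSeries_proc_callback() arranges for vio_proc_init() to run
 * against the iSeries proc directory, so the "config" entry above
 * typically surfaces as /proc/iSeries/config (path assumed from the
 * iSeries proc root, not spelled out in this file).
 */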

/* See if a given LP is active.  Allow for invalid lps to be passed in
 * and just return invalid
 */
int viopath_isactive(HvLpIndex lp)
{
        if (lp == HvLpIndexInvalid)
                return 0;
        if (lp < HVMAXARCHITECTEDLPS)
                return viopathStatus[lp].isActive;
        else
                return 0;
}

/* We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
        return viopathStatus[lp].mSourceInst;
}

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
        return viopathStatus[lp].mTargetInst;
}

/* Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages, so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
        HvLpEvent_Rc hvrc;

        viopathStatus[remoteLp].mSourceInst =
            HvCallEvent_getSourceLpInstanceId(remoteLp,
                                              HvLpEvent_Type_VirtualIo);
        viopathStatus[remoteLp].mTargetInst =
            HvCallEvent_getTargetLpInstanceId(remoteLp,
                                              HvLpEvent_Type_VirtualIo);

        /* Deliberately ignore the return code here.  If we call this
         * more than once, we don't care.
         */
        vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

        hvrc = HvCallEvent_signalLpEventFast(remoteLp,
                                             HvLpEvent_Type_VirtualIo,
                                             viomajorsubtype_monitor,
                                             HvLpEvent_AckInd_DoAck,
                                             HvLpEvent_AckType_DeferredAck,
                                             viopathStatus[remoteLp].
                                             mSourceInst,
                                             viopathStatus[remoteLp].
                                             mTargetInst, viomonseq++,
                                             0, 0, 0, 0, 0);

        if (hvrc == HvLpEvent_Rc_Good) {
                viopathStatus[remoteLp].isActive = 1;
        } else {
                printk(KERN_WARNING_VIO
                       "could not connect to partition %d\n", remoteLp);
                viopathStatus[remoteLp].isActive = 0;
        }
}
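
/* Lifecycle sketch: viopath_open() calls sendMonMsg(), which leaves one
 * deferred-ack monitor event outstanding per partner partition.  If the
 * partner dies, the hypervisor completes that ack, the acknowledgement
 * path in handleMonitorEvent() below marks the path inactive, and every
 * registered handler is passed a NULL event as the "partner died" signal.
 */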

static void handleMonitorEvent(struct HvLpEvent *event)
{
        HvLpIndex remoteLp;
        int i;

        /* First see if this is just a normal monitor message from the
         * other partition
         */
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                if (!viopathStatus[remoteLp].isActive)
                        sendMonMsg(remoteLp);
                return;
        }

        /* This path is for an acknowledgement; the other partition
         * died
         */
        remoteLp = event->xTargetLp;
        if ((event->xSourceInstanceId !=
             viopathStatus[remoteLp].mSourceInst)
            || (event->xTargetInstanceId !=
                viopathStatus[remoteLp].mTargetInst)) {
                printk(KERN_WARNING_VIO
                       "ignoring ack: mismatched instances\n");
                return;
        }

        printk(KERN_WARNING_VIO "partition %d ended\n", remoteLp);

        viopathStatus[remoteLp].isActive = 0;

        /* For each active handler, pass them a NULL
         * message to indicate that the other partition
         * died
         */
        for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
                if (vio_handler[i] != NULL)
                        (*vio_handler[i]) (NULL);
        }
}

int vio_setHandler(int subtype, vio_event_handler_t * beh)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        if (vio_handler[subtype] != NULL)
                return -EBUSY;

        vio_handler[subtype] = beh;
        return 0;
}
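
/* Example: the call vio_setHandler(viomajorsubtype_config, handleConfig)
 * in vio_set_hostlp() stores handleConfig at index
 * viomajorsubtype_config >> VIOMAJOR_SUBTYPE_SHIFT, and vio_handleEvent()
 * recovers the same slot from each event's xSubtype field.
 */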

int vio_clearHandler(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;

        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        if (vio_handler[subtype] == NULL)
                return -EAGAIN;

        vio_handler[subtype] = NULL;
        return 0;
}

static void handleConfig(struct HvLpEvent *event)
{
        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                printk(KERN_WARNING_VIO
                       "unexpected config request from partition %d\n",
                       event->xSourceLp);

                /* We already know this is an interrupt-path event,
                 * so only the ack indicator needs checking.
                 */
                if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        up((struct semaphore *)(unsigned long) event->xCorrelationToken);
}

/* Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
        /* If this has already been set then we DON'T want to either change
         * it or re-register the proc file system
         */
        if (viopath_hostLp != HvLpIndexInvalid)
                return;

        /* Figure out our hosting partition.  This isn't allowed to change
         * while we're active
         */
        viopath_hostLp =
            HvCallCfg_getHostingLpIndex(HvLpConfig_getLpIndex());

        /* If we have a valid hosting LP, create a proc file system entry
         * for config information
         */
        if (viopath_hostLp != HvLpIndexInvalid) {
                iSeries_proc_callback(&vio_proc_init);
                vio_setHandler(viomajorsubtype_config, handleConfig);
        }
}

static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
        HvLpIndex remoteLp;
        int subtype =
            (event->
             xSubtype & VIOMAJOR_SUBTYPE_MASK) >> VIOMAJOR_SUBTYPE_SHIFT;

        if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
                remoteLp = event->xSourceLp;
                if (event->xSourceInstanceId !=
                    viopathStatus[remoteLp].mTargetInst) {
                        printk(KERN_WARNING_VIO
                               "message from invalid partition. "
                               "int msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (event->xTargetInstanceId !=
                    viopathStatus[remoteLp].mSourceInst) {
                        printk(KERN_WARNING_VIO
                               "message from invalid partition. "
                               "int msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xTargetInstanceId);
                        return;
                }
        } else {
                remoteLp = event->xTargetLp;
                if (event->xSourceInstanceId !=
                    viopathStatus[remoteLp].mSourceInst) {
                        printk(KERN_WARNING_VIO
                               "message from invalid partition. "
                               "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mSourceInst,
                               event->xSourceInstanceId);
                        return;
                }

                if (event->xTargetInstanceId !=
                    viopathStatus[remoteLp].mTargetInst) {
                        printk(KERN_WARNING_VIO
                               "message from invalid partition. "
                               "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
                               viopathStatus[remoteLp].mTargetInst,
                               event->xTargetInstanceId);
                        return;
                }
        }

        if (vio_handler[subtype] == NULL) {
                printk(KERN_WARNING_VIO
                       "unexpected virtual io event subtype %d from partition %d\n",
                       event->xSubtype, remoteLp);
                /* No handler.  Ack if necessary
                 */
                if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
                    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
                return;
        }

        /* This innocuous little line is where all the real work happens
         */
        (*vio_handler[subtype]) (event);
}

static void viopath_donealloc(void *parm, int number)
{
        struct doneAllocParms_t *doneAllocParmsp =
            (struct doneAllocParms_t *) parm;
        doneAllocParmsp->number = number;
        up(doneAllocParmsp->sem);
}

static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
        struct doneAllocParms_t doneAllocParms;
        DECLARE_MUTEX_LOCKED(Semaphore);
        doneAllocParms.sem = &Semaphore;

        mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,    /* It would be nice to put a real number here! */
                            numEvents,
                            &viopath_donealloc, &doneAllocParms);

        down(&Semaphore);

        return doneAllocParms.number;
}

int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
        int i;
        unsigned long flags;

        if ((remoteLp >= HvMaxArchitectedLps)
            || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);

        /* OK...we can fit 4 maximum-sized events (256 bytes) in
         * each page (4096).  Get a new page every 4
         */
        if (event_buffer[0] == NULL) {
                for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
                        if ((i % 4) == 0) {
                                /* GFP_ATOMIC: we hold statuslock and
                                 * must not sleep here
                                 */
                                event_buffer[i] =
                                    (void *) get_free_page(GFP_ATOMIC);
                                if (event_buffer[i] == NULL) {
                                        /* Free any pages already taken
                                         * so a retry starts clean
                                         */
                                        while (i > 0) {
                                                i -= 4;
                                                free_page((unsigned long)
                                                          event_buffer[i]);
                                                event_buffer[i] = NULL;
                                        }
                                        spin_unlock_irqrestore(&statuslock, flags);
                                        return -ENOMEM;
                                }
                        } else {
                                event_buffer[i] =
                                    event_buffer[i - 1] + 256;
                        }
                        atomic_set(&event_buffer_available[i], 1);
                }
        }
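
        /* Layout note (illustrative; assumes VIO_MAX_SUBTYPES is a
         * multiple of 4): buffers 4n through 4n+3 share one page at
         * offsets 0, 256, 512, and 768, so no buffer ever crosses a
         * page boundary.
         */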

        viopathStatus[remoteLp].users[subtype]++;

        if (!viopathStatus[remoteLp].isOpen) {
                HvCallEvent_openLpEventPath(remoteLp,
                                            HvLpEvent_Type_VirtualIo);

                viopathStatus[remoteLp].numberAllocated +=
                    allocateEvents(remoteLp, 1);

                if (viopathStatus[remoteLp].numberAllocated == 0) {
                        HvCallEvent_closeLpEventPath(remoteLp,
                                                     HvLpEvent_Type_VirtualIo);

                        spin_unlock_irqrestore(&statuslock, flags);
                        return -ENOMEM;
                }

                viopathStatus[remoteLp].mSourceInst =
                    HvCallEvent_getSourceLpInstanceId(remoteLp,
                                                      HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].mTargetInst =
                    HvCallEvent_getTargetLpInstanceId(remoteLp,
                                                      HvLpEvent_Type_VirtualIo);

                HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
                                          &vio_handleEvent);

                viopathStatus[remoteLp].isOpen = 1;

                sendMonMsg(remoteLp);

                printk(KERN_INFO_VIO
                       "Opening connection to partition %d, setting sinst %d, tinst %d\n",
                       remoteLp,
                       viopathStatus[remoteLp].mSourceInst,
                       viopathStatus[remoteLp].mTargetInst);
        }

        viopathStatus[remoteLp].numberAllocated +=
            allocateEvents(remoteLp, numReq);
        spin_unlock_irqrestore(&statuslock, flags);

        return 0;
}

int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
        unsigned long flags;
        int i;
        int numOpen;
        struct doneAllocParms_t doneAllocParms;
        DECLARE_MUTEX_LOCKED(Semaphore);
        doneAllocParms.sem = &Semaphore;

        if ((remoteLp >= HvMaxArchitectedLps)
            || (remoteLp == HvLpIndexInvalid))
                return -EINVAL;

        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return -EINVAL;

        spin_lock_irqsave(&statuslock, flags);

        viopathStatus[remoteLp].users[subtype]--;

        mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
                              numReq,
                              &viopath_donealloc,
                              &doneAllocParms);
        down(&Semaphore);

        for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) {
                numOpen += viopathStatus[remoteLp].users[i];
        }

        if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
                printk(KERN_INFO_VIO
                       "Closing connection to partition %d\n", remoteLp);

                HvCallEvent_closeLpEventPath(remoteLp,
                                             HvLpEvent_Type_VirtualIo);
                viopathStatus[remoteLp].isOpen = 0;
                viopathStatus[remoteLp].isActive = 0;

                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        atomic_set(&event_buffer_available[i], 0);

                /* Pages were allocated one per four 256-byte buffers;
                 * free them and clear the pointers so a later
                 * viopath_open() reallocates
                 */
                for (i = 0; i < VIO_MAX_SUBTYPES; i += 4)
                        free_page((unsigned long) event_buffer[i]);
                for (i = 0; i < VIO_MAX_SUBTYPES; i++)
                        event_buffer[i] = NULL;
        }
        spin_unlock_irqrestore(&statuslock, flags);
        return 0;
}

void *vio_get_event_buffer(int subtype)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
                return NULL;

        if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
                return event_buffer[subtype];
        else
                return NULL;
}
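
/* Pairing sketch (viomajorsubtype_config reused for illustration):
 *
 *      void *buf = vio_get_event_buffer(viomajorsubtype_config);
 *      if (buf != NULL) {
 *              ...build and signal an event from buf...
 *              vio_free_event_buffer(viomajorsubtype_config, buf);
 *      }
 *
 * Each subtype owns exactly one fixed buffer, so a second get before
 * the free returns NULL.
 */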

void vio_free_event_buffer(int subtype, void *buffer)
{
        subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
        if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
                printk(KERN_WARNING_VIO
                       "unexpected subtype %d freeing event buffer\n",
                       subtype);
                return;
        }

        if (atomic_read(&event_buffer_available[subtype]) != 0) {
                printk(KERN_WARNING_VIO
                       "freeing unallocated event buffer, subtype %d\n",
                       subtype);
                return;
        }

        if (buffer != event_buffer[subtype]) {
                printk(KERN_WARNING_VIO
                       "freeing invalid event buffer, subtype %d\n",
                       subtype);
                /* Don't mark the real buffer available if the caller
                 * handed us the wrong pointer
                 */
                return;
        }

        atomic_set(&event_buffer_available[subtype], 1);
}