/* File veth.c created by Kyle A. Lucke on Mon Aug 7 2000. */

/**************************************************************************/
/* IBM eServer iSeries Virtual Ethernet Device Driver                    */
/* Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.       */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify  */
/* it under the terms of the GNU General Public License as published by  */
/* the Free Software Foundation; either version 2 of the License, or     */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,       */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/* GNU General Public License for more details.                          */
/*                                                                        */
/* You should have received a copy of the GNU General Public License     */
/* along with this program; if not, write to the Free Software           */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with iSeries LPAR Linux.  It uses the low-level message       */
/* passing provided by the hypervisor to implement an ethernet-like      */
/* network device for inter-partition communication on the same          */
/* physical iSeries.                                                     */
/*                                                                        */
/* The iSeries LPAR hypervisor currently allows a partition to           */
/* communicate on up to 16 different virtual ethernets, all dynamically  */
/* configurable, at least for an OS/400 partition.  Dynamic              */
/* configuration is not yet supported for Linux.                         */
/*                                                                        */
/* Each virtual ethernet a given Linux partition participates in causes  */
/* a network device of the form ethXX to be created.                     */
/*                                                                        */
/* The virtual ethernet a given ethXX device talks on can be determined  */
/* by dumping /proc/iSeries/veth/vethX, where X is the virtual ethernet  */
/* number; the netdevice name is printed there.  The mapping is also     */
/* printed to the printk() buffer at module load time.                   */
/*                                                                        */
/* This driver (and others like it on other partitions) is responsible   */
/* for routing packets to and from other partitions.  The MAC addresses  */
/* used by the virtual ethernets contain meaning and should not be       */
/* modified.  Doing so could prevent your Linux partition from           */
/* communicating with the other OS/400 partitions on your physical       */
/* iSeries.  Similarly, setting the MAC address to something other than  */
/* the "virtual burned-in" address is not allowed, for the same reason.  */
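/*                                                                        */
/* For reference, the "virtual burned-in" address built in veth_probe()  */
/* below has the layout (vlan index in byte 3, this partition's LP       */
/* index in byte 5):                                                      */
/*                                                                        */
/*     02:01:FF:<vlan index>:FF:<LP index>                                */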
/*                                                                        */
/* 1. Although a partition may talk to the same remote partition over    */
/*    multiple shared ethernets, all shared ethernets to a given         */
/*    partition X draw on the same finite pool of hypervisor messages.   */
/*    Having 2 shared ethernets to the same remote partition therefore   */
/*    DOES NOT double the available bandwidth; the 2 shared ethernets    */
/*    split the same bandwidth between them.                             */
/*                                                                        */
/* 2. It is allowed to have a virtual ethernet that does not             */
/*    communicate with any other partition.  It won't do anything, but   */
/*    it's allowed.                                                      */
/*                                                                        */
/* 3. There is no "loopback" mode for a virtual ethernet device.  If     */
/*    you send a packet to your own MAC address, it is simply dropped;   */
/*    you won't see it on the receive side.  Such a thing could be       */
/*    done, but this driver DOES NOT do so.                              */
/*                                                                        */
/* 4. Multicast addressing is implemented by broadcasting the multicast  */
/*    frames to the other partitions.  It is the responsibility of the   */
/*    receiving partition to filter for the addresses it wants.          */
/*                                                                        */
/* 5. This module uses several different bottom half handlers for the    */
/*    non-performance-critical paths (setup, error handling, etc.).      */
/*    Multiple bottom halves were used because a single one could not    */
/*    keep up with the much faster iSeries device drivers this Linux     */
/*    driver talks to.  All high-priority work (receiving frames,        */
/*    handling frame acks) is done in the interrupt handler for maximum  */
/*    performance.                                                       */
/*                                                                        */
/* Tunable parameters:                                                    */
/*                                                                        */
/* VethBuffersToAllocate: This compile-time option defaults to 120.  It  */
/* can safely be changed to something greater or less than the default.  */
/* It controls how much memory Linux allocates per remote partition it   */
/* communicates with.  You can experiment with it to see how it affects  */
/* performance, dropped packets, etc.  Without digging into the          */
/* complete driver, it can be thought of as the maximum number of        */
/* packets outstanding to a remote partition at a time.                  */
/*                                                                        */
/**************************************************************************/
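/* For example (illustrative): "cat /proc/iSeries/veth/veth0" dumps port  */
/* 0, including its net device name, and /proc/iSeries/veth/lparN dumps   */
/* the connection state for remote partition N (see veth_proc_init and    */
/* the proc_veth_dump_* routines below).                                  */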
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/iSeries/mf.h>

#ifndef _HVLPCONFIG_H
#include <asm/iSeries/HvLpConfig.h>
#endif

#include <asm/iSeries/veth-proc.h>
#include <asm/iSeries/HvTypes.h>

#ifndef _ISERIES_PROC_H
#include <asm/iSeries/iSeries_proc.h>
#endif

#include <asm/semaphore.h>
#include <linux/proc_fs.h>
127 #define veth_printk(fmt, args...) \
128 printk(KERN_INFO "%s: " fmt, __FILE__, ## args)
130 #define veth_error_printk(fmt, args...) \
131 printk(KERN_ERR "(%s:%3.3d) ERROR: " fmt, __FILE__, __LINE__ , ## args)
134 #define VIRT_TO_ABSOLUTE(a) virt_to_absolute_outline(a)
136 #define VIRT_TO_ABSOLUTE(a) virt_to_absolute(a)
139 static const char __initdata *version =
140 "v0.9 02/15/2001 Kyle Lucke, klucke@us.ibm.com\n";
142 static int probed __initdata = 0;
143 #define VethBuffersToAllocate 120
145 static struct VethFabricMgr *mFabricMgr = NULL;
146 static struct proc_dir_entry * veth_proc_root = NULL;
148 DECLARE_MUTEX_LOCKED(VethProcSemaphore);
150 static int veth_open(struct net_device *dev);
151 static int veth_close(struct net_device *dev);
152 static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
153 static int veth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
154 static void veth_handleEvent(struct HvLpEvent *, struct pt_regs *);
155 static void veth_handleAck(struct HvLpEvent *);
156 static void veth_handleInt(struct HvLpEvent *);
157 static void veth_openConnections(void);
158 static void veth_openConnection(u8, int lockMe);
159 static void veth_closeConnection(u8, int lockMe);
160 static void veth_intFinishOpeningConnections(void *, int number);
161 static void veth_finishOpeningConnections(void *);
162 static void veth_finishOpeningConnectionsLocked(struct VethLpConnection *);
163 static int veth_multicast_wanted(struct VethPort *port, u64 dest);
164 static void veth_set_multicast_list(struct net_device *dev);
166 static void veth_sendCap(struct VethLpConnection *);
167 static void veth_sendMonitor(struct VethLpConnection *);
168 static void veth_takeCap(struct VethLpConnection *, struct VethLpEvent *);
169 static void veth_takeCapAck(struct VethLpConnection *, struct VethLpEvent *);
170 static void veth_takeMonitorAck(struct VethLpConnection *, struct VethLpEvent *);
171 static void veth_msgsInit(struct VethLpConnection *connection);
172 static void veth_recycleMsg(struct VethLpConnection *, u16);
173 static void veth_capBh(struct VethLpConnection *);
174 static void veth_capAckBh(struct VethLpConnection *);
175 static void veth_monitorAckBh(struct VethLpConnection *);
176 static void veth_takeFrames(struct VethLpConnection *, struct VethLpEvent *);
177 static void veth_pTransmit(struct sk_buff *skb, HvLpIndex remoteLp, struct net_device *dev);
178 static struct net_device_stats *veth_get_stats(struct net_device *dev);
179 static void veth_intFinishMsgsInit(void *, int);
180 static void veth_finishMsgsInit(struct VethLpConnection *connection);
181 static void veth_intFinishCapBh(void *, int);
182 static void veth_finishCapBh(struct VethLpConnection *connection);
183 static void veth_finishCapBhLocked(struct VethLpConnection *connection);
184 static void veth_finishSendCap(struct VethLpConnection *connection);
185 static void veth_timedAck(unsigned long connectionPtr);
187 static void veth_waitForEnd(void);
189 static void veth_failMe(struct VethLpConnection *connection);
191 extern struct pci_dev * iSeries_veth_dev;
193 int __init veth_probe(void)
195 struct net_device *dev= NULL;
196 struct VethPort *port = NULL;
198 int displayVersion = 0;
200 u16 vlanMap = HvLpConfig_getVirtualLanIndexMap();
209 int bitOn = vlanMap & 0x8000;
215 dev = init_etherdev(NULL, sizeof(struct VethPort));
218 veth_error_printk("Unable to allocate net_device structure!\n");
223 dev->priv = kmalloc(sizeof(struct VethPort), GFP_KERNEL);
225 veth_error_printk("Unable to allocate memory\n");
229 veth_printk("Found an ethernet device %s (veth=%d) (addr=%p)\n", dev->name, vlanIndex, dev);
230 port = mFabricMgr->mPorts[vlanIndex] = (struct VethPort *)dev->priv;
231 memset(port, 0, sizeof(struct VethPort));
232 rwlock_init(&(port->mMcastGate));
233 mFabricMgr->mPorts[vlanIndex]->mDev = dev;
235 dev->dev_addr[0] = 0x02;
236 dev->dev_addr[1] = 0x01;
237 dev->dev_addr[2] = 0xFF;
238 dev->dev_addr[3] = vlanIndex;
239 dev->dev_addr[4] = 0xFF;
240 dev->dev_addr[5] = HvLpConfig_getLpIndex_outline();
243 memcpy(&(port->mMyAddress), dev->dev_addr, 6);
245 dev->open = &veth_open;
246 dev->hard_start_xmit = &veth_start_xmit;
247 dev->stop = &veth_close;
248 dev->get_stats = veth_get_stats;
249 dev->set_multicast_list = &veth_set_multicast_list;
250 dev->do_ioctl = &veth_ioctl;
252 /* display version info if adapter is found */
255 /* set display flag to TRUE so that */
256 /* we only display this string ONCE */
258 veth_printk("%s", version);
264 vlanMap = vlanMap << 1;
274 MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
275 MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
277 DECLARE_MUTEX_LOCKED(VethModuleBhDone);
278 int VethModuleReopen = 1;
280 void veth_proc_delete(struct proc_dir_entry *iSeries_proc)
283 HvLpIndex thisLp = HvLpConfig_getLpIndex_outline();
284 u16 vlanMap = HvLpConfig_getVirtualLanIndexMap();
287 for (i=0; i < HvMaxArchitectedLps; ++i)
291 if (HvLpConfig_doLpsCommunicateOnVirtualLan(thisLp, i))
294 sprintf(name, "lpar%d", i);
295 remove_proc_entry(name, veth_proc_root);
302 int bitOn = vlanMap & 0x8000;
307 sprintf(name, "veth%d", vlanIndex);
308 remove_proc_entry(name, veth_proc_root);
312 vlanMap = vlanMap << 1;
315 remove_proc_entry("veth", iSeries_proc);
317 up(&VethProcSemaphore);
320 void veth_waitForEnd(void)
322 up(&VethModuleBhDone);
325 void __exit veth_module_cleanup(void)
328 struct VethFabricMgr *myFm = mFabricMgr;
329 struct tq_struct myBottomHalf;
330 struct net_device *thisOne = NULL;
332 VethModuleReopen = 0;
334 for (i = 0; i < HvMaxArchitectedLps; ++i)
336 veth_closeConnection(i, 1);
339 myBottomHalf.routine = (void *)(void *)veth_waitForEnd;
341 queue_task(&myBottomHalf, &tq_immediate);
342 mark_bh(IMMEDIATE_BH);
344 down(&VethModuleBhDone);
346 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
352 down(&VethProcSemaphore);
354 iSeries_proc_callback(&veth_proc_delete);
356 down(&VethProcSemaphore);
358 for (i = 0; i < HvMaxArchitectedLps; ++i)
360 if (myFm->mConnection[i].mNumberAllocated + myFm->mConnection[i].mNumberRcvMsgs > 0)
362 mf_deallocateLpEvents(myFm->mConnection[i].mRemoteLp,
363 HvLpEvent_Type_VirtualLan,
364 myFm->mConnection[i].mNumberAllocated + myFm->mConnection[i].mNumberRcvMsgs,
369 if (myFm->mConnection[i].mMsgs != NULL)
371 kfree(myFm->mConnection[i].mMsgs);
375 for (i = 0; i < HvMaxArchitectedVirtualLans; ++i)
377 if (myFm->mPorts[i] != NULL)
379 thisOne = myFm->mPorts[i]->mDev;
380 myFm->mPorts[i] = NULL;
386 veth_printk("Unregistering %s (veth=%d)\n", thisOne->name, i);
387 unregister_netdev(thisOne);
395 module_exit(veth_module_cleanup);
399 void veth_proc_init(struct proc_dir_entry *iSeries_proc)
402 HvLpIndex thisLp = HvLpConfig_getLpIndex_outline();
403 u16 vlanMap = HvLpConfig_getVirtualLanIndexMap();
407 veth_proc_root = proc_mkdir("veth", iSeries_proc);
408 if (!veth_proc_root) return;
410 for (i=0; i < HvMaxArchitectedLps; ++i)
414 if (HvLpConfig_doLpsCommunicateOnVirtualLan(thisLp, i))
416 struct proc_dir_entry *ent;
418 sprintf(name, "lpar%d", (int)i);
419 ent = create_proc_entry(name, S_IFREG|S_IRUSR, veth_proc_root);
422 ent->data = (void *)i;
423 ent->read_proc = proc_veth_dump_connection;
424 ent->write_proc = NULL;
431 int bitOn = vlanMap & 0x8000;
435 struct proc_dir_entry *ent;
437 sprintf(name, "veth%d", (int)vlanIndex);
438 ent = create_proc_entry(name, S_IFREG|S_IRUSR, veth_proc_root);
441 ent->data = (void *)vlanIndex;
442 ent->read_proc = proc_veth_dump_port;
443 ent->write_proc = NULL;
447 vlanMap = vlanMap << 1;
450 up(&VethProcSemaphore);
453 int __init veth_module_init(void)
458 mFabricMgr = kmalloc(sizeof(struct VethFabricMgr), GFP_KERNEL);
459 memset(mFabricMgr, 0, sizeof(struct VethFabricMgr));
460 veth_printk("Initializing veth module, fabric mgr (address=%p)\n", mFabricMgr);
462 mFabricMgr->mEyecatcher = 0x56455448464D4752ULL;
463 mFabricMgr->mThisLp = HvLpConfig_getLpIndex_outline();
465 for (i=0; i < HvMaxArchitectedLps; ++i)
467 mFabricMgr->mConnection[i].mEyecatcher = 0x564554484C50434EULL;
468 veth_failMe(mFabricMgr->mConnection+i);
469 spin_lock_init(&mFabricMgr->mConnection[i].mAckGate);
470 spin_lock_init(&mFabricMgr->mConnection[i].mStatusGate);
473 status = veth_probe();
477 veth_openConnections();
480 iSeries_proc_callback(&veth_proc_init);
485 module_init(veth_module_init);
487 static void veth_failMe(struct VethLpConnection *connection)
489 connection->mConnectionStatus.mSentCap = 0;
490 connection->mConnectionStatus.mCapAcked = 0;
491 connection->mConnectionStatus.mGotCap = 0;
492 connection->mConnectionStatus.mGotCapAcked = 0;
493 connection->mConnectionStatus.mSentMonitor = 0;
494 connection->mConnectionStatus.mFailed = 1;
497 static int veth_open(struct net_device *dev)
499 struct VethPort *port = (struct VethPort *)dev->priv;
501 memset(&port->mStats, 0, sizeof(port->mStats));
504 netif_start_queue(dev);
509 static int veth_close(struct net_device *dev)
511 netif_stop_queue(dev);
518 static struct net_device_stats *veth_get_stats(struct net_device *dev)
520 struct VethPort *port = (struct VethPort *)dev->priv;
522 return(&port->mStats);
526 static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
528 unsigned char *frame = skb->data;
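/* byte 5 of the destination MAC is the target partition's LP index
   (see the virtual MAC layout built in veth_probe) */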
529 HvLpIndex remoteLp = frame[5];
533 if (mFabricMgr == NULL)
535 veth_error_printk("NULL fabric manager with active ports!\n");
536 netif_stop_queue(dev);
542 if ((*frame & 0x01) != 0x01) /* broadcast or multicast */
544 if ((remoteLp != mFabricMgr->mThisLp) &&
545 (HvLpConfig_doLpsCommunicateOnVirtualLan(mFabricMgr->mThisLp, remoteLp)))
546 veth_pTransmit(skb, remoteLp, dev);
550 for (i=0; i < HvMaxArchitectedLps; ++i)
552 if (i != mFabricMgr->mThisLp)
555 skb = skb_clone(skb, GFP_ATOMIC);
559 if (HvLpConfig_doLpsCommunicateOnVirtualLan(mFabricMgr->mThisLp, i))
561 /* the ack handles deleting the skb */
562 veth_pTransmit(skb, i, dev);
571 static void veth_pTransmit(struct sk_buff *skb, HvLpIndex remoteLp, struct net_device *dev)
573 struct VethLpConnection *connection = mFabricMgr->mConnection + remoteLp;
574 HvLpEvent_Rc returnCode;
576 if (connection->mConnectionStatus.mFailed != 1)
578 struct VethMsg *msg = NULL;
579 VETHSTACKPOP(&(connection->mMsgStack), msg);
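/* Grab a free VethMsg slot; its token comes back in a FramesAck event so
   that veth_recycleMsg can unmap the DMA buffer, free the skb, and return
   the slot to the stack. */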
583 if ((skb->len > 14) &&
586 dma_addr_t dma_addr = pci_map_single(iSeries_veth_dev,
596 msg->mEvent.mSendData.mAddress[0] = dma_addr;
597 msg->mEvent.mSendData.mLength[0] = skb->len;
598 msg->mEvent.mSendData.mEofMask = 0xFFFFFFFFUL;
600 test_and_set_bit(0, &(msg->mInUse));
602 returnCode = HvCallEvent_signalLpEventFast(remoteLp,
603 HvLpEvent_Type_VirtualLan,
605 HvLpEvent_AckInd_NoAck,
606 HvLpEvent_AckType_ImmediateAck,
607 connection->mSourceInst,
608 connection->mTargetInst,
610 msg->mEvent.mFpData.mData1,
611 msg->mEvent.mFpData.mData2,
612 msg->mEvent.mFpData.mData3,
613 msg->mEvent.mFpData.mData4,
614 msg->mEvent.mFpData.mData5);
618 returnCode = -1; /* Bad return code */
621 if (returnCode != HvLpEvent_Rc_Good)
623 struct VethPort *port = (struct VethPort *)dev->priv;
625 if (msg->mEvent.mSendData.mAddress[0])
627 pci_unmap_single(iSeries_veth_dev, dma_addr, skb->len, PCI_DMA_TODEVICE);
630 dev_kfree_skb_irq(skb);
633 memset(&(msg->mEvent.mSendData), 0, sizeof(struct VethFramesData));
634 VETHSTACKPUSH(&(connection->mMsgStack), msg);
635 port->mStats.tx_dropped++;
639 struct VethPort *port = (struct VethPort *)dev->priv;
640 port->mStats.tx_packets++;
641 port->mStats.tx_bytes += skb->len;
647 struct VethPort *port = (struct VethPort *)dev->priv;
648 port->mStats.tx_dropped++;
653 struct VethPort *port = (struct VethPort *)dev->priv;
654 port->mStats.tx_dropped++;
658 static int veth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
664 static void veth_set_multicast_list(struct net_device *dev)
667 struct VethPort *port = (struct VethPort *)dev->priv;
671 write_lock_irqsave(&port->mMcastGate, flags);
673 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
674 port->mPromiscuous = 1;
676 struct dev_mc_list *dmi = dev->mc_list;
678 if (dev->flags & IFF_ALLMULTI) {
685 for (i = 0; ((i < dev->mc_count) && (i < 12)); i++) { /* for each address in the list */
686 addrs = dmi->dmi_addr;
688 if ((*addrs & 0x01) == 1) { /* multicast address? */
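/* Store the 6 MAC bytes in the high-order bytes of a u64; this is the
   same form veth_takeFrames uses when comparing an incoming destination
   address against this list. */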
689 memcpy(&newAddress, addrs, 6);
690 newAddress &= 0xFFFFFFFFFFFF0000;
692 port->mMcasts[port->mNumAddrs] = newAddress;
694 port->mNumAddrs = port->mNumAddrs + 1;
700 write_unlock_irqrestore(&port->mMcastGate, flags);
704 static void veth_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
706 if (event->xFlags.xFunction == HvLpEvent_Function_Ack)
708 veth_handleAck(event);
710 else if (event->xFlags.xFunction == HvLpEvent_Function_Int)
712 veth_handleInt(event);
716 static void veth_handleAck(struct HvLpEvent *event)
718 struct VethLpConnection *connection = &(mFabricMgr->mConnection[event->xTargetLp]);
719 struct VethLpEvent *vethEvent = (struct VethLpEvent *)event;
721 switch(event->xSubtype)
723 case VethEventTypeCap:
725 veth_takeCapAck(connection, vethEvent);
728 case VethEventTypeMonitor:
730 veth_takeMonitorAck(connection, vethEvent);
735 veth_error_printk("Unknown ack type %d from lpar %d\n", event->xSubtype, connection->mRemoteLp);
740 static void veth_handleInt(struct HvLpEvent *event)
743 struct VethLpConnection *connection = &(mFabricMgr->mConnection[event->xSourceLp]);
744 struct VethLpEvent *vethEvent = (struct VethLpEvent *)event;
746 switch(event->xSubtype)
748 case VethEventTypeCap:
750 veth_takeCap(connection, vethEvent);
753 case VethEventTypeMonitor:
755 /* do nothing... this'll hang out here til we're dead, and the hypervisor will return it for us. */
758 case VethEventTypeFramesAck:
760 for (i=0; i < VethMaxFramesMsgsAcked; ++i)
762 u16 msg = vethEvent->mDerivedData.mFramesAckData.mToken[i];
763 veth_recycleMsg(connection, msg);
767 case VethEventTypeFrames:
769 veth_takeFrames(connection, vethEvent);
774 veth_error_printk("Unknown interrupt type %d from lpar %d\n", event->xSubtype, connection->mRemoteLp);
779 static void veth_openConnections()
783 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan, &veth_handleEvent);
785 /* Now I need to run through the active lps and open connections to the ones I'm supposed to
788 for (i=HvMaxArchitectedLps-1; i >=0; --i)
790 if (i != mFabricMgr->mThisLp)
792 if (HvLpConfig_doLpsCommunicateOnVirtualLan(mFabricMgr->mThisLp, i))
794 veth_openConnection(i, 1);
798 veth_closeConnection(i, 1);
804 static void veth_intFinishOpeningConnections(void *parm, int number)
806 struct VethLpConnection *connection = (struct VethLpConnection *)parm;
807 connection->mAllocBhTq.data = parm;
808 connection->mNumberAllocated = number;
809 queue_task(&connection->mAllocBhTq, &tq_immediate);
810 mark_bh(IMMEDIATE_BH);
813 static void veth_finishOpeningConnections(void *parm)
816 struct VethLpConnection *connection = (struct VethLpConnection *)parm;
817 spin_lock_irqsave(&connection->mStatusGate, flags);
818 veth_finishOpeningConnectionsLocked(connection);
819 spin_unlock_irqrestore(&connection->mStatusGate, flags);
822 static void veth_finishOpeningConnectionsLocked(struct VethLpConnection *connection)
824 if (connection->mNumberAllocated >= 2)
826 connection->mConnectionStatus.mCapMonAlloced = 1;
827 veth_sendCap(connection);
831 veth_error_printk("Couldn't allocate base msgs for lpar %d, only got %d\n", connection->mRemoteLp, connection->mNumberAllocated);
832 veth_failMe(connection);
836 static void veth_openConnection(u8 remoteLp, int lockMe)
839 unsigned long flags2;
840 HvLpInstanceId source;
841 HvLpInstanceId target;
843 struct VethLpConnection *connection = &(mFabricMgr->mConnection[remoteLp]);
845 memset(&connection->mCapBhTq, 0, sizeof(connection->mCapBhTq));
846 connection->mCapBhTq.routine = (void *)(void *)veth_capBh;
848 memset(&connection->mCapAckBhTq, 0, sizeof(connection->mCapAckBhTq));
849 connection->mCapAckBhTq.routine = (void *)(void *)veth_capAckBh;
851 memset(&connection->mMonitorAckBhTq, 0, sizeof(connection->mMonitorAckBhTq));
852 connection->mMonitorAckBhTq.routine = (void *)(void *)veth_monitorAckBh;
854 memset(&connection->mAllocBhTq, 0, sizeof(connection->mAllocBhTq));
855 connection->mAllocBhTq.routine = (void *)(void *)veth_finishOpeningConnections;
858 spin_lock_irqsave(&connection->mStatusGate, flags);
860 connection->mRemoteLp = remoteLp;
862 spin_lock_irqsave(&connection->mAckGate, flags2);
864 memset(&connection->mEventData, 0xFF, sizeof(connection->mEventData));
865 connection->mNumAcks = 0;
867 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualLan);
869 /* clean up non-acked msgs */
870 for (i=0; i < connection->mNumMsgs; ++i)
872 veth_recycleMsg(connection, i);
875 connection->mConnectionStatus.mOpen = 1;
877 source = connection->mSourceInst = HvCallEvent_getSourceLpInstanceId(remoteLp, HvLpEvent_Type_VirtualLan);
878 target = connection->mTargetInst = HvCallEvent_getTargetLpInstanceId(remoteLp, HvLpEvent_Type_VirtualLan);
880 if (connection->mConnectionStatus.mCapMonAlloced != 1)
882 connection->mAllocBhTq.routine = (void *)(void *)veth_finishOpeningConnections;
883 mf_allocateLpEvents(remoteLp,
884 HvLpEvent_Type_VirtualLan,
885 sizeof(struct VethLpEvent),
887 &veth_intFinishOpeningConnections,
892 veth_finishOpeningConnectionsLocked(connection);
895 spin_unlock_irqrestore(&connection->mAckGate, flags2);
898 spin_unlock_irqrestore(&connection->mStatusGate, flags);
901 static void veth_closeConnection(u8 remoteLp, int lockMe)
903 struct VethLpConnection *connection = &(mFabricMgr->mConnection[remoteLp]);
905 unsigned long flags2;
907 spin_lock_irqsave(&connection->mStatusGate, flags);
909 del_timer(&connection->mAckTimer);
911 if (connection->mConnectionStatus.mOpen == 1)
913 HvCallEvent_closeLpEventPath(remoteLp, HvLpEvent_Type_VirtualLan);
914 connection->mConnectionStatus.mOpen = 0;
915 veth_failMe(connection);
918 spin_lock_irqsave(&connection->mAckGate, flags2);
920 memset(&connection->mEventData, 0xFF, sizeof(connection->mEventData));
921 connection->mNumAcks = 0;
923 spin_unlock_irqrestore(&connection->mAckGate, flags2);
927 spin_unlock_irqrestore(&connection->mStatusGate, flags);
930 static void veth_msgsInit(struct VethLpConnection *connection)
932 connection->mAllocBhTq.routine = (void *)(void *)veth_finishMsgsInit;
933 mf_allocateLpEvents(connection->mRemoteLp,
934 HvLpEvent_Type_VirtualLan,
935 sizeof(struct VethLpEvent),
936 connection->mMyCap.mUnionData.mFields.mNumberBuffers,
937 &veth_intFinishMsgsInit,
941 static void veth_intFinishMsgsInit(void *parm, int number)
943 struct VethLpConnection *connection = (struct VethLpConnection *)parm;
944 connection->mAllocBhTq.data = parm;
945 connection->mNumberRcvMsgs = number;
946 queue_task(&connection->mAllocBhTq, &tq_immediate);
947 mark_bh(IMMEDIATE_BH);
950 static void veth_intFinishCapBh(void *parm, int number)
952 struct VethLpConnection *connection = (struct VethLpConnection *)parm;
953 connection->mAllocBhTq.data = parm;
955 connection->mNumberLpAcksAlloced += number;
957 queue_task(&connection->mAllocBhTq, &tq_immediate);
958 mark_bh(IMMEDIATE_BH);
961 static void veth_finishMsgsInit(struct VethLpConnection *connection)
964 unsigned int numberGotten = 0;
965 u64 amountOfHeapToGet = connection->mMyCap.mUnionData.mFields.mNumberBuffers * sizeof(struct VethMsg);
968 spin_lock_irqsave(&connection->mStatusGate, flags);
970 if (connection->mNumberRcvMsgs >= connection->mMyCap.mUnionData.mFields.mNumberBuffers)
972 msgs = kmalloc(amountOfHeapToGet, GFP_ATOMIC);
974 connection->mMsgs = (struct VethMsg *)msgs;
978 memset(msgs, 0, amountOfHeapToGet);
980 for (i=0; i < connection->mMyCap.mUnionData.mFields.mNumberBuffers; ++i)
982 connection->mMsgs[i].mIndex = i;
984 VETHSTACKPUSH(&(connection->mMsgStack), (connection->mMsgs+i));
986 if (numberGotten > 0)
988 connection->mNumMsgs = numberGotten;
994 connection->mMsgs = NULL;
998 connection->mMyCap.mUnionData.mFields.mNumberBuffers = connection->mNumMsgs;
1000 if (connection->mNumMsgs < 10)
1001 connection->mMyCap.mUnionData.mFields.mThreshold = 1;
1002 else if (connection->mNumMsgs < 20)
1003 connection->mMyCap.mUnionData.mFields.mThreshold = 4;
1004 else if (connection->mNumMsgs < 40)
1005 connection->mMyCap.mUnionData.mFields.mThreshold = 10;
1007 connection->mMyCap.mUnionData.mFields.mThreshold = 20;
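/* The advertised threshold is roughly how many of our frames the remote
   partition will accept before it must send a FramesAck back; with fewer
   buffers allocated, ask for acks sooner so VethMsg slots recycle quickly. */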
1009 connection->mMyCap.mUnionData.mFields.mTimer = VethAckTimeoutUsec;
1011 veth_finishSendCap(connection);
1013 spin_unlock_irqrestore(&connection->mStatusGate, flags);
1016 static void veth_sendCap(struct VethLpConnection *connection)
1018 if (connection->mMsgs == NULL)
1020 connection->mMyCap.mUnionData.mFields.mNumberBuffers = VethBuffersToAllocate;
1021 veth_msgsInit(connection);
1025 veth_finishSendCap(connection);
1029 static void veth_finishSendCap(struct VethLpConnection *connection)
1031 HvLpEvent_Rc returnCode = HvCallEvent_signalLpEventFast(connection->mRemoteLp,
1032 HvLpEvent_Type_VirtualLan,
1034 HvLpEvent_AckInd_DoAck,
1035 HvLpEvent_AckType_ImmediateAck,
1036 connection->mSourceInst,
1037 connection->mTargetInst,
1039 connection->mMyCap.mUnionData.mNoFields.mReserved1,
1040 connection->mMyCap.mUnionData.mNoFields.mReserved2,
1041 connection->mMyCap.mUnionData.mNoFields.mReserved3,
1042 connection->mMyCap.mUnionData.mNoFields.mReserved4,
1043 connection->mMyCap.mUnionData.mNoFields.mReserved5);
1045 if ((returnCode == HvLpEvent_Rc_PartitionDead) ||
1046 (returnCode == HvLpEvent_Rc_PathClosed))
1048 connection->mConnectionStatus.mSentCap = 0;
1050 else if (returnCode != HvLpEvent_Rc_Good)
1052 veth_error_printk("Couldn't send cap to lpar %d, rc %x\n", connection->mRemoteLp, (int)returnCode);
1053 veth_failMe(connection);
1057 connection->mConnectionStatus.mSentCap = 1;
1061 static void veth_takeCap(struct VethLpConnection *connection, struct VethLpEvent *event)
1063 if (!test_and_set_bit(0,&(connection->mCapBhPending)))
1065 connection->mCapBhTq.data = connection;
1066 memcpy(&connection->mCapEvent, event, sizeof(connection->mCapEvent));
1067 queue_task(&connection->mCapBhTq, &tq_immediate);
1068 mark_bh(IMMEDIATE_BH);
1072 veth_error_printk("Received a capabilities from lpar %d while already processing one\n", connection->mRemoteLp);
1073 event->mBaseEvent.xRc = HvLpEvent_Rc_BufferNotAvailable;
1074 HvCallEvent_ackLpEvent((struct HvLpEvent *)event);
1078 static void veth_takeCapAck(struct VethLpConnection *connection, struct VethLpEvent *event)
1080 if (!test_and_set_bit(0,&(connection->mCapAckBhPending)))
1082 connection->mCapAckBhTq.data = connection;
1083 memcpy(&connection->mCapAckEvent, event, sizeof(connection->mCapAckEvent));
1084 queue_task(&connection->mCapAckBhTq, &tq_immediate);
1085 mark_bh(IMMEDIATE_BH);
1089 veth_error_printk("Received a capabilities ack from lpar %d while already processing one\n", connection->mRemoteLp);
1093 static void veth_takeMonitorAck(struct VethLpConnection *connection, struct VethLpEvent *event)
1095 if (!test_and_set_bit(0,&(connection->mMonitorAckBhPending)))
1097 connection->mMonitorAckBhTq.data = connection;
1098 memcpy(&connection->mMonitorAckEvent, event, sizeof(connection->mMonitorAckEvent));
1099 queue_task(&connection->mMonitorAckBhTq, &tq_immediate);
1100 mark_bh(IMMEDIATE_BH);
1104 veth_error_printk("Received a monitor ack from lpar %d while already processing one\n", connection->mRemoteLp);
1108 static void veth_recycleMsg(struct VethLpConnection *connection, u16 msg)
1110 if (msg < connection->mNumMsgs)
1112 struct VethMsg *myMsg = connection->mMsgs + msg;
1113 if (test_and_clear_bit(0, &(myMsg->mInUse)))
1115 pci_unmap_single(iSeries_veth_dev,
1116 myMsg->mEvent.mSendData.mAddress[0],
1117 myMsg->mEvent.mSendData.mLength[0],
1119 dev_kfree_skb_irq(myMsg->mSkb);
1122 memset(&(myMsg->mEvent.mSendData), 0, sizeof(struct VethFramesData));
1123 VETHSTACKPUSH(&connection->mMsgStack, myMsg);
1127 if (connection->mConnectionStatus.mOpen)
1129 veth_error_printk("Received a frames ack for msg %d from lpar %d while not outstanding\n", msg, connection->mRemoteLp);
1135 static void veth_capBh(struct VethLpConnection *connection)
1137 struct VethLpEvent *event = &connection->mCapEvent;
1138 unsigned long flags;
1139 struct VethCapData *remoteCap = &(connection->mRemoteCap);
1141 spin_lock_irqsave(&connection->mStatusGate, flags);
1142 connection->mConnectionStatus.mGotCap = 1;
1144 memcpy(remoteCap, &(event->mDerivedData.mCapabilitiesData), sizeof(connection->mRemoteCap));
1146 if ((remoteCap->mUnionData.mFields.mNumberBuffers <= VethMaxFramesMsgs) &&
1147 (remoteCap->mUnionData.mFields.mNumberBuffers != 0) &&
1148 (remoteCap->mUnionData.mFields.mThreshold <= VethMaxFramesMsgsAcked) &&
1149 (remoteCap->mUnionData.mFields.mThreshold != 0))
1151 numAcks = (remoteCap->mUnionData.mFields.mNumberBuffers / remoteCap->mUnionData.mFields.mThreshold) + 1;
1153 if (connection->mNumberLpAcksAlloced < numAcks)
1155 numAcks = numAcks - connection->mNumberLpAcksAlloced;
1156 connection->mAllocBhTq.routine = (void *)(void *)veth_finishCapBh;
1157 mf_allocateLpEvents(connection->mRemoteLp,
1158 HvLpEvent_Type_VirtualLan,
1159 sizeof(struct VethLpEvent),
1161 &veth_intFinishCapBh,
1165 veth_finishCapBhLocked(connection);
1169 veth_error_printk("Received incompatible capabilities from lpar %d\n", connection->mRemoteLp);
1170 event->mBaseEvent.xRc = HvLpEvent_Rc_InvalidSubtypeData;
1171 HvCallEvent_ackLpEvent((struct HvLpEvent *)event);
1174 clear_bit(0,&(connection->mCapBhPending));
1175 spin_unlock_irqrestore(&connection->mStatusGate, flags);
1178 static void veth_capAckBh(struct VethLpConnection *connection)
1180 struct VethLpEvent *event = &connection->mCapAckEvent;
1181 unsigned long flags;
1183 spin_lock_irqsave(&connection->mStatusGate, flags);
1185 if (event->mBaseEvent.xRc == HvLpEvent_Rc_Good)
1187 connection->mConnectionStatus.mCapAcked = 1;
1189 if ((connection->mConnectionStatus.mGotCap == 1) &&
1190 (connection->mConnectionStatus.mGotCapAcked == 1))
1192 if (connection->mConnectionStatus.mSentMonitor != 1)
1193 veth_sendMonitor(connection);
1198 veth_error_printk("Bad rc(%d) from lpar %d on capabilities\n", event->mBaseEvent.xRc, connection->mRemoteLp);
1199 veth_failMe(connection);
1202 clear_bit(0,&(connection->mCapAckBhPending));
1203 spin_unlock_irqrestore(&connection->mStatusGate, flags);
1206 static void veth_monitorAckBh(struct VethLpConnection *connection)
1208 unsigned long flags;
1210 spin_lock_irqsave(&connection->mStatusGate, flags);
1212 veth_failMe(connection);
1214 veth_printk("Monitor ack returned for lpar %d\n", connection->mRemoteLp);
1216 if (connection->mConnectionStatus.mOpen)
1218 veth_closeConnection(connection->mRemoteLp, 0);
1222 queue_task(&connection->mMonitorAckBhTq, &tq_immediate);
1223 mark_bh(IMMEDIATE_BH);
1228 if (VethModuleReopen)
1230 veth_openConnection(connection->mRemoteLp, 0);
1236 for (i=0; i < connection->mNumMsgs; ++i)
1238 veth_recycleMsg(connection, i);
1242 clear_bit(0,&(connection->mMonitorAckBhPending));
1245 spin_unlock_irqrestore(&connection->mStatusGate, flags);
1248 #define number_of_pages(v, l) ((((unsigned long)(v) & ((1 << 12) - 1)) + (l) + 4096 - 1) / 4096)
1249 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
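/* Helpers for the receive-side DMA below: page_offset() is a virtual
   address's offset within its 4K page, and number_of_pages() is how many
   4K pages a buffer of the given length touches.  For example, a 1514-byte
   frame starting 4000 bytes into a page spans (4000 + 1514 + 4095) / 4096
   = 2 pages. */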
1251 static void veth_takeFrames(struct VethLpConnection *connection, struct VethLpEvent *event)
1254 struct VethPort *port = NULL;
1269 struct BufList myBufList[4];
1270 struct BufList remoteList;
1272 for (i=0; i < VethMaxFramesPerMsg; ++i)
1274 u16 length = event->mDerivedData.mSendData.mLength[i];
1275 u32 address = event->mDerivedData.mSendData.mAddress[i];
1276 if ((address != 0) &&
1280 struct sk_buff *skb = alloc_skb(event->mDerivedData.mSendData.mLength[i], GFP_ATOMIC);
1281 remoteList.addr.token1.token2 = address;
1282 remoteList.size = length;
1285 HvLpDma_Rc returnCode = HvLpDma_Rc_Good;
1286 int numPages = number_of_pages((skb->data), length);
1289 myBufList[0].addr.address = (0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long)skb->data)));
1290 myBufList[0].size = (numPages > 1) ? (4096 - page_offset(skb->data)) : length;
1294 myBufList[1].addr.address = (0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long) skb->data + myBufList[0].size)));
1295 myBufList[1].size = (numPages > 2) ? (4096 - page_offset(skb->data)) : length - myBufList[0].size;
1299 myBufList[2].addr.address = (0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long) skb->data + myBufList[0].size + myBufList[1].size)));
1300 myBufList[2].size = (numPages > 3) ? (4096 - page_offset(skb->data)) : length - myBufList[1].size - myBufList[0].size;
1304 myBufList[3].addr.address = 0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long) skb->data + myBufList[0].size + myBufList[1].size + myBufList[2].size));
1305 myBufList[3].size = (numPages > 4) ? (4096 - page_offset(skb->data)) : length - myBufList[2].size - myBufList[1].size - myBufList[0].size;
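/* Describe the local skb data as up to four page-sized pieces, in absolute
   (real) addresses, so the hypervisor DMA below can pull the remote frame
   into a buffer that may straddle page boundaries. */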
1310 returnCode = HvCallEvent_dmaBufList(HvLpEvent_Type_VirtualLan,
1311 event->mBaseEvent.xSourceLp,
1312 HvLpDma_Direction_RemoteToLocal,
1313 connection->mSourceInst,
1314 connection->mTargetInst,
1315 HvLpDma_AddressType_RealAddress,
1316 HvLpDma_AddressType_TceIndex,
1317 0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long)&myBufList)),
1318 0x8000000000000000LL | (VIRT_TO_ABSOLUTE((unsigned long)&remoteList)),
1321 if (returnCode == HvLpDma_Rc_Good)
1323 HvLpVirtualLanIndex vlan = skb->data[9];
1324 u64 dest = *((u64 *)skb->data) & 0xFFFFFFFFFFFF0000;
1326 if (((vlan < HvMaxArchitectedVirtualLans) &&
1327 ((port = mFabricMgr->mPorts[vlan]) != NULL)) &&
1328 ((dest == port->mMyAddress) || /* it's for me */
1329 (dest == 0xFFFFFFFFFFFF0000) || /* it's a broadcast */
1330 (veth_multicast_wanted(port, dest)) || /* it's one of my multicasts */
1331 (port->mPromiscuous == 1))) /* I'm promiscuous */
1333 skb_put(skb, length);
1334 skb->dev = port->mDev;
1335 skb->protocol = eth_type_trans(skb, port->mDev);
1336 skb->ip_summed = CHECKSUM_NONE;
1337 netif_rx(skb); /* send it up */
1338 port->mStats.rx_packets++;
1339 port->mStats.rx_bytes += length;
1344 dev_kfree_skb_irq(skb);
1349 printk("bad lp event rc %x length %d remote address %x raw address %x\n", (int)returnCode, length, remoteList.addr.token1.token2, address);
1350 dev_kfree_skb_irq(skb);
1360 unsigned long flags;
1361 spin_lock_irqsave(&connection->mAckGate, flags);
1363 if (connection->mNumAcks < VethMaxFramesMsgsAcked)
1365 connection->mEventData.mAckData.mToken[connection->mNumAcks] = event->mBaseEvent.xCorrelationToken;
1366 ++connection->mNumAcks;
1368 if (connection->mNumAcks == connection->mRemoteCap.mUnionData.mFields.mThreshold)
1370 HvLpEvent_Rc rc = HvCallEvent_signalLpEventFast(connection->mRemoteLp,
1371 HvLpEvent_Type_VirtualLan,
1372 VethEventTypeFramesAck,
1373 HvLpEvent_AckInd_NoAck,
1374 HvLpEvent_AckType_ImmediateAck,
1375 connection->mSourceInst,
1376 connection->mTargetInst,
1378 connection->mEventData.mFpData.mData1,
1379 connection->mEventData.mFpData.mData2,
1380 connection->mEventData.mFpData.mData3,
1381 connection->mEventData.mFpData.mData4,
1382 connection->mEventData.mFpData.mData5);
1384 if (rc != HvLpEvent_Rc_Good)
1386 veth_error_printk("Bad lp event return code(%x) acking frames from lpar %d\n", (int)rc, connection->mRemoteLp);
1389 connection->mNumAcks = 0;
1391 memset(&connection->mEventData, 0xFF, sizeof(connection->mEventData));
1396 spin_unlock_irqrestore(&connection->mAckGate, flags);
1399 #undef number_of_pages
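/* Received frames are not acked individually.  veth_takeFrames queues the
   event tokens and sends one FramesAck event once the remote side's
   advertised threshold is reached; veth_timedAck below is the fallback that
   flushes any queued acks when the negotiated timeout expires. */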
1402 static void veth_timedAck(unsigned long connectionPtr)
1404 unsigned long flags;
1406 struct VethLpConnection *connection = (struct VethLpConnection *) connectionPtr;
1407 /* Ack all the events */
1408 spin_lock_irqsave(&connection->mAckGate, flags);
1410 if (connection->mNumAcks > 0)
1412 rc = HvCallEvent_signalLpEventFast(connection->mRemoteLp,
1413 HvLpEvent_Type_VirtualLan,
1414 VethEventTypeFramesAck,
1415 HvLpEvent_AckInd_NoAck,
1416 HvLpEvent_AckType_ImmediateAck,
1417 connection->mSourceInst,
1418 connection->mTargetInst,
1420 connection->mEventData.mFpData.mData1,
1421 connection->mEventData.mFpData.mData2,
1422 connection->mEventData.mFpData.mData3,
1423 connection->mEventData.mFpData.mData4,
1424 connection->mEventData.mFpData.mData5);
1426 if (rc != HvLpEvent_Rc_Good)
1428 veth_error_printk("Bad lp event return code(%x) acking frames from lpar %d!\n", (int)rc, connection->mRemoteLp);
1431 connection->mNumAcks = 0;
1433 memset(&connection->mEventData, 0xFF, sizeof(connection->mEventData));
1436 spin_unlock_irqrestore(&connection->mAckGate, flags);
1438 /* Reschedule the timer */
1439 connection->mAckTimer.expires = jiffies + connection->mTimeout;
1440 add_timer(&connection->mAckTimer);
1443 static int veth_multicast_wanted(struct VethPort *port, u64 thatAddr)
1447 unsigned long flags;
1449 if ((*((char *)&thatAddr) & 0x01) != 1)
1452 read_lock_irqsave(&port->mMcastGate, flags);
1453 if (port->mAllMcast)
1456 for (i=0; i < port->mNumAddrs; ++i)
1458 u64 thisAddr = port->mMcasts[i];
1460 if (thisAddr == thatAddr)
1466 read_unlock_irqrestore(&port->mMcastGate, flags);
1471 static void veth_sendMonitor(struct VethLpConnection *connection)
1473 HvLpEvent_Rc returnCode = HvCallEvent_signalLpEventFast(connection->mRemoteLp,
1474 HvLpEvent_Type_VirtualLan,
1475 VethEventTypeMonitor,
1476 HvLpEvent_AckInd_DoAck,
1477 HvLpEvent_AckType_DeferredAck,
1478 connection->mSourceInst,
1479 connection->mTargetInst,
1482 if (returnCode == HvLpEvent_Rc_Good)
1484 connection->mConnectionStatus.mSentMonitor = 1;
1485 connection->mConnectionStatus.mFailed = 0;
1487 /* Start the ACK timer */
1488 init_timer(&connection->mAckTimer);
1489 connection->mAckTimer.function = veth_timedAck;
1490 connection->mAckTimer.data = (unsigned long) connection;
1491 connection->mAckTimer.expires = jiffies + connection->mTimeout;
1492 add_timer(&connection->mAckTimer);
1497 veth_error_printk("Monitor send to lpar %d failed with rc %x\n", connection->mRemoteLp, (int)returnCode);
1498 veth_failMe(connection);
1502 static void veth_finishCapBh(struct VethLpConnection *connection)
1504 unsigned long flags;
1505 spin_lock_irqsave(&connection->mStatusGate, flags);
1506 veth_finishCapBhLocked(connection);
1507 spin_unlock_irqrestore(&connection->mStatusGate, flags);
1510 static void veth_finishCapBhLocked(struct VethLpConnection *connection)
1512 struct VethLpEvent *event = &connection->mCapEvent;
1513 struct VethCapData *remoteCap = &(connection->mRemoteCap);
1514 int numAcks = (remoteCap->mUnionData.mFields.mNumberBuffers / remoteCap->mUnionData.mFields.mThreshold) + 1;
1516 /* Convert timer to jiffies */
1517 if (connection->mMyCap.mUnionData.mFields.mTimer)
1518 connection->mTimeout = remoteCap->mUnionData.mFields.mTimer * HZ / 1000000;
1520 connection->mTimeout = VethAckTimeoutUsec * HZ / 1000000;
1522 if (connection->mNumberLpAcksAlloced >= numAcks)
1524 HvLpEvent_Rc returnCode = HvCallEvent_ackLpEvent((struct HvLpEvent *)event);
1526 if (returnCode == HvLpEvent_Rc_Good)
1528 connection->mConnectionStatus.mGotCapAcked = 1;
1530 if (connection->mConnectionStatus.mSentCap != 1)
1532 connection->mTargetInst = HvCallEvent_getTargetLpInstanceId(connection->mRemoteLp, HvLpEvent_Type_VirtualLan);
1534 veth_sendCap(connection);
1536 else if (connection->mConnectionStatus.mCapAcked == 1)
1538 if (connection->mConnectionStatus.mSentMonitor != 1)
1539 veth_sendMonitor(connection);
1544 veth_error_printk("Failed to ack remote cap for lpar %d with rc %x\n", connection->mRemoteLp, (int)returnCode);
1545 veth_failMe(connection);
1550 veth_error_printk("Couldn't allocate all the frames ack events for lpar %d\n", connection->mRemoteLp);
1551 event->mBaseEvent.xRc = HvLpEvent_Rc_BufferNotAvailable;
1552 HvCallEvent_ackLpEvent((struct HvLpEvent *)event);
1556 int proc_veth_dump_connection
1557 (char *page, char **start, off_t off, int count, int *eof, void *data)
1560 long whichConnection = (long) data;
1562 struct VethLpConnection *connection = NULL;
1564 if ((whichConnection < 0) || (whichConnection > HvMaxArchitectedLps) || (mFabricMgr == NULL))
1566 veth_error_printk("Got bad data from /proc file system\n");
1567 len = sprintf(page, "ERROR\n");
1571 int thereWasStuffBefore = 0;
1572 connection = &(mFabricMgr->mConnection[whichConnection]);
1574 out += sprintf(out, "Remote Lp:\t%d\n", connection->mRemoteLp);
1575 out += sprintf(out, "Source Inst:\t%04X\n", connection->mSourceInst);
1576 out += sprintf(out, "Target Inst:\t%04X\n", connection->mTargetInst);
1577 out += sprintf(out, "Num Msgs:\t%d\n", connection->mNumMsgs);
1578 out += sprintf(out, "Num Lp Acks:\t%d\n", connection->mNumberLpAcksAlloced);
1579 out += sprintf(out, "Num Acks:\t%d\n", connection->mNumAcks);
1581 if (connection->mConnectionStatus.mOpen)
1583 out += sprintf(out, "<Open");
1584 thereWasStuffBefore = 1;
1587 if (connection->mConnectionStatus.mCapMonAlloced)
1589 if (thereWasStuffBefore)
1590 out += sprintf(out,"/");
1592 out += sprintf(out,"<");
1593 out += sprintf(out, "CapMonAlloced");
1594 thereWasStuffBefore = 1;
1597 if (connection->mConnectionStatus.mBaseMsgsAlloced)
1599 if (thereWasStuffBefore)
1600 out += sprintf(out,"/");
1602 out += sprintf(out,"<");
1603 out += sprintf(out, "BaseMsgsAlloced");
1604 thereWasStuffBefore = 1;
1607 if (connection->mConnectionStatus.mSentCap)
1609 if (thereWasStuffBefore)
1610 out += sprintf(out,"/");
1612 out += sprintf(out,"<");
1613 out += sprintf(out, "SentCap");
1614 thereWasStuffBefore = 1;
1617 if (connection->mConnectionStatus.mCapAcked)
1619 if (thereWasStuffBefore)
1620 out += sprintf(out,"/");
1622 out += sprintf(out,"<");
1623 out += sprintf(out, "CapAcked");
1624 thereWasStuffBefore = 1;
1627 if (connection->mConnectionStatus.mGotCap)
1629 if (thereWasStuffBefore)
1630 out += sprintf(out,"/");
1632 out += sprintf(out,"<");
1633 out += sprintf(out, "GotCap");
1634 thereWasStuffBefore = 1;
1637 if (connection->mConnectionStatus.mGotCapAcked)
1639 if (thereWasStuffBefore)
1640 out += sprintf(out,"/");
1642 out += sprintf(out,"<");
1643 out += sprintf(out, "GotCapAcked");
1644 thereWasStuffBefore = 1;
1647 if (connection->mConnectionStatus.mSentMonitor)
1649 if (thereWasStuffBefore)
1650 out += sprintf(out,"/");
1652 out += sprintf(out,"<");
1653 out += sprintf(out, "SentMonitor");
1654 thereWasStuffBefore = 1;
1657 if (connection->mConnectionStatus.mPopulatedRings)
1659 if (thereWasStuffBefore)
1660 out += sprintf(out,"/");
1662 out += sprintf(out,"<");
1663 out += sprintf(out, "PopulatedRings");
1664 thereWasStuffBefore = 1;
1667 if (connection->mConnectionStatus.mFailed)
1669 if (thereWasStuffBefore)
1670 out += sprintf(out,"/");
1672 out += sprintf(out,"<");
1673 out += sprintf(out, "Failed");
1674 thereWasStuffBefore = 1;
1677 if (thereWasStuffBefore)
1678 out += sprintf(out, ">");
1680 out += sprintf(out, "\n");
1682 out += sprintf(out, "Capabilities (System:<Version/Buffers/Threshold/Timeout>):\n");
1683 out += sprintf(out, "\tLocal:<");
1684 out += sprintf(out, "%d/%d/%d/%d>\n",
1685 connection->mMyCap.mUnionData.mFields.mVersion,
1686 connection->mMyCap.mUnionData.mFields.mNumberBuffers,
1687 connection->mMyCap.mUnionData.mFields.mThreshold,
1688 connection->mMyCap.mUnionData.mFields.mTimer);
1689 out += sprintf(out, "\tRemote:<");
1690 out += sprintf(out, "%d/%d/%d/%d>\n",
1691 connection->mRemoteCap.mUnionData.mFields.mVersion,
1692 connection->mRemoteCap.mUnionData.mFields.mNumberBuffers,
1693 connection->mRemoteCap.mUnionData.mFields.mThreshold,
1694 connection->mRemoteCap.mUnionData.mFields.mTimer);
1704 *start = page + off;
1708 int proc_veth_dump_port
1709 (char *page, char **start, off_t off, int count, int *eof, void *data)
1712 long whichPort = (long) data;
1714 struct VethPort *port = NULL;
1716 if ((whichPort < 0) || (whichPort > HvMaxArchitectedVirtualLans) || (mFabricMgr == NULL))
1717 len = sprintf(page, "Virtual ethernet is not configured.\n");
1723 port = mFabricMgr->mPorts[whichPort];
1727 myAddr = (u32 *)&(port->mMyAddress);
1728 myEndAddr = (u16 *)(myAddr + 1);
1729 out += sprintf(out, "Net device:\t%p\n", port->mDev);
1730 out += sprintf(out, "Net device name:\t%s\n", port->mDev->name);
1731 out += sprintf(out, "Address:\t%08X%04X\n", myAddr[0], myEndAddr[0]);
1732 out += sprintf(out, "Promiscuous:\t%d\n", port->mPromiscuous);
1733 out += sprintf(out, "All multicast:\t%d\n", port->mAllMcast);
1734 out += sprintf(out, "Number multicast:\t%d\n", port->mNumAddrs);
1736 for (i=0; i < port->mNumAddrs; ++i)
1738 u32 *multi = (u32 *)&(port->mMcasts[i]);
1739 u16 *multiEnd = (u16 *)(multi + 1);
1740 out += sprintf(out, " %08X%04X\n", multi[0], multiEnd[0]);
1745 out += sprintf(page, "veth%d is not configured.\n", (int)whichPort);
1757 *start = page + off;