/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
16 #include <linux/interrupt.h>
17 #include <linux/io-64-nonatomic-lo-hi.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/module.h>
21 #include <linux/ntb.h>
22 #include <linux/pci.h>
23 #include <linux/switchtec.h>
/* Module identity metadata. */
25 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
26 MODULE_VERSION("0.1");
27 MODULE_LICENSE("GPL");
28 MODULE_AUTHOR("Microsemi Corporation");
/*
 * Cap on the direct memory-window size reported to upper layers
 * (see switchtec_ntb_direct_get_addr). Writable at runtime (0644).
 */
30 static ulong max_mw_size = SZ_2M;
31 module_param(max_mw_size, ulong, 0644);
32 MODULE_PARM_DESC(max_mw_size,
33 "Max memory window size reported to the upper layer");
/* Opt-in: also expose LUT-based windows (see switchtec_ntb_peer_mw_count). */
35 static bool use_lut_mws;
36 module_param(use_lut_mws, bool, 0644);
37 MODULE_PARM_DESC(use_lut_mws,
38 "Enable the use of the LUT based memory windows");
/*
 * Magic placed in the low 32 bits of shared_mw->magic so each side can
 * recognize a peer's shared window (checked in switchtec_ntb_check_link).
 */
40 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
/*
 * NOTE(review): field of struct shared_mw; the struct's surrounding lines
 * (tag, magic/link_sta/partition_id/spad fields) are elided in this extract.
 * Per-window sizes advertised to the peer (filled in switchtec_ntb_init_shared).
 */
47 u64 mw_sizes[MAX_MWS];
/* One direct window per hardware BAR entry in the NTB control registers. */
51 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
/* Size of each LUT entry window; also the size of the shared-mw buffer. */
52 #define LUT_SIZE SZ_64K
/*
 * Per-device driver state for one Switchtec NTB instance.
 * NOTE(review): several fields (ntb dev, partition ids, db masks/shifts,
 * link state, irq numbers, mw counts) are elided in this extract; the
 * closing brace is likewise not visible.
 */
54 struct switchtec_ntb {
56 struct switchtec_dev *stdev;
/* Raw register regions mapped from the management BAR. */
64 struct ntb_info_regs __iomem *mmio_ntb;
65 struct ntb_ctrl_regs __iomem *mmio_ctrl;
66 struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
/* Per-partition views into the ctrl/dbmsg arrays (set in init_sndev). */
67 struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
68 struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
69 struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
70 struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;
/* Mapping of the peer dbmsg regs when running in crosslink mode. */
72 void __iomem *mmio_xlink_win;
/* Local DMA-coherent shared page and iomap of the peer's copy. */
74 struct shared_mw *self_shared;
75 struct shared_mw __iomem *peer_shared;
76 dma_addr_t self_shared_dma;
83 /* synchronize rmw access of db_mask and hw reg */
84 spinlock_t db_mask_lock;
/* Direct-window index -> BAR number maps, self and peer side. */
89 int direct_mw_to_bar[MAX_DIRECT_MW];
91 int peer_nr_direct_mw;
93 int peer_direct_mw_to_bar[MAX_DIRECT_MW];
/* Negotiated link parameters (min of both sides, see set_link_speed). */
96 enum ntb_speed link_speed;
97 enum ntb_width link_width;
/* Deferred re-init after a peer reset (see link_reinit_work). */
98 struct work_struct link_reinit_work;
/* Convert the embedded ntb_dev back to its containing switchtec_ntb. */
101 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
103 return container_of(ntb, struct switchtec_ntb, ntb);
/*
 * Issue a partition operation (lock/configure/reset) on the given control
 * register block and poll (up to 1000 * 50ms) until the hardware leaves the
 * transitional status and reaches wait_status.  On an interrupted sleep the
 * op is aborted with a RESET write.  Returns 0 on success, negative errno
 * on interrupt or timeout.
 * NOTE(review): braces, locals (i, ps, status), break/return statements and
 * the switch default are elided in this extract.
 */
106 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
107 struct ntb_ctrl_regs __iomem *ctl,
108 u32 op, int wait_status)
110 static const char * const op_text[] = {
111 [NTB_CTRL_PART_OP_LOCK] = "lock",
112 [NTB_CTRL_PART_OP_CFG] = "configure",
113 [NTB_CTRL_PART_OP_RESET] = "reset",
/* Map the requested op to the transitional status to wait through. */
121 case NTB_CTRL_PART_OP_LOCK:
122 status = NTB_CTRL_PART_STATUS_LOCKING;
124 case NTB_CTRL_PART_OP_CFG:
125 status = NTB_CTRL_PART_STATUS_CONFIGURING;
127 case NTB_CTRL_PART_OP_RESET:
128 status = NTB_CTRL_PART_STATUS_RESETTING;
134 iowrite32(op, &ctl->partition_op);
136 for (i = 0; i < 1000; i++) {
137 if (msleep_interruptible(50) != 0) {
/* Interrupted: cancel the pending op before bailing out. */
138 iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
/* Only the low 16 bits carry the partition status. */
142 ps = ioread32(&ctl->partition_status) & 0xFFFF;
148 if (ps == wait_status)
152 dev_err(&sndev->stdev->dev,
153 "Timed out while performing %s (%d). (%08x)\n",
155 ioread32(&ctl->partition_status));
/*
 * Write val into the peer's outbound message register idx; the index is
 * bounds-checked against the omsg array.  Elided lines presumably return
 * -EINVAL / 0 — TODO confirm against the full source.
 */
163 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
166 if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
169 iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
/*
 * ntb_dev_ops.mw_count: number of inbound windows the peer may program —
 * all peer direct windows plus the peer LUT windows minus those reserved
 * for driver-internal use.  Only the single default peer index is valid.
 */
174 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
176 struct switchtec_ntb *sndev = ntb_sndev(ntb);
177 int nr_direct_mw = sndev->peer_nr_direct_mw;
178 int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
180 if (pidx != NTB_DEF_PEER_IDX)
186 return nr_direct_mw + nr_lut_mw;
/* Translate a client-visible mw index into a self-side LUT table index. */
189 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
191 return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
/* Translate a client-visible mw index into a peer-side LUT table index. */
194 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
196 return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
/*
 * ntb_dev_ops.mw_get_align: report alignment constraints for window widx.
 * LUT windows require address/size alignment equal to the (fixed) window
 * size read from the peer's shared page; direct windows need 4K.
 * NOTE(review): the widx bounds check and *size_max assignment are elided.
 */
199 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
200 int widx, resource_size_t *addr_align,
201 resource_size_t *size_align,
202 resource_size_t *size_max)
204 struct switchtec_ntb *sndev = ntb_sndev(ntb);
206 resource_size_t size;
208 if (pidx != NTB_DEF_PEER_IDX)
/* Indices past the direct windows address LUT entries. */
211 lut = widx >= sndev->peer_nr_direct_mw;
212 size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
218 *addr_align = lut ? size : SZ_4K;
221 *size_align = lut ? size : SZ_4K;
/*
 * Disable a peer direct window: clear the window-enable bit, zero both
 * halves of the window size, and reset the translation to just the
 * self-partition id (no address).
 */
229 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
231 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
232 int bar = sndev->peer_direct_mw_to_bar[idx];
235 ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
236 ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
237 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
238 iowrite32(0, &ctl->bar_entry[bar].win_size);
239 iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
240 iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
/* Disable a peer LUT window by zeroing its LUT entry. */
243 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
245 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
247 iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
/*
 * Program a peer direct window: enable the BAR window, write the encoded
 * size (log2 in the low bits, masked low 32 bits, high 32 bits in the
 * extended entry) and the translation address tagged with our partition.
 */
250 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
251 dma_addr_t addr, resource_size_t size)
253 int xlate_pos = ilog2(size);
254 int bar = sndev->peer_direct_mw_to_bar[idx];
255 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
258 ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
259 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
261 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
262 iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
263 &ctl->bar_entry[bar].win_size);
264 iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
265 iowrite64(sndev->self_partition | addr,
266 &ctl->bar_entry[bar].xlate_addr);
/*
 * Program a peer LUT window: one 64-bit entry carrying the enable bit,
 * our partition id (shifted into bit 1..), and the DMA address.
 */
269 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
270 dma_addr_t addr, resource_size_t size)
272 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
274 iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
275 &ctl->lut_entry[peer_lut_index(sndev, idx)]);
/*
 * ntb_dev_ops.mw_set_trans: point inbound window widx at (addr, size), or
 * tear it down when addr/size is zero.  Sequence: validate index and
 * alignment, LOCK the peer partition, program (or clear) the direct/LUT
 * window, then CFG to commit.  If the hardware reports a bar_error the
 * window is cleared and CFG retried so the partition returns to NORMAL.
 * NOTE(review): braces, `else` keywords, early returns and the final
 * return are elided in this extract.
 */
278 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
279 dma_addr_t addr, resource_size_t size)
281 struct switchtec_ntb *sndev = ntb_sndev(ntb);
282 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
283 int xlate_pos = ilog2(size);
284 int nr_direct_mw = sndev->peer_nr_direct_mw;
287 if (pidx != NTB_DEF_PEER_IDX)
290 dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
291 widx, pidx, &addr, &size);
293 if (widx >= switchtec_ntb_mw_count(ntb, pidx))
/* Hardware requires the buffer be aligned to its own size. */
299 if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
301 * In certain circumstances we can get a buffer that is
302 * not aligned to its size. (Most of the time
303 * dma_alloc_coherent ensures this). This can happen when
304 * using large buffers allocated by the CMA
305 * (see CMA_CONFIG_ALIGNMENT)
307 dev_err(&sndev->stdev->dev,
308 "ERROR: Memory window address is not aligned to it's size!\n");
312 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
313 NTB_CTRL_PART_STATUS_LOCKED);
/* addr/size of zero means "tear the window down". */
317 if (addr == 0 || size == 0) {
318 if (widx < nr_direct_mw)
319 switchtec_ntb_mw_clr_direct(sndev, widx);
321 switchtec_ntb_mw_clr_lut(sndev, widx);
323 if (widx < nr_direct_mw)
324 switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
326 switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
/* Commit the configuration; on error, undo and re-commit clean state. */
329 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
330 NTB_CTRL_PART_STATUS_NORMAL);
333 dev_err(&sndev->stdev->dev,
334 "Hardware reported an error configuring mw %d: %08x\n",
335 widx, ioread32(&ctl->bar_error));
337 if (widx < nr_direct_mw)
338 switchtec_ntb_mw_clr_direct(sndev, widx);
340 switchtec_ntb_mw_clr_lut(sndev, widx);
342 switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
343 NTB_CTRL_PART_STATUS_NORMAL);
/*
 * ntb_dev_ops.peer_mw_count: outbound windows we can map — all direct
 * windows, plus usable LUT windows only when the use_lut_mws param is set.
 */
349 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
351 struct switchtec_ntb *sndev = ntb_sndev(ntb);
352 int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
354 return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
/*
 * Report the physical base/size of direct window idx.  The first direct
 * BAR also hosts the LUT entries, so its usable region starts after
 * LUT_SIZE * nr_lut_mw; the reported size is clamped to max_mw_size.
 * NOTE(review): the idx==0 guard around the offset, the offset declaration
 * and the clamping assignments are elided in this extract.
 */
357 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
358 int idx, phys_addr_t *base,
359 resource_size_t *size)
361 int bar = sndev->direct_mw_to_bar[idx];
369 * This is the direct BAR shared with the LUTs
370 * which means the actual window will be offset
371 * by the size of all the LUT entries.
374 offset = LUT_SIZE * sndev->nr_lut_mw;
378 *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
381 *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
382 if (offset && *size > offset)
385 if (*size > max_mw_size)
/*
 * Report the physical base of LUT window idx: a LUT_SIZE slot inside the
 * first direct BAR, indexed via lut_index().  The *size assignment and
 * return are elided in this extract.
 */
392 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
393 int idx, phys_addr_t *base,
394 resource_size_t *size)
396 int bar = sndev->direct_mw_to_bar[0];
399 offset = LUT_SIZE * lut_index(sndev, idx);
402 *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
/*
 * ntb_dev_ops.peer_mw_get_addr: dispatch to the direct- or LUT-window
 * address helper depending on where idx falls; out-of-range handling is
 * in an elided else branch.
 */
410 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
412 resource_size_t *size)
414 struct switchtec_ntb *sndev = ntb_sndev(ntb);
416 if (idx < sndev->nr_direct_mw)
417 return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
418 else if (idx < switchtec_ntb_peer_mw_count(ntb))
419 return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
/*
 * Read one partition's negotiated PCIe speed/width out of the link-status
 * register (PCIe cap region word 13): speed in bits 19:16, width in
 * bits 25:20 — matching the standard PCIe Link Status layout.
 */
424 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
426 enum ntb_speed *speed,
427 enum ntb_width *width)
429 struct switchtec_dev *stdev = sndev->stdev;
431 u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
432 u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
435 *speed = (linksta >> 16) & 0xF;
438 *width = (linksta >> 20) & 0x3F;
/*
 * Cache the effective NTB link speed/width: NONE while the link is down,
 * otherwise the minimum of what each partition negotiated.
 */
441 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
443 enum ntb_speed self_speed, peer_speed;
444 enum ntb_width self_width, peer_width;
446 if (!sndev->link_is_up) {
447 sndev->link_speed = NTB_SPEED_NONE;
448 sndev->link_width = NTB_WIDTH_NONE;
452 switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
453 &self_speed, &self_width);
454 switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
455 &peer_speed, &peer_width);
/* The usable link is bounded by the slower/narrower side. */
457 sndev->link_speed = min(self_speed, peer_speed);
458 sndev->link_width = min(self_width, peer_width);
/* Non-zero when the peer partition has the crosslink feature enabled. */
461 static int crosslink_is_enabled(struct switchtec_ntb *sndev)
463 struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
465 return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
/*
 * In crosslink mode, program the peer dbmsg block: build the message map
 * (one byte per imsg slot encoding slot | self_partition << 2) and unmask
 * the shifted doorbell range.  No-op when crosslink is disabled.
 */
468 static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
473 if (!crosslink_is_enabled(sndev))
476 for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
477 int m = i | sndev->self_partition << 2;
479 msg_map |= m << i * 8;
482 iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
483 iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
484 &sndev->mmio_peer_dbmsg->odb_mask);
/*
 * NOTE(review): last member of an enum (switchtec_msg) whose other values
 * (LINK_MESSAGE, MSG_LINK_UP/DOWN, MSG_CHECK_LINK) are elided here.
 */
492 MSG_LINK_FORCE_DOWN = 4,
/* Forward declaration: used by link_reinit_work before its definition. */
495 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
/* Workqueue handler: re-initialize the peer after a forced link down. */
497 static void link_reinit_work(struct work_struct *work)
499 struct switchtec_ntb *sndev;
501 sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
503 switchtec_ntb_reinit_peer(sndev);
/*
 * Re-evaluate link state in response to msg.  MSG_LINK_FORCE_DOWN
 * schedules deferred re-init and immediately reports link-down.  Otherwise
 * the link is up only when our own link_sta is set AND the peer's shared
 * page carries the magic (its link_sta rides in the upper 32 bits of the
 * magic word).  On any transition, notify the peer and the NTB core.
 * NOTE(review): braces and several connecting lines are elided.
 */
506 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
507 enum switchtec_msg msg)
510 int old = sndev->link_is_up;
512 if (msg == MSG_LINK_FORCE_DOWN) {
513 schedule_work(&sndev->link_reinit_work);
515 if (sndev->link_is_up) {
516 sndev->link_is_up = 0;
517 ntb_link_event(&sndev->ntb);
518 dev_info(&sndev->stdev->dev, "ntb link forced down\n");
524 link_sta = sndev->self_shared->link_sta;
526 u64 peer = ioread64(&sndev->peer_shared->magic);
/* Peer side is valid only if its magic matches; its state is in the top half. */
528 if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
529 link_sta = peer >> 32;
534 sndev->link_is_up = link_sta;
535 switchtec_ntb_set_link_speed(sndev);
537 if (link_sta != old) {
538 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
539 ntb_link_event(&sndev->ntb);
540 dev_info(&sndev->stdev->dev, "ntb link %s\n",
541 link_sta ? "up" : "down");
/* Re-program crosslink dbmsgs in case the peer was reset. */
544 crosslink_init_dbmsgs(sndev);
/* Callback from the switchtec core: hardware link event -> recheck link. */
548 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
550 struct switchtec_ntb *sndev = stdev->sndev;
552 switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
/*
 * ntb_dev_ops.link_is_up: return cached link state, optionally filling the
 * cached speed/width (the NULL guards on speed/width are elided here).
 */
555 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
556 enum ntb_speed *speed,
557 enum ntb_width *width)
559 struct switchtec_ntb *sndev = ntb_sndev(ntb);
562 *speed = sndev->link_speed;
564 *width = sndev->link_width;
566 return sndev->link_is_up;
/*
 * ntb_dev_ops.link_enable: advertise link-up in our shared page, tell the
 * peer, then re-evaluate.  max_speed/max_width are accepted but unused.
 */
569 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
570 enum ntb_speed max_speed,
571 enum ntb_width max_width)
573 struct switchtec_ntb *sndev = ntb_sndev(ntb);
575 dev_dbg(&sndev->stdev->dev, "enabling link\n");
577 sndev->self_shared->link_sta = 1;
578 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
580 switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
/* ntb_dev_ops.link_disable: mirror of link_enable with link_sta = 0. */
585 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
587 struct switchtec_ntb *sndev = ntb_sndev(ntb);
589 dev_dbg(&sndev->stdev->dev, "disabling link\n");
591 sndev->self_shared->link_sta = 0;
592 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
594 switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
/* ntb_dev_ops.db_valid_mask: bitmask of doorbells this side may use. */
599 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
601 struct switchtec_ntb *sndev = ntb_sndev(ntb);
603 return sndev->db_valid_mask;
/*
 * ntb_dev_ops.db_vector_count — body elided in this extract; presumably
 * returns a small fixed count (one doorbell vector) — TODO confirm.
 */
606 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
/*
 * ntb_dev_ops.db_vector_mask: all valid doorbells map to the single
 * doorbell vector; out-of-range vectors get an elided early return.
 */
611 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
613 struct switchtec_ntb *sndev = ntb_sndev(ntb);
615 if (db_vector < 0 || db_vector > 1)
618 return sndev->db_valid_mask;
/*
 * ntb_dev_ops.db_read: read the inbound doorbell register, shift our
 * half into place and mask to the valid bits.
 */
621 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
624 struct switchtec_ntb *sndev = ntb_sndev(ntb);
626 ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
628 return ret & sndev->db_valid_mask;
/* ntb_dev_ops.db_clear: write-1-to-clear the given doorbell bits. */
631 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
633 struct switchtec_ntb *sndev = ntb_sndev(ntb);
635 iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
/*
 * ntb_dev_ops.db_set_mask: mask (disable) doorbell interrupts for db_bits.
 * The cached db_mask and the hardware idb_mask register are updated
 * together under db_mask_lock; the hardware register is inverted
 * (1 = enabled) relative to the cached mask.
 */
640 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
642 unsigned long irqflags;
643 struct switchtec_ntb *sndev = ntb_sndev(ntb);
645 if (db_bits & ~sndev->db_valid_mask)
648 spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
650 sndev->db_mask |= db_bits << sndev->db_shift;
651 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
653 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
/*
 * ntb_dev_ops.db_clear_mask: unmask (re-enable) doorbell interrupts —
 * exact inverse of db_set_mask, same locking discipline.
 */
658 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
660 unsigned long irqflags;
661 struct switchtec_ntb *sndev = ntb_sndev(ntb);
663 if (db_bits & ~sndev->db_valid_mask)
666 spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
668 sndev->db_mask &= ~(db_bits << sndev->db_shift);
669 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
671 spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
/* ntb_dev_ops.db_read_mask: report the cached mask in client bit positions. */
676 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
678 struct switchtec_ntb *sndev = ntb_sndev(ntb);
680 return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
/*
 * ntb_dev_ops.peer_db_addr: physical address a peer can write to ring our
 * doorbells — the odb register's offset within BAR 0 plus a byte offset
 * for db_shift (8 doorbell bits per byte).  NULL guards on the out
 * parameters are elided in this extract.
 */
683 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
684 phys_addr_t *db_addr,
685 resource_size_t *db_size)
687 struct switchtec_ntb *sndev = ntb_sndev(ntb);
688 unsigned long offset;
690 offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
691 (unsigned long)sndev->stdev->mmio;
693 offset += sndev->db_shift / 8;
696 *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
698 *db_size = sizeof(u32);
/* ntb_dev_ops.peer_db_set: ring the peer's doorbells, shifted into its half. */
703 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
705 struct switchtec_ntb *sndev = ntb_sndev(ntb);
707 iowrite64(db_bits << sndev->db_peer_shift,
708 &sndev->mmio_peer_dbmsg->odb);
/* ntb_dev_ops.spad_count: scratchpads live in the shared-mw page. */
713 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
715 struct switchtec_ntb *sndev = ntb_sndev(ntb);
717 return ARRAY_SIZE(sndev->self_shared->spad);
/*
 * ntb_dev_ops.spad_read: read a local scratchpad from the shared page.
 * Bounds and NULL checks guard the access; error returns are elided.
 */
720 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
722 struct switchtec_ntb *sndev = ntb_sndev(ntb);
724 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
727 if (!sndev->self_shared)
730 return sndev->self_shared->spad[idx];
/* ntb_dev_ops.spad_write: write a local scratchpad (same guards as read). */
733 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
735 struct switchtec_ntb *sndev = ntb_sndev(ntb);
737 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
740 if (!sndev->self_shared)
743 sndev->self_shared->spad[idx] = val;
/*
 * ntb_dev_ops.peer_spad_read: read a peer scratchpad through the iomapped
 * peer shared page; only the default peer index is accepted.
 */
748 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
751 struct switchtec_ntb *sndev = ntb_sndev(ntb);
753 if (pidx != NTB_DEF_PEER_IDX)
756 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
759 if (!sndev->peer_shared)
762 return ioread32(&sndev->peer_shared->spad[sidx]);
/* ntb_dev_ops.peer_spad_write: mirror of peer_spad_read using iowrite32. */
765 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
768 struct switchtec_ntb *sndev = ntb_sndev(ntb);
770 if (pidx != NTB_DEF_PEER_IDX)
773 if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
776 if (!sndev->peer_shared)
779 iowrite32(val, &sndev->peer_shared->spad[sidx]);
/*
 * ntb_dev_ops.peer_spad_addr: physical address of a peer scratchpad so a
 * client can write it directly — offset of the spad within BAR 0.
 * NOTE(review): the sidx bounds check appears to be elided in this extract.
 */
784 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
785 int sidx, phys_addr_t *spad_addr)
787 struct switchtec_ntb *sndev = ntb_sndev(ntb);
788 unsigned long offset;
790 if (pidx != NTB_DEF_PEER_IDX)
793 offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
794 (unsigned long)sndev->stdev->mmio;
797 *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
/* NTB core callback table; registered via sndev->ntb.ops in init_sndev. */
802 static const struct ntb_dev_ops switchtec_ntb_ops = {
803 .mw_count = switchtec_ntb_mw_count,
804 .mw_get_align = switchtec_ntb_mw_get_align,
805 .mw_set_trans = switchtec_ntb_mw_set_trans,
806 .peer_mw_count = switchtec_ntb_peer_mw_count,
807 .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
808 .link_is_up = switchtec_ntb_link_is_up,
809 .link_enable = switchtec_ntb_link_enable,
810 .link_disable = switchtec_ntb_link_disable,
811 .db_valid_mask = switchtec_ntb_db_valid_mask,
812 .db_vector_count = switchtec_ntb_db_vector_count,
813 .db_vector_mask = switchtec_ntb_db_vector_mask,
814 .db_read = switchtec_ntb_db_read,
815 .db_clear = switchtec_ntb_db_clear,
816 .db_set_mask = switchtec_ntb_db_set_mask,
817 .db_clear_mask = switchtec_ntb_db_clear_mask,
818 .db_read_mask = switchtec_ntb_db_read_mask,
819 .peer_db_addr = switchtec_ntb_peer_db_addr,
820 .peer_db_set = switchtec_ntb_peer_db_set,
821 .spad_count = switchtec_ntb_spad_count,
822 .spad_read = switchtec_ntb_spad_read,
823 .spad_write = switchtec_ntb_spad_write,
824 .peer_spad_read = switchtec_ntb_peer_spad_read,
825 .peer_spad_write = switchtec_ntb_peer_spad_write,
826 .peer_spad_addr = switchtec_ntb_peer_spad_addr,
/*
 * First-stage init: wire up the ntb_dev, discover self/peer partitions
 * from the target-partition vector (or fall back to the EP map for a
 * simple 2-partition setup), and compute the per-partition ctrl/dbmsg
 * register pointers.  mmio_peer_dbmsg starts as self and is redirected
 * later if crosslink is in use.
 * NOTE(review): locals (self, tpart_vec, part_map, bit), braces, error
 * returns and the <<32 shift on target_part_high are elided here.
 */
829 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
836 sndev->ntb.pdev = sndev->stdev->pdev;
837 sndev->ntb.topo = NTB_TOPO_SWITCH;
838 sndev->ntb.ops = &switchtec_ntb_ops;
840 INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
842 sndev->self_partition = sndev->stdev->partition;
844 sndev->mmio_ntb = sndev->stdev->mmio_ntb;
846 self = sndev->self_partition;
847 tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
849 tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
851 part_map = ioread64(&sndev->mmio_ntb->ep_map);
852 part_map &= ~(1 << sndev->self_partition);
/* No explicit target: only valid for a two-partition switch. */
854 if (!ffs(tpart_vec)) {
855 if (sndev->stdev->partition_count != 2) {
856 dev_err(&sndev->stdev->dev,
857 "ntb target partition not defined\n");
863 dev_err(&sndev->stdev->dev,
864 "peer partition is not NT partition\n");
868 sndev->peer_partition = bit - 1;
/* More than one bit set: unsupported multi-target configuration. */
870 if (ffs(tpart_vec) != fls(tpart_vec)) {
871 dev_err(&sndev->stdev->dev,
872 "ntb driver only supports 1 pair of 1-1 ntb mapping\n");
876 sndev->peer_partition = ffs(tpart_vec) - 1;
877 if (!(part_map & (1 << sndev->peer_partition))) {
878 dev_err(&sndev->stdev->dev,
879 "ntb target partition is not NT partition\n");
884 dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
885 sndev->self_partition, sndev->stdev->partition_count);
887 sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
888 SWITCHTEC_NTB_REG_CTRL_OFFSET;
889 sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
890 SWITCHTEC_NTB_REG_DBMSG_OFFSET;
892 sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
893 sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
894 sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
/* Default: both sides share one dbmsg block; crosslink overrides this. */
895 sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
/*
 * Program one reserved LUT entry (lut_idx) on the given control block to
 * translate to (partition, addr): LOCK, enable LUT windowing on the first
 * peer BAR (entry size = log2(LUT_SIZE) at bits 8.., entry count at
 * bits 14..), write the entry, then CFG.  Errors are logged with both the
 * bar and lut error registers.
 */
900 static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
901 struct ntb_ctrl_regs __iomem *ctl,
902 int lut_idx, int partition, u64 addr)
904 int peer_bar = sndev->peer_direct_mw_to_bar[0];
908 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
909 NTB_CTRL_PART_STATUS_LOCKED);
913 ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
915 ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
916 ctl_val |= ilog2(LUT_SIZE) << 8;
917 ctl_val |= (sndev->nr_lut_mw - 1) << 14;
918 iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
920 iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
921 &ctl->lut_entry[lut_idx]);
923 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
924 NTB_CTRL_PART_STATUS_NORMAL);
926 u32 bar_error, lut_error;
928 bar_error = ioread32(&ctl->bar_error);
929 lut_error = ioread32(&ctl->lut_error);
930 dev_err(&sndev->stdev->dev,
931 "Error setting up reserved lut window: %08x / %08x\n",
932 bar_error, lut_error);
/*
 * Install `count` PCI requester IDs into the partition's proxy table:
 * verify capacity, LOCK, disable ID protection, write each entry
 * (ID in bits 31:16, enable bit), read back and log the assigned proxy
 * IDs, then CFG.  On CFG failure the req_id_error register is reported.
 * NOTE(review): locals (i, rc, error, proxy_id), braces and returns are
 * elided in this extract.
 */
939 static int config_req_id_table(struct switchtec_ntb *sndev,
940 struct ntb_ctrl_regs __iomem *mmio_ctrl,
941 int *req_ids, int count)
947 if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
948 dev_err(&sndev->stdev->dev,
949 "Not enough requester IDs available.\n");
953 rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
954 NTB_CTRL_PART_OP_LOCK,
955 NTB_CTRL_PART_STATUS_LOCKED);
959 iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
960 &mmio_ctrl->partition_ctrl);
962 for (i = 0; i < count; i++) {
963 iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
964 &mmio_ctrl->req_id_table[i]);
966 proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
967 dev_dbg(&sndev->stdev->dev,
968 "Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
969 req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
970 req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
971 (proxy_id >> 1) & 0x7);
974 rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
975 NTB_CTRL_PART_OP_CFG,
976 NTB_CTRL_PART_STATUS_NORMAL);
979 error = ioread32(&mmio_ctrl->req_id_error);
980 dev_err(&sndev->stdev->dev,
981 "Error setting up the requester ID table: %08x\n",
/*
 * Crosslink: program our OWN control block so our LUT and direct windows
 * translate into the virtual-partition BARs.  Every LUT slot except
 * ntb_lut_idx maps a LUT_SIZE slice of mw_addrs[0]; each direct window i
 * maps mw_addrs[i] (the first offset past the LUT region).  Finishes with
 * LOCK ... CFG like the other window-programming paths.
 * NOTE(review): locals (i, rc, addr, bar, offset, size, ctl_val,
 * xlate_pos), braces and the addr OR into the LUT entry are elided.
 */
988 static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
989 u64 *mw_addrs, int mw_count)
992 struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
999 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
1000 NTB_CTRL_PART_STATUS_LOCKED);
1004 for (i = 0; i < sndev->nr_lut_mw; i++) {
/* Skip the slot reserved for the crosslink dbmsg window itself. */
1005 if (i == ntb_lut_idx)
1008 addr = mw_addrs[0] + LUT_SIZE * i;
1010 iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
1012 &ctl->lut_entry[i]);
1015 sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);
1017 for (i = 0; i < sndev->nr_direct_mw; i++) {
1018 bar = sndev->direct_mw_to_bar[i];
/* First BAR is shared with the LUTs; offset past them. */
1019 offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
1020 addr = mw_addrs[i] + offset;
1021 size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
1022 xlate_pos = ilog2(size);
1024 if (offset && size > offset)
1027 ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
1028 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
1030 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
1031 iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
1032 &ctl->bar_entry[bar].win_size);
1033 iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
1034 iowrite64(sndev->peer_partition | addr,
1035 &ctl->bar_entry[bar].xlate_addr);
1038 rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
1039 NTB_CTRL_PART_STATUS_NORMAL);
1041 u32 bar_error, lut_error;
1043 bar_error = ioread32(&ctl->bar_error);
1044 lut_error = ioread32(&ctl->lut_error);
1045 dev_err(&sndev->stdev->dev,
1046 "Error setting up cross link windows: %08x / %08x\n",
1047 bar_error, lut_error);
/*
 * Crosslink: copy the enabled proxy requester IDs from our own table into
 * the peer control block (bits 8:1 of each enabled entry).  The req_ids
 * array declaration and the loop-break on a disabled entry are elided.
 */
1054 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1055 struct ntb_ctrl_regs __iomem *mmio_ctrl)
1061 for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1062 proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1064 if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1067 req_ids[i] = ((proxy_id >> 1) & 0xFF);
1070 return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1074 * In crosslink configuration there is a virtual partition in the
1075 * middle of the two switches. The BARs in this partition have to be
1076 * enumerated and assigned addresses.
/*
 * Enumerate the crosslink virtual partition's 64-bit BARs: enable
 * memory + bus-master (0x6 in PCICMD), assign each BAR a 64GB-spaced
 * address, read it back to confirm, and collect the assigned addresses.
 * Stops at the first BAR that does not accept its address; the bar_cnt
 * declaration/return and the bar_addrs parameter line are elided.
 */
1078 static int crosslink_enum_partition(struct switchtec_ntb *sndev,
1081 struct part_cfg_regs __iomem *part_cfg =
1082 &sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
1083 u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
1084 struct pff_csr_regs __iomem *mmio_pff =
1085 &sndev->stdev->mmio_pff_csr[pff];
1086 const u64 bar_space = 0x1000000000LL;
1091 iowrite16(0x6, &mmio_pff->pcicmd);
1093 for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
1094 iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
1095 bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
1098 dev_dbg(&sndev->stdev->dev,
1099 "Crosslink BAR%d addr: %llx\n",
1102 if (bar_addr != bar_space * i)
1105 bar_addrs[bar_cnt++] = bar_addr;
/*
 * Set up crosslink mode (two switches back-to-back with a virtual
 * partition in between): enumerate virtual BARs, reserve LUT entry 1 to
 * reach the peer's dbmsg registers (aligned down to LUT_SIZE, with the
 * remainder kept as `offset`), program our windows and requester IDs into
 * the peer control block, iomap the LUT slot, and point mmio_peer_dbmsg
 * into it.  No-op when crosslink is disabled.
 * NOTE(review): locals, error-path returns and braces are elided.
 */
1111 static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
1114 int bar = sndev->direct_mw_to_bar[0];
1115 const int ntb_lut_idx = 1;
1121 if (!crosslink_is_enabled(sndev))
1124 dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
1125 sndev->ntb.topo = NTB_TOPO_CROSSLINK;
1127 bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
1128 if (bar_cnt < sndev->nr_direct_mw + 1) {
1129 dev_err(&sndev->stdev->dev,
1130 "Error enumerating crosslink partition\n");
1134 addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
1135 SWITCHTEC_NTB_REG_DBMSG_OFFSET +
1136 sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);
/* Keep the sub-LUT_SIZE remainder to locate the regs inside the window. */
1138 offset = addr & (LUT_SIZE - 1);
1141 rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
1142 sndev->peer_partition, addr);
1146 rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
1151 rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
1155 sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
1156 LUT_SIZE, LUT_SIZE);
1157 if (!sndev->mmio_xlink_win) {
1162 sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
1163 sndev->nr_rsvd_luts++;
1165 crosslink_init_dbmsgs(sndev);
/* Undo init_crosslink: release the iomapped crosslink window, if any. */
1170 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1172 if (sndev->mmio_xlink_win)
1173 pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
/*
 * Scan the BAR entries of a control block and record which are valid in
 * `map` (window index -> BAR number); returns the number found.  The
 * counter declaration, map assignment and return are elided here.
 */
1176 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1181 for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1182 u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1184 if (r & NTB_CTRL_BAR_VALID)
/*
 * Discover memory-window resources on both sides: direct windows via
 * map_bars(), LUT counts from the hardware (rounded down to a power of
 * two, as the LUT BAR encoding requires).
 */
1191 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
1193 sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
1194 sndev->mmio_self_ctrl);
1196 sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
1197 sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
1199 dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
1200 sndev->nr_direct_mw, sndev->nr_lut_mw);
1202 sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
1203 sndev->mmio_peer_ctrl);
1205 sndev->peer_nr_lut_mw =
1206 ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
1207 sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
1209 dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
1210 sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
1215 * There are 64 doorbells in the switch hardware but this is
1216 * shared among all partitions. So we must split them in half
1217 * (32 for each partition). However, the message interrupts are
1218 * also shared with the top 4 doorbells so we just limit this to
1219 * 28 doorbells per partition.
1221 * In crosslink mode, each side has it's own dbmsg register so
1222 * they can each use all 60 of the available doorbells.
/*
 * Partition the doorbell space (see the comment block above): crosslink
 * mode gets the full 60-bit range with no shifts; otherwise the lower
 * partition uses bits 0-27 and the higher one bits 32-59, each seeing the
 * other's half as its outbound side.  Then unmask our inbound doorbells
 * and the peer's outbound range.
 */
1224 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
1226 sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
1228 if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
1229 sndev->db_shift = 0;
1230 sndev->db_peer_shift = 0;
1231 sndev->db_valid_mask = sndev->db_mask;
1232 } else if (sndev->self_partition < sndev->peer_partition) {
1233 sndev->db_shift = 0;
1234 sndev->db_peer_shift = 32;
1235 sndev->db_valid_mask = 0x0FFFFFFF;
1237 sndev->db_shift = 32;
1238 sndev->db_peer_shift = 0;
1239 sndev->db_valid_mask = 0x0FFFFFFF;
1242 iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
1243 iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
1244 &sndev->mmio_peer_dbmsg->odb_mask);
1246 dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
1247 sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
/*
 * Configure inbound messages: build the msg_map (slot | peer partition
 * << 2, one byte per slot) and initialize every imsg with status set and
 * the interrupt masked.
 */
1250 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1255 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1256 int m = i | sndev->peer_partition << 2;
1258 msg_map |= m << i * 8;
1261 iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1263 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1264 iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1265 &sndev->mmio_self_dbmsg->imsg[i]);
/*
 * Populate our requester-ID table with the two IDs that will DMA through
 * the NTB: the root complex (0:00.0, elided assignment) and the host
 * bridge ID read from hardware.  Return type line is elided above.
 */
1269 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1274 * Root Complex Requester ID (which is 0:00.0)
1279 * Host Bridge Requester ID (as read from the mmap address)
1281 req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1283 return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1284 ARRAY_SIZE(req_ids));
/*
 * Fill our shared page: magic + partition id, then the advertised size of
 * every window — direct windows clamped (elided lines apply max_mw_size
 * and subtract/compare the LUT region on the shared BAR), LUT windows are
 * a fixed LUT_SIZE each.
 */
1287 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1291 memset(sndev->self_shared, 0, LUT_SIZE);
1292 sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1293 sndev->self_shared->partition_id = sndev->stdev->partition;
1295 for (i = 0; i < sndev->nr_direct_mw; i++) {
1296 int bar = sndev->direct_mw_to_bar[i];
1297 resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1300 sz = min_t(resource_size_t, sz,
1301 LUT_SIZE * sndev->nr_lut_mw);
1303 sndev->self_shared->mw_sizes[i] = sz;
1306 for (i = 0; i < sndev->nr_lut_mw; i++) {
1307 int idx = sndev->nr_direct_mw + i;
1309 sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
/*
 * Allocate the DMA-coherent shared page, publish it to the peer through
 * reserved LUT entry 0 of the peer control block, and iomap the peer's
 * copy from our first direct BAR.  The LUT reserved for this is accounted
 * in nr_rsvd_luts.  Error paths free the coherent buffer (goto
 * unalloc_and_exit); labels and some rc handling are elided.
 */
1313 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1315 int self_bar = sndev->direct_mw_to_bar[0];
1318 sndev->nr_rsvd_luts++;
1319 sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1321 &sndev->self_shared_dma,
1323 if (!sndev->self_shared) {
1324 dev_err(&sndev->stdev->dev,
1325 "unable to allocate memory for shared mw\n");
1329 switchtec_ntb_init_shared(sndev);
1331 rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1332 sndev->self_partition,
1333 sndev->self_shared_dma);
1335 goto unalloc_and_exit;
1337 sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1338 if (!sndev->peer_shared) {
1340 goto unalloc_and_exit;
1343 dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1347 dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1348 sndev->self_shared, sndev->self_shared_dma);
/*
 * Tear down the shared window: unmap the peer page, free our coherent
 * page, and release the reserved LUT accounting.
 */
1353 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1355 if (sndev->peer_shared)
1356 pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1358 if (sndev->self_shared)
1359 dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1361 sndev->self_shared_dma);
1362 sndev->nr_rsvd_luts--;
/* Doorbell interrupt: forward to the NTB core (single db vector 0). */
1365 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1367 struct switchtec_ntb *sndev = dev;
1369 dev_dbg(&sndev->stdev->dev, "doorbell\n");
1371 ntb_db_event(&sndev->ntb, 0);
/*
 * Message interrupt: for each inbound message with its status bit set,
 * ack it (write 1 to status) and, when it arrived on the LINK_MESSAGE
 * slot, feed the payload into the link state machine.
 */
1376 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1379 struct switchtec_ntb *sndev = dev;
1381 for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1382 u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1384 if (msg & NTB_DBMSG_IMSG_STATUS) {
1385 dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1387 iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1389 if (i == LINK_MESSAGE)
1390 switchtec_ntb_check_link(sndev, msg);
1397 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1401 int doorbell_irq = 0;
1402 int message_irq = 0;
1404 int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1406 event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1408 while (doorbell_irq == event_irq)
1410 while (message_irq == doorbell_irq ||
1411 message_irq == event_irq)
1414 dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1415 event_irq, doorbell_irq, message_irq);
1417 for (i = 0; i < idb_vecs - 4; i++)
1418 iowrite8(doorbell_irq,
1419 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1421 for (; i < idb_vecs; i++)
1422 iowrite8(message_irq,
1423 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1425 sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1426 sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1428 rc = request_irq(sndev->doorbell_irq,
1429 switchtec_ntb_doorbell_isr, 0,
1430 "switchtec_ntb_doorbell", sndev);
1434 rc = request_irq(sndev->message_irq,
1435 switchtec_ntb_message_isr, 0,
1436 "switchtec_ntb_message", sndev);
1438 free_irq(sndev->doorbell_irq, sndev);
1445 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1447 free_irq(sndev->doorbell_irq, sndev);
1448 free_irq(sndev->message_irq, sndev);
1451 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
1453 dev_info(&sndev->stdev->dev, "peer reinitialized\n");
1454 switchtec_ntb_deinit_shared_mw(sndev);
1455 switchtec_ntb_init_mw(sndev);
1456 return switchtec_ntb_init_shared_mw(sndev);
1459 static int switchtec_ntb_add(struct device *dev,
1460 struct class_interface *class_intf)
1462 struct switchtec_dev *stdev = to_stdev(dev);
1463 struct switchtec_ntb *sndev;
1466 stdev->sndev = NULL;
1468 if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
1471 sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1475 sndev->stdev = stdev;
1476 rc = switchtec_ntb_init_sndev(sndev);
1480 switchtec_ntb_init_mw(sndev);
1482 rc = switchtec_ntb_init_req_id_table(sndev);
1486 rc = switchtec_ntb_init_crosslink(sndev);
1490 switchtec_ntb_init_db(sndev);
1491 switchtec_ntb_init_msgs(sndev);
1493 rc = switchtec_ntb_init_shared_mw(sndev);
1495 goto deinit_crosslink;
1497 rc = switchtec_ntb_init_db_msg_irq(sndev);
1499 goto deinit_shared_and_exit;
1502 * If this host crashed, the other host may think the link is
1503 * still up. Tell them to force it down (it will go back up
1504 * once we register the ntb device).
1506 switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);
1508 rc = ntb_register_device(&sndev->ntb);
1510 goto deinit_and_exit;
1512 stdev->sndev = sndev;
1513 stdev->link_notifier = switchtec_ntb_link_notification;
1514 dev_info(dev, "NTB device registered\n");
1519 switchtec_ntb_deinit_db_msg_irq(sndev);
1520 deinit_shared_and_exit:
1521 switchtec_ntb_deinit_shared_mw(sndev);
1523 switchtec_ntb_deinit_crosslink(sndev);
1526 dev_err(dev, "failed to register ntb device: %d\n", rc);
1530 static void switchtec_ntb_remove(struct device *dev,
1531 struct class_interface *class_intf)
1533 struct switchtec_dev *stdev = to_stdev(dev);
1534 struct switchtec_ntb *sndev = stdev->sndev;
1539 stdev->link_notifier = NULL;
1540 stdev->sndev = NULL;
1541 ntb_unregister_device(&sndev->ntb);
1542 switchtec_ntb_deinit_db_msg_irq(sndev);
1543 switchtec_ntb_deinit_shared_mw(sndev);
1544 switchtec_ntb_deinit_crosslink(sndev);
1546 dev_info(dev, "ntb device unregistered\n");
1549 static struct class_interface switchtec_interface = {
1550 .add_dev = switchtec_ntb_add,
1551 .remove_dev = switchtec_ntb_remove,
1554 static int __init switchtec_ntb_init(void)
1556 switchtec_interface.class = switchtec_class;
1557 return class_interface_register(&switchtec_interface);
1559 module_init(switchtec_ntb_init);
1561 static void __exit switchtec_ntb_exit(void)
1563 class_interface_unregister(&switchtec_interface);
1565 module_exit(switchtec_ntb_exit);