2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
35 * Things not implemented:
36 * . Async Stream Packets
37 * . DMA error recovery
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
46 * Adam J Richter <adam@yggdrasil.com>
47 * . Use of pci_class to find device
49 * Andreas Tobler <toa@pop.agri.ch>
50 * . Updated proc_fs calls
52 * Emilie Chung <emilie.chung@axis.com>
53 * . Tip on Async Request Filter
55 * Pascal Drolet <pascal.drolet@informission.ca>
56 * . Various tips for optimization and functionalities
58 * Robert Ficklin <rficklin@westengineering.com>
59 * . Loop in irq_handler
61 * James Goodwin <jamesg@Filanet.com>
62 * . Various tips on initialization, self-id reception, etc.
64 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
65 * . Apple PowerBook detection
67 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
68 * . Reset the board properly before leaving + misc cleanups
70 * Leon van Stuivenberg <leonvs@iae.nl>
73 * Ben Collins <bcollins@debian.org>
74 * . Working big-endian support
75 * . Updated to 2.4.x module scheme (PCI as well)
76 * . Removed procfs support since it trashes random mem
77 * . Config ROM generation
80 #include <linux/config.h>
81 #include <linux/kernel.h>
82 #include <linux/list.h>
83 #include <linux/slab.h>
84 #include <linux/interrupt.h>
85 #include <linux/wait.h>
86 #include <linux/errno.h>
87 #include <linux/module.h>
88 #include <linux/pci.h>
90 #include <linux/poll.h>
91 #include <asm/byteorder.h>
92 #include <asm/atomic.h>
93 #include <asm/uaccess.h>
94 #include <linux/delay.h>
95 #include <linux/spinlock.h>
97 #include <asm/pgtable.h>
99 #include <linux/sched.h>
100 #include <linux/types.h>
101 #include <linux/wrapper.h>
102 #include <linux/vmalloc.h>
103 #include <linux/init.h>
105 #ifdef CONFIG_ALL_PPC
106 #include <asm/machdep.h>
107 #include <asm/pmac_feature.h>
108 #include <asm/prom.h>
109 #include <asm/pci-bridge.h>
112 #include "ieee1394.h"
113 #include "ieee1394_types.h"
115 #include "ieee1394_core.h"
116 #include "highlevel.h"
117 #include "ohci1394.h"
119 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
120 #define OHCI1394_DEBUG
127 #ifdef OHCI1394_DEBUG
/* Per-card debug message; compiled to nothing unless OHCI1394_DEBUG is set. */
128 #define DBGMSG(card, fmt, args...) \
129 printk(KERN_INFO "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
/* No-op stub when debugging is disabled. */
131 #define DBGMSG(card, fmt, args...)
134 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
/* Trace DMA mappings; the side-effecting ++/-- keeps a live count of
 * outstanding mappings in global_outstanding_dmas (debug builds only). */
135 #define OHCI_DMA_ALLOC(fmt, args...) \
136 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
137 ++global_outstanding_dmas, ## args)
138 #define OHCI_DMA_FREE(fmt, args...) \
139 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
140 --global_outstanding_dmas, ## args)
/* Running count of DMA mappings not yet freed. */
141 u32 global_outstanding_dmas = 0;
/* No-op stubs when DMA debugging is disabled. */
143 #define OHCI_DMA_ALLOC(fmt, args...)
144 #define OHCI_DMA_FREE(fmt, args...)
147 /* print general (card independent) information */
148 #define PRINT_G(level, fmt, args...) \
149 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
151 /* print card specific information */
152 #define PRINT(level, card, fmt, args...) \
153 printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
/* Driver revision string, printed at probe time. */
155 static char version[] __devinitdata =
156 "$Rev: 693 $ Ben Collins <bcollins@debian.org>";
158 /* Module Parameters */
/* attempt_root: if non-zero, also set the root-holdoff bit when forcing
 * bus resets so this host tries to become root (default off). */
159 MODULE_PARM(attempt_root,"i");
160 MODULE_PARM_DESC(attempt_root, "Attempt to make the host root (default = 0).");
161 static int attempt_root = 0;
/* phys_dma: when set, the physical request filters are opened after
 * self-ID completes, allowing remote physical DMA (default on). */
163 MODULE_PARM(phys_dma,"i");
164 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
165 static int phys_dma = 1;
/* Forward declarations. */
167 static void dma_trm_tasklet(unsigned long data);
168 static void dma_trm_reset(struct dma_trm_ctx *d);
170 static void ohci1394_pci_remove(struct pci_dev *pdev);
172 #ifndef __LITTLE_ENDIAN
173 /* Swap a series of quads inplace. */
174 static __inline__ void block_swab32(quadlet_t *data, size_t size) {
176 data[size] = swab32(data[size]);
/* Number of header quadlets to byte-swap for each IEEE-1394 transaction
 * code, indexed directly by tcode. A 0 entry means no swappable header
 * (reserved tcode). NOTE(review): the visible array had lost its opening
 * brace and the TCODE_READQ/TCODE_READB/reserved entries, which would
 * misalign every tcode index at and after 3 — restored here. */
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};
196 static inline void packet_swab(quadlet_t *data, int tcode, int len)
198 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
200 block_swab32(data, hdr_sizes[tcode]);
203 /* Don't waste cycles on same sex byte swaps */
204 #define packet_swab(w,x,y)
205 #define block_swab32(x,y)
206 #endif /* !LITTLE_ENDIAN */
208 /***********************************
209 * IEEE-1394 functionality section *
210 ***********************************/
/* Read a PHY register through the PhyControl register: issue a read
 * request for 'addr', poll for completion, and return the returned data
 * byte. Serialized against set_phy_reg() by phy_reg_lock. */
212 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
218 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
/* 0x00008000: start a PHY register read for 'addr' (addr in bits 8-11). */
220 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
/* Busy-poll until the controller flags completion in bit 31. */
222 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
223 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
229 r = reg_read(ohci, OHCI1394_PhyControl);
231 if (i >= OHCI_LOOP_COUNT)
232 PRINT (KERN_ERR, ohci->id, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
233 r, r & 0x80000000, i);
235 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
/* The read-back data byte lives in bits 16-23 of PhyControl. */
237 return (r & 0x00ff0000) >> 16;
/* Write 'data' to PHY register 'addr' through PhyControl. Bit
 * 0x00004000 starts the write and is polled until the controller clears
 * it, i.e. the write has been performed. Serialized by phy_reg_lock. */
240 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
246 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
248 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
/* Poll until the write-request bit drops. */
250 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
251 r = reg_read(ohci, OHCI1394_PhyControl);
252 if (!(r & 0x00004000))
258 if (i == OHCI_LOOP_COUNT)
259 PRINT (KERN_ERR, ohci->id, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
260 r, r & 0x00004000, i);
262 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
267 /* Or's our value into the current value */
268 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
272 old = get_phy_reg (ohci, addr);
274 set_phy_reg (ohci, addr, old);
/* Validate and forward the self-ID packets collected after a bus reset.
 * On a reception error (error flag in SelfIDCount, or a generation
 * mismatch between SelfIDCount and the first buffer quadlet) another
 * bus reset is forced, giving up after OHCI1394_MAX_SELF_ID_ERRORS
 * attempts. Each self-ID quadlet pair is consistency-checked and handed
 * to hpsb_selfid_received(); hpsb_selfid_complete() finishes the cycle. */
279 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
280 int phyid, int isroot)
282 quadlet_t *q = ohci->selfid_buf_cpu;
283 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
287 /* Check status of self-id reception */
289 if (ohci->selfid_swap)
290 q0 = le32_to_cpu(q[0]);
/* Bit 31 = reception error; bits 16-23 must match the generation echoed
 * in the buffer's first quadlet. */
294 if ((self_id_count & 0x80000000) ||
295 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
296 PRINT(KERN_ERR, ohci->id,
297 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
298 self_id_count, q0, ohci->self_id_errors);
300 /* Tip by James Goodwin <jamesg@Filanet.com>:
301 * We had an error, generate another bus reset in response. */
302 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
/* PHY register 1, bit 0x40: initiate bus reset. */
303 set_phy_reg_mask (ohci, 1, 0x40);
304 ohci->self_id_errors++;
306 PRINT(KERN_ERR, ohci->id,
307 "Too many errors on SelfID error reception, giving up!");
312 /* SelfID Ok, reset error counter. */
313 ohci->self_id_errors = 0;
/* Quadlet count from SelfIDCount bits 2-12, minus the leading count word. */
315 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
/* Self-IDs come in pairs: the packet and its bit-inverted check quadlet. */
319 if (ohci->selfid_swap) {
320 q0 = le32_to_cpu(q[0]);
321 q1 = le32_to_cpu(q[1]);
328 DBGMSG (ohci->id, "SelfID packet 0x%x received", q0);
329 hpsb_selfid_received(host, cpu_to_be32(q0));
330 if (((q0 & 0x3f000000) >> 24) == phyid)
331 DBGMSG (ohci->id, "SelfID for this node is 0x%08x", q0);
333 PRINT(KERN_ERR, ohci->id,
334 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
340 DBGMSG(ohci->id, "SelfID complete");
/* Hand control back to the ieee1394 core. */
342 hpsb_selfid_complete(host, phyid, isroot);
/* Request a soft reset of the controller by setting HCControl bit
 * 0x00010000 (softReset), then poll that bit up to OHCI_LOOP_COUNT
 * times while the reset completes. */
347 static void ohci_soft_reset(struct ti_ohci *ohci) {
350 reg_write(ohci, OHCI1394_HCControlSet, 0x00010000);
352 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
353 if (reg_read(ohci, OHCI1394_HCControlSet) & 0x00010000)
357 DBGMSG (ohci->id, "Soft reset finished");
/* Start a DMA context after sanity-checking our node ID: refuses to run
 * while the node ID is not yet valid (bit 31 clear) or equals 63, the
 * unaddressable value. Writes the run bit (0x8000) to the context's
 * ControlSet register 'reg'; 'msg' is an optional debug string. */
360 static int run_context(struct ti_ohci *ohci, int reg, char *msg)
364 /* check that the node id is valid */
365 nodeId = reg_read(ohci, OHCI1394_NodeID);
366 if (!(nodeId&0x80000000)) {
367 PRINT(KERN_ERR, ohci->id,
368 "Running dma failed because Node ID is not valid");
372 /* check that the node number != 63 */
373 if ((nodeId&0x3f)==63) {
374 PRINT(KERN_ERR, ohci->id,
375 "Running dma failed because Node ID == 63");
379 /* Run the dma context */
380 reg_write(ohci, reg, 0x8000);
382 if (msg) PRINT(KERN_DEBUG, ohci->id, "%s", msg);
387 /* Generate the dma receive prgs and start the context */
/* Builds one INPUT_MORE descriptor per receive buffer, chains them into
 * a ring via their branchAddress fields, points the context's command
 * pointer at the first program and sets the run bit. */
388 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
390 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
393 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
395 for (i=0; i<d->num_desc; i++) {
398 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
402 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
404 /* End of descriptor list? */
/* Low bit of branchAddress is the Z field: 1 = one more descriptor to
 * fetch. The last program branches back to the first with Z=0, so the
 * context stalls there until buffers are re-inserted. */
405 if (i + 1 < d->num_desc) {
406 d->prg_cpu[i]->branchAddress =
407 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
409 d->prg_cpu[i]->branchAddress =
410 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
413 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
/* resCount starts at the full buffer size (nothing received yet). */
414 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
420 /* Tell the controller where the first AR program is */
421 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
/* 0x00008000: set the context's run bit. */
424 reg_write(ohci, d->ctrlSet, 0x00008000);
426 DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
429 /* Initialize the dma transmit context */
/* Stops the context and resets its bookkeeping: all descriptor slots
 * free, no pending branch pointer, empty FIFO and pending queues. */
430 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
432 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
434 /* Stop the context */
435 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
439 d->free_prgs = d->num_desc;
440 d->branchAddrPtr = NULL;
441 INIT_LIST_HEAD(&d->fifo_list);
442 INIT_LIST_HEAD(&d->pending_list);
444 DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
447 /* Count the number of available iso contexts */
/* Writing all-ones to the iso context mask register and reading it back
 * reveals which context bits the hardware actually implements; the set
 * bits are then counted. */
448 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
453 reg_write(ohci, reg, 0xffffffff);
454 tmp = reg_read(ohci, reg);
456 DBGMSG(ohci->id,"Iso contexts reg: %08x implemented: %08x", reg, tmp);
458 /* Count the number of contexts */
459 for(i=0; i<32; i++) {
466 static void ohci_init_config_rom(struct ti_ohci *ohci);
468 /* Global initialization */
/* One-time controller setup at probe: program bus options and node ID,
 * enable posted writes, point the hardware at the self-ID buffer and
 * config ROM, initialize all AR/AT/IR/IT DMA contexts, open the request
 * filter, set AT retry limits, unmask interrupts and enable the link. */
469 static void ohci_initialize(struct ti_ohci *ohci)
473 spin_lock_init(&ohci->phy_reg_lock);
474 spin_lock_init(&ohci->event_lock);
476 /* Put some defaults to these undefined bus options */
477 buf = reg_read(ohci, OHCI1394_BusOptions);
478 buf |= 0xE0000000; /* Enable IRMC, CMC and ISC */
479 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
480 buf &= ~0x18000000; /* Disable PMC and BMC */
481 reg_write(ohci, OHCI1394_BusOptions, buf);
483 /* Set the bus number */
484 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
486 /* Enable posted writes */
487 reg_write(ohci, OHCI1394_HCControlSet, 0x00040000);
489 /* Clear link control register */
490 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
492 /* Enable cycle timer and cycle master and set the IRM
493 * contender bit in our self ID packets. */
494 reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
495 set_phy_reg_mask(ohci, 4, 0xc0);
497 /* Clear interrupt registers */
498 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
499 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
501 /* Set up self-id dma buffer */
502 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
504 /* enable self-id dma */
505 reg_write(ohci, OHCI1394_LinkControlSet, 0x00000200);
507 /* Set the Config ROM mapping register */
508 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
510 /* Initialize the Config ROM */
511 ohci_init_config_rom(ohci);
513 /* Now get our max packet size */
/* max_rec field (BusOptions bits 12-15) encodes log2(max payload) - 1. */
514 ohci->max_packet_size =
515 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
517 /* Don't accept phy packets into AR request context */
518 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
520 /* Set bufferFill, isochHeader, multichannel for IR context */
521 reg_write(ohci, OHCI1394_IsoRcvContextControlSet, 0xd0000000);
523 /* Set the context match register to match on all tags */
524 reg_write(ohci, OHCI1394_IsoRcvContextMatch, 0xf0000000);
526 /* Clear the interrupt mask */
527 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
528 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
530 /* Clear the interrupt mask */
531 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
532 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
534 /* Clear the multi channel mask high and low registers */
535 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
536 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
538 /* Initialize AR dma */
539 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
540 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
542 /* Initialize AT dma */
543 initialize_dma_trm_ctx(&ohci->at_req_context);
544 initialize_dma_trm_ctx(&ohci->at_resp_context);
546 /* Initialize IR dma */
547 initialize_dma_rcv_ctx(&ohci->ir_context, 1);
549 /* Initialize IT dma */
550 initialize_dma_trm_ctx(&ohci->it_context);
552 /* Set up isoRecvIntMask to generate interrupts for context 0
553 (thanks to Michael Greger for seeing that I forgot this) */
554 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 0x00000001);
556 /* Set up isoXmitIntMask to generate interrupts for context 0 */
557 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 0x00000001);
560 * Accept AT requests from all nodes. This probably
561 * will have to be controlled from the subsystem
562 * on a per node basis.
564 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
566 /* Specify AT retries */
567 reg_write(ohci, OHCI1394_ATRetries,
568 OHCI1394_MAX_AT_REQ_RETRIES |
569 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
570 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
572 /* We don't want hardware swapping */
573 reg_write(ohci, OHCI1394_HCControlClear, 0x40000000);
575 /* Enable interrupts */
576 reg_write(ohci, OHCI1394_IntMaskSet,
577 OHCI1394_masterIntEnable |
579 OHCI1394_selfIDComplete |
582 OHCI1394_respTxComplete |
583 OHCI1394_reqTxComplete |
586 OHCI1394_cycleInconsistent);
/* 0x00020000: enable the link; packets can flow from here on. */
589 reg_write(ohci, OHCI1394_HCControlSet, 0x00020000);
/* Announce the controller: decode the BCD OHCI version register. */
591 buf = reg_read(ohci, OHCI1394_Version);
592 PRINT(KERN_INFO, ohci->id, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
593 "MMIO=[%lx-%lx] Max Packet=[%d]",
594 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
595 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
596 pci_resource_start(ohci->dev, 0),
597 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
598 ohci->max_packet_size);
602 * Insert a packet in the AT DMA fifo and generate the DMA prg
603 * FIXME: rewrite the program in order to accept packets crossing
605 * check also that a single dma descriptor doesn't cross a
/* Builds the OUTPUT_MORE/OUTPUT_LAST descriptor pair for one packet in
 * slot d->prg_ind, links it behind the previous program via
 * d->branchAddrPtr, and queues the packet on d->fifo_list. Must be
 * called with d->lock held (see dma_trm_flush). */
608 static void insert_packet(struct ti_ohci *ohci,
609 struct dma_trm_ctx *d, struct hpsb_packet *packet)
612 int idx = d->prg_ind;
614 DBGMSG(ohci->id, "Inserting packet for node %d, tlabel=%d, tcode=0x%x, speed=%d",
615 packet->node_id, packet->tlabel, packet->tcode, packet->speed_code);
617 d->prg_cpu[idx]->begin.address = 0;
618 d->prg_cpu[idx]->begin.branchAddress = 0;
620 if (d->type == DMA_CTX_ASYNC_RESP) {
622 * For response packets, we need to put a timeout value in
623 * the 16 lower bits of the status... let's try 1 sec timeout
625 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
626 d->prg_cpu[idx]->begin.status = cpu_to_le32(
627 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
628 ((cycleTimer&0x01fff000)>>12));
630 DBGMSG(ohci->id, "cycleTimer: %08x timeStamp: %08x",
631 cycleTimer, d->prg_cpu[idx]->begin.status);
633 d->prg_cpu[idx]->begin.status = 0;
635 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
/* Raw (PHY) packets get a synthesized PHY-tcode header; normal async
 * packets have their header quadlets rearranged into OHCI AT format. */
637 if (packet->type == hpsb_raw) {
638 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
639 d->prg_cpu[idx]->data[1] = packet->header[0];
640 d->prg_cpu[idx]->data[2] = packet->header[1];
642 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
643 (packet->header[0] & 0xFFFF);
644 d->prg_cpu[idx]->data[1] =
645 (packet->header[1] & 0xFFFF) |
646 (packet->header[0] & 0xFFFF0000);
647 d->prg_cpu[idx]->data[2] = packet->header[2];
648 d->prg_cpu[idx]->data[3] = packet->header[3];
649 packet_swab(d->prg_cpu[idx]->data, packet->tcode,
650 packet->header_size>>2);
653 if (packet->data_size) { /* block transmit */
654 d->prg_cpu[idx]->begin.control =
655 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
656 DMA_CTL_IMMEDIATE | 0x10);
657 d->prg_cpu[idx]->end.control =
658 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
663 * Check that the packet data buffer
664 * does not cross a page boundary.
666 if (cross_bound((unsigned long)packet->data,
667 packet->data_size)>0) {
668 /* FIXME: do something about it */
669 PRINT(KERN_ERR, ohci->id,
670 "%s: packet data addr: %p size %Zd bytes "
671 "cross page boundary", __FUNCTION__,
672 packet->data, packet->data_size);
675 d->prg_cpu[idx]->end.address = cpu_to_le32(
676 pci_map_single(ohci->dev, packet->data,
679 OHCI_DMA_ALLOC("single, block transmit packet");
681 d->prg_cpu[idx]->end.branchAddress = 0;
682 d->prg_cpu[idx]->end.status = 0;
/* Chain this program behind the previous one (Z=3: header + data). */
683 if (d->branchAddrPtr)
684 *(d->branchAddrPtr) =
685 cpu_to_le32(d->prg_bus[idx] | 0x3);
687 &(d->prg_cpu[idx]->end.branchAddress);
688 } else { /* quadlet transmit */
689 if (packet->type == hpsb_raw)
690 d->prg_cpu[idx]->begin.control =
691 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
695 (packet->header_size + 4));
697 d->prg_cpu[idx]->begin.control =
698 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
702 packet->header_size);
/* Chain behind the previous program (Z=2: single descriptor block). */
704 if (d->branchAddrPtr)
705 *(d->branchAddrPtr) =
706 cpu_to_le32(d->prg_bus[idx] | 0x2);
708 &(d->prg_cpu[idx]->begin.branchAddress);
711 } else { /* iso packet */
712 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
713 (packet->header[0] & 0xFFFF);
714 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
715 packet_swab(d->prg_cpu[idx]->data, packet->tcode, packet->header_size>>2);
717 d->prg_cpu[idx]->begin.control =
718 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
719 DMA_CTL_IMMEDIATE | 0x8);
720 d->prg_cpu[idx]->end.control =
721 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
726 d->prg_cpu[idx]->end.address = cpu_to_le32(
727 pci_map_single(ohci->dev, packet->data,
728 packet->data_size, PCI_DMA_TODEVICE));
729 OHCI_DMA_ALLOC("single, iso transmit packet");
731 d->prg_cpu[idx]->end.branchAddress = 0;
732 d->prg_cpu[idx]->end.status = 0;
733 DBGMSG(ohci->id, "Iso xmit context info: header[%08x %08x]\n"
734 " begin=%08x %08x %08x %08x\n"
735 " %08x %08x %08x %08x\n"
736 " end =%08x %08x %08x %08x",
737 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
738 d->prg_cpu[idx]->begin.control,
739 d->prg_cpu[idx]->begin.address,
740 d->prg_cpu[idx]->begin.branchAddress,
741 d->prg_cpu[idx]->begin.status,
742 d->prg_cpu[idx]->data[0],
743 d->prg_cpu[idx]->data[1],
744 d->prg_cpu[idx]->data[2],
745 d->prg_cpu[idx]->data[3],
746 d->prg_cpu[idx]->end.control,
747 d->prg_cpu[idx]->end.address,
748 d->prg_cpu[idx]->end.branchAddress,
749 d->prg_cpu[idx]->end.status);
750 if (d->branchAddrPtr)
751 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
752 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
756 /* queue the packet in the appropriate context queue */
757 list_add_tail(&packet->driver_list, &d->fifo_list);
/* Advance the ring index for the next insert. */
758 d->prg_ind = (d->prg_ind+1)%d->num_desc;
762 * This function fills the AT FIFO with the (eventual) pending packets
763 * and runs or wakes up the AT DMA prg if necessary.
765 * The function MUST be called with the d->lock held.
767 static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
769 struct hpsb_packet *p;
/* Nothing to do when there are no pending packets or no free slots. */
772 if (list_empty(&d->pending_list) || d->free_prgs == 0)
775 p = driver_packet(d->pending_list.next);
/* z = Z field of the command pointer for the first program:
 * 3 descriptors with a payload, 2 for a header-only packet. */
777 z = (p->data_size) ? 3 : 2;
779 /* insert the packets into the at dma fifo */
780 while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
781 struct hpsb_packet *p = driver_packet(d->pending_list.next);
782 list_del(&p->driver_list);
783 insert_packet(ohci, d, p);
786 if (d->free_prgs == 0)
787 PRINT(KERN_INFO, ohci->id,
788 "Transmit DMA FIFO ctx=%d is full... waiting",d->ctx);
790 /* Is the context running ? (should be unless it is
791 the first packet to be sent in this context) */
792 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
793 DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
794 reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
795 run_context(ohci, d->ctrlSet, NULL);
798 /* Wake up the dma context if necessary */
799 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
800 DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
801 reg_write(ohci, d->ctrlSet, 0x1000);
807 /* Transmission of an async packet */
/* hpsb_host entry point: pick the transmit context matching the packet
 * (AT request, AT response or IT), queue the packet on its pending list
 * and flush the context. Rejects packets larger than the controller's
 * maximum payload. */
808 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
810 struct ti_ohci *ohci = host->hostdata;
811 struct dma_trm_ctx *d;
814 if (packet->data_size > ohci->max_packet_size) {
815 PRINT(KERN_ERR, ohci->id,
816 "Transmit packet size %Zd is too big",
821 /* Decide whether we have an iso, a request, or a response packet */
822 if (packet->type == hpsb_raw)
823 d = &ohci->at_req_context;
824 else if (packet->tcode == TCODE_ISO_DATA)
825 d = &ohci->it_context;
/* Bit 1 of the tcode distinguishes responses from requests. */
826 else if (packet->tcode & 0x02)
827 d = &ohci->at_resp_context;
829 d = &ohci->at_req_context;
831 spin_lock_irqsave(&d->lock,flags);
833 list_add_tail(&packet->driver_list, &d->pending_list);
835 dma_trm_flush(ohci, d);
837 spin_unlock_irqrestore(&d->lock,flags);
/* hpsb_host control entry point: dispatch subsystem devctl commands —
 * bus reset, cycle counter get/set, cycle master on/off, cancelling all
 * queued AT transmissions, and (un)listening on iso channels. */
842 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
844 struct ti_ohci *ohci = host->hostdata;
850 DBGMSG(ohci->id, "devctl: Bus reset requested%s",
851 attempt_root ? " and attempting to become root" : "");
/* PHY reg 1: 0x40 = initiate bus reset, 0x80 = root holdoff. */
852 set_phy_reg_mask (ohci, 1, 0x40 | (attempt_root ? 0x80 : 0));
855 case GET_CYCLE_COUNTER:
856 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
859 case SET_CYCLE_COUNTER:
860 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
864 PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err");
867 case ACT_CYCLE_MASTER:
869 /* check if we are root and other nodes are present */
870 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
871 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
873 * enable cycleTimer, cycleMaster
875 DBGMSG(ohci->id, "Cycle master enabled");
876 reg_write(ohci, OHCI1394_LinkControlSet,
880 /* disable cycleTimer, cycleMaster, cycleSource */
881 reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000);
885 case CANCEL_REQUESTS:
886 DBGMSG(ohci->id, "Cancel request received");
887 dma_trm_reset(&ohci->at_req_context);
888 dma_trm_reset(&ohci->at_resp_context);
900 case ISO_LISTEN_CHANNEL:
904 if (arg<0 || arg>63) {
905 PRINT(KERN_ERR, ohci->id,
906 "%s: IS0 listen channel %d is out of range",
911 mask = (u64)0x1<<arg;
/* ISO_channel_usage and the hardware channel mask are kept in sync
 * under IR_channel_lock. */
913 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
915 if (ohci->ISO_channel_usage & mask) {
916 PRINT(KERN_ERR, ohci->id,
917 "%s: IS0 listen channel %d is already used",
919 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
923 ohci->ISO_channel_usage |= mask;
926 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
929 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
932 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
933 DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
936 case ISO_UNLISTEN_CHANNEL:
940 if (arg<0 || arg>63) {
941 PRINT(KERN_ERR, ohci->id,
942 "%s: IS0 unlisten channel %d is out of range",
947 mask = (u64)0x1<<arg;
949 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
951 if (!(ohci->ISO_channel_usage & mask)) {
952 PRINT(KERN_ERR, ohci->id,
953 "%s: IS0 unlisten channel %d is not used",
955 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
959 ohci->ISO_channel_usage &= ~mask;
962 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
965 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
968 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
969 DBGMSG(ohci->id, "Listening disabled on channel %d", arg);
973 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
980 /***************************************
981 * IEEE-1394 functionality section END *
982 ***************************************/
985 /********************************************************
986 * Global stuff (interrupt handler, init/shutdown code) *
987 ********************************************************/
/* Stop a transmit context and abort everything queued on it. All
 * packets from both the hardware FIFO list and the pending list are
 * collected onto a local list under d->lock, the context bookkeeping is
 * reset, and each packet is reported to the subsystem as ACKX_ABORTED
 * after the lock is dropped (hpsb_packet_sent may re-enter the driver). */
989 static void dma_trm_reset(struct dma_trm_ctx *d)
992 LIST_HEAD(packet_list);
994 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
996 /* Lock the context, reset it and release it. Move the packets
997 * that were pending in the context to packet_list and free
998 * them after releasing the lock. */
1000 spin_lock_irqsave(&d->lock, flags);
1002 list_splice(&d->fifo_list, &packet_list);
1003 list_splice(&d->pending_list, &packet_list);
1004 INIT_LIST_HEAD(&d->fifo_list);
1005 INIT_LIST_HEAD(&d->pending_list);
1007 d->branchAddrPtr = NULL;
1008 d->sent_ind = d->prg_ind;
1009 d->free_prgs = d->num_desc;
1011 spin_unlock_irqrestore(&d->lock, flags);
1013 /* Now process subsystem callbacks for the packets from the
1016 while (!list_empty(&packet_list)) {
1017 struct hpsb_packet *p = driver_packet(packet_list.next);
1018 PRINT(KERN_INFO, d->ohci->id,
1019 "AT dma reset ctx=%d, aborting transmission", d->ctx);
1020 list_del(&p->driver_list);
1021 hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
/* Walk the registered iso tasklet list and schedule every tasklet whose
 * context bit is set in the corresponding tx/rx event mask. Called from
 * the interrupt handler, hence the plain (non-irqsave) spin_lock. */
1025 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
1029 struct list_head *lh;
1030 struct ohci1394_iso_tasklet *t;
1033 spin_lock(&ohci->iso_tasklet_list_lock);
1035 list_for_each(lh, &ohci->iso_tasklet_list) {
1036 t = list_entry(lh, struct ohci1394_iso_tasklet, link);
1037 mask = 1 << t->context;
1039 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
1040 tasklet_schedule(&t->tasklet);
1041 if (t->type == OHCI_ISO_RECEIVE && rx_event & mask)
1042 tasklet_schedule(&t->tasklet);
1045 spin_unlock(&ohci->iso_tasklet_list_lock);
/* Main interrupt handler: read-and-clear IntEvent (busReset is handled
 * specially — it is only cleared once selfIDComplete arrives), then
 * dispatch each event bit: unrecoverable errors, bus resets, AT/AR
 * completion (tasklets), iso tx/rx (registered iso tasklets) and
 * self-ID completion, which finishes the bus reset sequence. */
1049 static void ohci_irq_handler(int irq, void *dev_id,
1050 struct pt_regs *regs_are_unused)
1052 quadlet_t event, node_id;
1053 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
1054 struct hpsb_host *host = ohci->host;
1055 int phyid = -1, isroot = 0;
1056 unsigned long flags;
1058 /* Read and clear the interrupt event register. Don't clear
1059 * the busReset event, though, this is done when we get the
1060 * selfIDComplete interrupt. */
1061 spin_lock_irqsave(&ohci->event_lock, flags);
1062 event = reg_read(ohci, OHCI1394_IntEventClear);
1063 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
1064 spin_unlock_irqrestore(&ohci->event_lock, flags);
1068 DBGMSG(ohci->id, "IntEvent: %08x", event);
1070 /* Die right here and now */
1071 if (event & OHCI1394_unrecoverableError) {
1072 PRINT(KERN_ERR, ohci->id, "Unrecoverable error, shutting down card!");
1076 if (event & OHCI1394_cycleInconsistent) {
1077 /* We subscribe to the cycleInconsistent event only to
1078 * clear the corresponding event bit... otherwise,
1079 * isochronous cycleMatch DMA won't work. */
1080 DBGMSG(ohci->id, "OHCI1394_cycleInconsistent");
1081 event &= ~OHCI1394_cycleInconsistent;
1084 if (event & OHCI1394_busReset) {
1085 /* The busReset event bit can't be cleared during the
1086 * selfID phase, so we disable busReset interrupts, to
1087 * avoid burying the cpu in interrupt requests. */
1088 spin_lock_irqsave(&ohci->event_lock, flags);
1089 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
/* Apple UniNorth FireWire workaround: keep clearing busReset until the
 * hardware actually drops the bit. */
1090 if (ohci->dev->vendor == PCI_VENDOR_ID_APPLE &&
1091 ohci->dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
1093 while(reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
1094 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1095 spin_unlock_irqrestore(&ohci->event_lock, flags);
1097 spin_lock_irqsave(&ohci->event_lock, flags);
1100 spin_unlock_irqrestore(&ohci->event_lock, flags);
1101 if (!host->in_bus_reset) {
1102 DBGMSG(ohci->id, "irq_handler: Bus reset requested%s",
1103 (attempt_root) ? " and attempting to become root"
1106 /* Subsystem call */
1107 hpsb_bus_reset(ohci->host);
1109 event &= ~OHCI1394_busReset;
1112 /* XXX: We need a way to also queue the OHCI1394_reqTxComplete,
1113 * but for right now we simply run it upon reception, to make sure
1114 * we get sent acks before response packets. This sucks mainly
1115 * because it halts the interrupt handler. */
1116 if (event & OHCI1394_reqTxComplete) {
1117 struct dma_trm_ctx *d = &ohci->at_req_context;
1118 DBGMSG(ohci->id, "Got reqTxComplete interrupt "
1119 "status=0x%08X", reg_read(ohci, d->ctrlSet));
/* 0x800: the context's dead bit — stop it before processing. */
1120 if (reg_read(ohci, d->ctrlSet) & 0x800)
1121 ohci1394_stop_context(ohci, d->ctrlClear,
1124 dma_trm_tasklet ((unsigned long)d);
1125 event &= ~OHCI1394_reqTxComplete;
1127 if (event & OHCI1394_respTxComplete) {
1128 struct dma_trm_ctx *d = &ohci->at_resp_context;
1129 DBGMSG(ohci->id, "Got respTxComplete interrupt "
1130 "status=0x%08X", reg_read(ohci, d->ctrlSet));
1131 if (reg_read(ohci, d->ctrlSet) & 0x800)
1132 ohci1394_stop_context(ohci, d->ctrlClear,
1135 tasklet_schedule(&d->task);
1136 event &= ~OHCI1394_respTxComplete;
1138 if (event & OHCI1394_RQPkt) {
1139 struct dma_rcv_ctx *d = &ohci->ar_req_context;
1140 DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
1141 reg_read(ohci, d->ctrlSet));
1142 if (reg_read(ohci, d->ctrlSet) & 0x800)
1143 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
1145 tasklet_schedule(&d->task);
1146 event &= ~OHCI1394_RQPkt;
1148 if (event & OHCI1394_RSPkt) {
1149 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
1150 DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
1151 reg_read(ohci, d->ctrlSet));
1152 if (reg_read(ohci, d->ctrlSet) & 0x800)
1153 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
1155 tasklet_schedule(&d->task);
1156 event &= ~OHCI1394_RSPkt;
1158 if (event & OHCI1394_isochRx) {
1161 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
1162 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
1163 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
1164 event &= ~OHCI1394_isochRx;
1166 if (event & OHCI1394_isochTx) {
1169 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
1170 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
1171 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
1172 event &= ~OHCI1394_isochTx;
1174 if (event & OHCI1394_selfIDComplete) {
1175 if (host->in_bus_reset) {
1176 node_id = reg_read(ohci, OHCI1394_NodeID);
/* Bit 31: node ID valid; a clear bit means another reset hit us. */
1178 if (!(node_id & 0x80000000)) {
1179 PRINT(KERN_ERR, ohci->id,
1180 "SelfID received, but NodeID invalid "
1181 "(probably new bus reset occured): %08X",
1183 goto selfid_not_valid;
1186 phyid = node_id & 0x0000003f;
1187 isroot = (node_id & 0x40000000) != 0;
1190 "SelfID interrupt received "
1191 "(phyid %d, %s)", phyid,
1192 (isroot ? "root" : "not root"));
1194 handle_selfid(ohci, host, phyid, isroot);
1196 /* Clear the bus reset event and re-enable the
1197 * busReset interrupt. */
1198 spin_lock_irqsave(&ohci->event_lock, flags);
1199 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1200 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1201 spin_unlock_irqrestore(&ohci->event_lock, flags);
1203 /* Accept Physical requests from all nodes. */
1204 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
1205 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
1207 /* Turn on phys dma reception.
1209 * TODO: Enable some sort of filtering management.
1212 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
1213 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
1214 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
1216 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
1217 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
1219 DBGMSG(ohci->id, "PhyReqFilter=%08x%08x\n",
1220 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
1221 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
1223 hpsb_selfid_complete(host, phyid, isroot);
1225 PRINT(KERN_ERR, ohci->id,
1226 "SelfID received outside of bus reset sequence");
1228 event &= ~OHCI1394_selfIDComplete;
1232 /* Make sure we handle everything, just in case we accidentally
1233 * enabled an interrupt that we didn't write a handler for. */
1235 PRINT(KERN_ERR, ohci->id, "Unhandled interrupt(s) 0x%08x",
1239 /* Put the buffer back into the dma context */
1240 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
1242 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
1243 DBGMSG(ohci->id, "Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
/* Re-arm descriptor idx: reset its residual count to a full buffer and
 * clear the low bits of its branch address (Z nibble = 0, end of list). */
1245 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
/* NOTE(review): le32_to_cpu() on a constant headed TO the device reads
 * backwards -- cpu_to_le32() is the intended direction. The two swaps are
 * identical operations, so behavior is unaffected; confirm and clean up. */
1246 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
/* Link the previous descriptor in the ring to this one (Z = 1). */
1247 idx = (idx + d->num_desc - 1 ) % d->num_desc;
1248 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
1250 /* wake up the dma context if necessary */
/* ContextControl bit 10 (0x400) = active; bit 12 (0x1000) = wake. */
1251 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
1252 PRINT(KERN_INFO, ohci->id,
1253 "Waking dma ctx=%d ... processing is probably too slow",
1255 reg_write(ohci, d->ctrlSet, 0x1000);
/* Byte-swap an incoming quadlet unless the controller already delivered it
 * in host order (noswap is set for the Apple UniNorth quirk -- see
 * no_swap_incoming in the PCI probe below). */
1259 #define cond_le32_to_cpu(data, noswap) \
1260 (noswap ? data : le32_to_cpu(data))
/* Async header length in bytes, indexed by transaction code (tcode).
 * -1 = invalid tcode, 0 = length determined elsewhere (block payloads
 * carry their data_length in the header -- see packet_length()). */
1262 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
1263 -1, 0, -1, 0, -1, -1, 16, -1};
/*
1266 * Determine the length of a packet in the buffer
1267 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 *
 * For async contexts the fixed header size comes from TCODE_SIZE[]; block
 * packets additionally carry data_length in the upper 16 bits of quadlet 3
 * of the header.  If that quadlet spilled into the next descriptor's buffer
 * it is fetched from there instead.  The result is rounded up to a quadlet
 * multiple; a return < 4 signals an invalid tcode to the caller.
 */
1269 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
1270 int offset, unsigned char tcode, int noswap)
1274 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
1275 length = TCODE_SIZE[tcode];
/* Header quadlet 3 may lie past the end of this buffer. */
1277 if (offset + 12 >= d->buf_size) {
1278 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
1279 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
1281 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
1285 } else if (d->type == DMA_CTX_ISO) {
1286 /* Assumption: buffer fill mode with header/trailer */
1287 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
/* Pad to the next quadlet boundary. */
1290 if (length > 0 && length % 4)
1291 length += 4 - (length % 4);
1296 /* Tasklet that processes dma receive buffers */
/* Walks the receive descriptor ring under d->lock, extracting one packet
 * per iteration.  A packet that straddles descriptor buffers is reassembled
 * into the bounce buffer d->spb; each fully-consumed buffer is handed back
 * to the controller via insert_dma_buffer().  Complete non-PHY packets are
 * delivered upward through hpsb_packet_received(). */
1297 static void dma_rcv_tasklet (unsigned long data)
1299 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
1300 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
1301 unsigned int split_left, idx, offset, rescount;
1302 unsigned char tcode;
1303 int length, bytes_left, ack;
1304 unsigned long flags;
1309 spin_lock_irqsave(&d->lock, flags);
1312 offset = d->buf_offset;
1313 buf_ptr = d->buf_cpu[idx] + offset/4;
/* status holds the residual byte count the controller has not yet filled. */
1315 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
1316 bytes_left = d->buf_size - rescount - offset;
1318 while (bytes_left > 0) {
1319 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
1321 /* packet_length() will return < 4 for an error */
1322 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
1324 if (length < 4) { /* something is wrong */
1325 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
1326 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
1328 ohci1394_stop_context(ohci, d->ctrlClear, msg);
1329 spin_unlock_irqrestore(&d->lock, flags);
1333 /* The first case is where we have a packet that crosses
1334 * over more than one descriptor. The next case is where
1335 * it's all in the first descriptor. */
1336 if ((offset + length) > d->buf_size) {
1337 DBGMSG(ohci->id,"Split packet rcv'd");
/* The bounce buffer was sized at alloc time; a larger packet is fatal
 * for this context. */
1338 if (length > d->split_buf_size) {
1339 ohci1394_stop_context(ohci, d->ctrlClear,
1340 "Split packet size exceeded");
1342 d->buf_offset = offset;
1343 spin_unlock_irqrestore(&d->lock, flags);
1347 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
1349 /* Other part of packet not written yet.
1350 * this should never happen I think
1351 * anyway we'll get it on the next call. */
1352 PRINT(KERN_INFO, ohci->id,
1353 "Got only half a packet!");
1355 d->buf_offset = offset;
1356 spin_unlock_irqrestore(&d->lock, flags);
/* Copy the tail of this buffer, then whole buffers, then the head of
 * the last buffer, recycling each descriptor as it is drained. */
1360 split_left = length;
1361 split_ptr = (char *)d->spb;
1362 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
1363 split_left -= d->buf_size-offset;
1364 split_ptr += d->buf_size-offset;
1365 insert_dma_buffer(d, idx);
1366 idx = (idx+1) % d->num_desc;
1367 buf_ptr = d->buf_cpu[idx];
1370 while (split_left >= d->buf_size) {
1371 memcpy(split_ptr,buf_ptr,d->buf_size);
1372 split_ptr += d->buf_size;
1373 split_left -= d->buf_size;
1374 insert_dma_buffer(d, idx);
1375 idx = (idx+1) % d->num_desc;
1376 buf_ptr = d->buf_cpu[idx];
1379 if (split_left > 0) {
1380 memcpy(split_ptr, buf_ptr, split_left);
1381 offset = split_left;
1382 buf_ptr += offset/4;
1385 DBGMSG(ohci->id,"Single packet rcv'd");
1386 memcpy(d->spb, buf_ptr, length);
1388 buf_ptr += length/4;
1389 if (offset==d->buf_size) {
1390 insert_dma_buffer(d, idx);
1391 idx = (idx+1) % d->num_desc;
1392 buf_ptr = d->buf_cpu[idx];
1397 /* We get one phy packet to the async descriptor for each
1398 * bus reset. We always ignore it. */
1399 if (tcode != OHCI1394_TCODE_PHY) {
1400 if (!ohci->no_swap_incoming)
1401 packet_swab(d->spb, tcode, (length - 4) >> 2);
1402 DBGMSG(ohci->id, "Packet received from node"
1403 " %d ack=0x%02X spd=%d tcode=0x%X"
1404 " length=%d ctx=%d tlabel=%d",
1405 (d->spb[1]>>16)&0x3f,
1406 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
1407 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
1408 tcode, length, d->ctx,
1409 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f)/* ack/speed live in the trailing status quadlet appended by the HC */;
1411 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
1414 hpsb_packet_received(ohci->host, d->spb,
1417 #ifdef OHCI1394_DEBUG
1419 PRINT (KERN_DEBUG, ohci->id, "Got phy packet ctx=%d ... discarded",
/* Refresh residual count: the controller may have filled more while we ran. */
1423 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
1425 bytes_left = d->buf_size - rescount - offset;
/* Persist our position in the ring for the next invocation. */
1430 d->buf_offset = offset;
1432 spin_unlock_irqrestore(&d->lock, flags);
1435 /* Bottom half that processes sent packets */
/* Pops completed packets off d->fifo_list in order, translating the OHCI
 * xferStatus into an hpsb ack code, reporting each via hpsb_packet_sent(),
 * and unmapping any streaming payload DMA.  Stops at the first descriptor
 * the controller has not completed yet. */
1436 static void dma_trm_tasklet (unsigned long data)
1438 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
1439 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
1440 struct hpsb_packet *packet;
1441 unsigned long flags;
1445 spin_lock_irqsave(&d->lock, flags);
1447 while (!list_empty(&d->fifo_list)) {
1448 packet = driver_packet(d->fifo_list.next);
1449 datasize = packet->data_size;
/* Packets with payload complete on the end (OUTPUT_LAST) descriptor;
 * quadlet/raw packets complete on the begin descriptor. */
1450 if (datasize && packet->type != hpsb_raw)
1452 d->prg_cpu[d->sent_ind]->end.status) >> 16;
1455 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
1458 /* this packet hasn't been sent yet*/
/* Bit 4 clear means an OHCI event code rather than a 1394 ack code. */
1461 if (!(ack & 0x10)) {
1462 /* XXX: This is an OHCI evt_* code. We need to handle
1463 * this specially! For right now, we just fake an
1464 * ackx_send_error. */
1465 PRINT(KERN_DEBUG, ohci->id, "Received OHCI evt_* error 0x%x",
1467 ack = (ack & 0xffe0) | ACK_BUSY_A;
1470 #ifdef OHCI1394_DEBUG
1473 "Packet sent to node %d tcode=0x%X tLabel="
1474 "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
1475 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
1477 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
1479 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
1481 ack&0x1f, (ack>>5)&0x3,
1482 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])
1487 "Packet sent to node %d tcode=0x%X tLabel="
1488 "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
1489 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
1491 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
1493 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
1495 ack&0x1f, (ack>>5)&0x3,
1496 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
1500 list_del(&packet->driver_list);
1501 hpsb_packet_sent(ohci->host, packet, ack & 0xf);
1504 pci_unmap_single(ohci->dev,
/* NOTE(review): this reads a little-endian field FROM the descriptor, so
 * le32_to_cpu() is the intended conversion; cpu_to_le32() performs the
 * same byte swap, so behavior is unaffected -- confirm and clean up. */
1505 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
1506 datasize, PCI_DMA_TODEVICE);
1507 OHCI_DMA_FREE("single Xmit data packet");
1510 d->sent_ind = (d->sent_ind+1)%d->num_desc;
/* Queue up any packets that were waiting for free descriptors. */
1514 dma_trm_flush(ohci, d);
1516 spin_unlock_irqrestore(&d->lock, flags);
/* Tear down a receive DMA context: stop the hardware context, unhook the
 * tasklet, and release every coherent buffer/program allocation.  Safe to
 * call on a partially-initialized context (alloc_dma_rcv_ctx uses it as
 * its error path); a context with d->ohci == NULL was never set up. */
1519 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
1523 if (d->ohci == NULL)
1526 DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);
1528 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
1530 if (d->type == DMA_CTX_ISO)
1531 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_tasklet);
1533 tasklet_kill(&d->task);
1536 for (i=0; i<d->num_desc; i++)
1537 if (d->buf_cpu[i] && d->buf_bus[i]) {
1538 pci_free_consistent(
1539 d->ohci->dev, d->buf_size,
1540 d->buf_cpu[i], d->buf_bus[i]);
1541 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
1547 for (i=0; i<d->num_desc; i++)
1548 if (d->prg_cpu[i] && d->prg_bus[i]) {
1549 pci_free_consistent(
1550 d->ohci->dev, sizeof(struct dma_cmd),
1551 d->prg_cpu[i], d->prg_bus[i]);
1552 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
/* kfree(NULL) is a no-op, so the guard is redundant but harmless. */
1557 if (d->spb) kfree(d->spb);
1559 /* Mark this context as freed. */
/* Allocate and initialize a receive DMA context: per-descriptor coherent
 * data buffers and dma_cmd programs, plus a bounce buffer (spb) for packets
 * that straddle buffers.  ISO contexts additionally claim an IR hardware
 * context via the iso-tasklet registry; async contexts get a plain tasklet.
 * On any failure, free_dma_rcv_ctx() unwinds whatever was allocated. */
1564 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
1565 enum context_type type, int ctx, int num_desc,
1566 int buf_size, int split_buf_size, int context_base)
1574 d->num_desc = num_desc;
1575 d->buf_size = buf_size;
1576 d->split_buf_size = split_buf_size;
/* Register offsets for this context's control/command-pointer registers. */
1578 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
1579 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
1580 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
1582 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
1583 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
1585 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
1586 PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
1587 free_dma_rcv_ctx(d);
/* Zero the pointer tables so the error path only frees real allocations. */
1590 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
1591 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
1593 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
1595 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
1597 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
1598 PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
1599 free_dma_rcv_ctx(d);
1602 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
1603 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
/* Bounce buffer for packets split across descriptor buffers. */
1605 d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
1607 if (d->spb == NULL) {
1608 PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
1609 free_dma_rcv_ctx(d);
1613 for (i=0; i<d->num_desc; i++) {
1614 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
1617 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
1619 if (d->buf_cpu[i] != NULL) {
1620 memset(d->buf_cpu[i], 0, d->buf_size);
1622 PRINT(KERN_ERR, ohci->id,
1623 "Failed to allocate dma buffer");
1624 free_dma_rcv_ctx(d);
1629 d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
1630 sizeof(struct dma_cmd),
1632 OHCI_DMA_ALLOC("consistent dma_rcv prg[%d]", i);
1634 if (d->prg_cpu[i] != NULL) {
1635 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
1637 PRINT(KERN_ERR, ohci->id,
1638 "Failed to allocate dma prg");
1639 free_dma_rcv_ctx(d);
1644 spin_lock_init(&d->lock);
/* ISO receive shares hardware contexts: claim one via the registry. */
1646 if (type == DMA_CTX_ISO) {
1647 ohci1394_init_iso_tasklet(&ohci->ir_tasklet, OHCI_ISO_RECEIVE,
1648 dma_rcv_tasklet, (unsigned long) d);
1649 if (ohci1394_register_iso_tasklet(ohci,
1650 &ohci->ir_tasklet) < 0) {
1651 PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
1652 free_dma_rcv_ctx(d);
1657 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
/* Tear down a transmit DMA context: stop the hardware context, unhook the
 * (IT or plain) tasklet, and free the per-descriptor program blocks.
 * Mirrors free_dma_rcv_ctx(); also used as alloc_dma_trm_ctx's error path,
 * so it tolerates partially-initialized contexts. */
1662 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
1666 if (d->ohci == NULL)
1669 DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);
1671 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
1673 if (d->type == DMA_CTX_ISO)
1674 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->it_tasklet);
1676 tasklet_kill(&d->task);
1679 for (i=0; i<d->num_desc; i++)
1680 if (d->prg_cpu[i] && d->prg_bus[i]) {
1681 pci_free_consistent(
1682 d->ohci->dev, sizeof(struct at_dma_prg),
1683 d->prg_cpu[i], d->prg_bus[i]);
1684 OHCI_DMA_FREE("consistent dma_trm prg[%d]", i);
1690 /* Mark this context as freed. */
/* Allocate and initialize a transmit DMA context: per-descriptor coherent
 * at_dma_prg program blocks plus either an isochronous-transmit (IT)
 * tasklet claimed through the iso-tasklet registry, or a plain tasklet for
 * async contexts.  On any failure, free_dma_trm_ctx() unwinds whatever was
 * allocated so far. */
1695 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
1696 enum context_type type, int ctx, int num_desc,
1704 d->num_desc = num_desc;
/* Register offsets for this context's control/command-pointer registers. */
1705 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
1706 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
1707 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
1709 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
1711 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
1713 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
1714 PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
1715 free_dma_trm_ctx(d);
/* Zero the pointer tables so the error path only frees real allocations. */
1718 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
1719 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
1721 for (i = 0; i < d->num_desc; i++) {
1722 d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
1723 sizeof(struct at_dma_prg),
1725 OHCI_DMA_ALLOC("consistent dma_trm prg[%d]", i);
1727 if (d->prg_cpu[i] != NULL) {
1728 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
1730 PRINT(KERN_ERR, ohci->id,
1731 "Failed to allocate at dma prg");
1732 free_dma_trm_ctx(d);
1737 spin_lock_init(&d->lock);
1739 /* initialize tasklet */
1740 if (type == DMA_CTX_ISO) {
/* BUGFIX: the IT context must be serviced by the TRANSMIT bottom half.
 * This previously registered dma_rcv_tasklet, so an isochronous-transmit
 * interrupt would run the receive handler against transmit descriptors.
 * The non-ISO path below correctly uses dma_trm_tasklet. */
1741 ohci1394_init_iso_tasklet(&ohci->it_tasklet, OHCI_ISO_TRANSMIT,
1742 dma_trm_tasklet, (unsigned long) d);
1743 if (ohci1394_register_iso_tasklet(ohci,
1744 &ohci->it_tasklet) < 0) {
1745 PRINT(KERN_ERR, ohci->id, "No IT DMA context available");
1746 free_dma_trm_ctx(d);
1751 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
/* Compute the 16-bit CRC used in IEEE-1212 config ROM directory headers,
 * processing each big-endian quadlet 4 bits at a time (8 nibbles per word,
 * from the most significant down). */
1756 static u16 ohci_crc16 (u32 *ptr, int length)
1762 for (; length > 0; length--) {
1763 data = be32_to_cpu(*ptr++);
1764 for (shift = 28; shift >= 0; shift -= 4) {
1765 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
1766 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
1773 /* Config ROM macro implementation influenced by NetBSD OHCI driver */
/* Tracks one ROM unit directory while it is being emitted: where it starts
 * (so the length/CRC header can be patched in afterwards), its length in
 * quadlets, and the quadlet that refers to it (patched with the offset
 * once the directory's position is known). */
1775 struct config_rom_unit {
/* Cursor state for building the whole config ROM image; up to 10 pending
 * unit directories. */
1782 struct config_rom_ptr {
1785 struct config_rom_unit unitdir[10];
/* Emit one quadlet at the cursor (big-endian, per IEEE 1212). */
1788 #define cf_put_1quad(cr, q) (((cr)->data++)[0] = cpu_to_be32(q))
/* Emit four bytes packed MSB-first into one quadlet. */
1790 #define cf_put_4bytes(cr, b1, b2, b3, b4) \
1791 (((cr)->data++)[0] = cpu_to_be32(((b1) << 24) | ((b2) << 16) | ((b3) << 8) | (b4)))
/* Emit a directory entry: 8-bit key in the top byte, 24-bit value below. */
1793 #define cf_put_keyval(cr, key, val) (((cr)->data++)[0] = cpu_to_be32(((key) << 24) | (val)))
/* Emit a NUL-padded string into the ROM, four characters per quadlet,
 * advancing through the string until it is exhausted. */
1795 static inline void cf_put_str(struct config_rom_ptr *cr, const char *str)
1801 memset(fourb, 0, 4);
1802 for (t = 0; t < 4 && str[t]; t++)
1804 cf_put_4bytes(cr, fourb[0], fourb[1], fourb[2], fourb[3]);
/* Advance by at most 4; a short tail leaves str at its terminator. */
1805 str += strlen(str) < 4 ? strlen(str) : 4;
/* Patch a finished unit directory's header quadlet: length in the upper
 * 16 bits, CRC-16 over the directory body (everything after the header)
 * in the lower 16 bits. */
1810 static inline void cf_put_crc16(struct config_rom_ptr *cr, int unit)
1812 *cr->unitdir[unit].start =
1813 cpu_to_be32((cr->unitdir[unit].length << 16) |
1814 ohci_crc16(cr->unitdir[unit].start + 1,
1815 cr->unitdir[unit].length));
/* Start emitting unit directory 'unit'.  If another directory holds a
 * pending reference to this one (set by cf_put_refer), patch that entry
 * with the now-known quadlet offset and re-seal the referring directory's
 * CRC.  The header quadlet is reserved here and filled by cf_unit_end(). */
1818 static inline void cf_unit_begin(struct config_rom_ptr *cr, int unit)
1820 if (cr->unitdir[unit].refer != NULL) {
1821 *cr->unitdir[unit].refer |=
1822 cpu_to_be32 (cr->data - cr->unitdir[unit].refer);
1823 cf_put_crc16(cr, cr->unitdir[unit].refunit);
1826 cr->unitdir[unit].start = cr->data++;
/* Emit a directory entry that will refer to unit 'unit', recording its
 * location so cf_unit_begin() can patch in the offset later.  Only the
 * key byte is written now; the 24-bit offset field starts as zero. */
1829 static inline void cf_put_refer(struct config_rom_ptr *cr, char key, int unit)
1831 cr->unitdir[unit].refer = cr->data;
1832 cr->unitdir[unit].refunit = cr->unitnum;
1833 (cr->data++)[0] = cpu_to_be32(key << 24);
/* Finish the current unit directory: compute its length in quadlets
 * (excluding the header) and seal the header with length + CRC. */
1836 static inline void cf_unit_end(struct config_rom_ptr *cr)
1838 cr->unitdir[cr->unitnum].length = cr->data -
1839 (cr->unitdir[cr->unitnum].start + 1);
1840 cf_put_crc16(cr, cr->unitnum);
1843 /* End of NetBSD derived code. */
/* Build the host's IEEE-1212 configuration ROM in csr_config_rom_cpu:
 * bus info block (from the controller's BusID/BusOptions/GUID registers),
 * a root directory, and a textual-description leaf.  Also programs the
 * ConfigROMhdr register with the finished header quadlet. */
1845 static void ohci_init_config_rom(struct ti_ohci *ohci)
1847 struct config_rom_ptr cr;
1849 memset(&cr, 0, sizeof(cr));
1850 memset(ohci->csr_config_rom_cpu, 0, sizeof (ohci->csr_config_rom_cpu));
1852 cr.data = ohci->csr_config_rom_cpu;
1854 /* Bus info block */
1855 cf_unit_begin(&cr, 0);
1856 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusID));
1857 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusOptions));
1858 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDHi));
1859 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDLo));
1862 DBGMSG(ohci->id, "GUID: %08x:%08x", reg_read(ohci, OHCI1394_GUIDHi),
1863 reg_read(ohci, OHCI1394_GUIDLo));
1865 /* IEEE P1212 suggests the initial ROM header CRC should only
1866 * cover the header itself (and not the entire ROM). Since we do
1867 * this, then we can make our bus_info_len the same as the CRC
 * length: copy the crc_length field (bits 16-23) into bus_info_len
 * (bits 24-31) of the header quadlet. */
1869 ohci->csr_config_rom_cpu[0] |= cpu_to_be32(
1870 (be32_to_cpu(ohci->csr_config_rom_cpu[0]) & 0x00ff0000) << 8);
1871 reg_write(ohci, OHCI1394_ConfigROMhdr,
1872 be32_to_cpu(ohci->csr_config_rom_cpu[0]));
1874 /* Root directory */
1875 cf_unit_begin(&cr, 1);
/* Key 0x03 = module vendor ID; 0x81 = textual descriptor leaf ref. */
1877 cf_put_keyval(&cr, 0x03, reg_read(ohci,OHCI1394_VendorID) & 0xFFFFFF);
1878 cf_put_refer(&cr, 0x81, 2); /* Textual description unit */
1879 cf_put_keyval(&cr, 0x0c, 0x0083c0); /* Node capabilities */
1880 /* NOTE: Add other unit referers here, and append at bottom */
1883 /* Textual description - "Linux 1394" */
1884 cf_unit_begin(&cr, 2);
1885 cf_put_keyval(&cr, 0, 0);
1886 cf_put_1quad(&cr, 0);
1887 cf_put_str(&cr, "Linux OHCI-1394");
/* Total ROM length in quadlets, reported by ohci_get_rom(). */
1890 ohci->csr_config_rom_length = cr.data - ohci->csr_config_rom_cpu;
/* hpsb_host_driver::get_rom hook: expose the prebuilt config ROM.
 * Returns its size in bytes (length is stored in quadlets). */
1893 static size_t ohci_get_rom(struct hpsb_host *host, quadlet_t **ptr)
1895 struct ti_ohci *ohci=host->hostdata;
1897 DBGMSG(ohci->id, "request csr_rom address: %p",
1898 ohci->csr_config_rom_cpu);
1900 *ptr = ohci->csr_config_rom_cpu;
1902 return ohci->csr_config_rom_length * 4;
/* hpsb_host_driver::hw_csr_reg hook: perform a hardware-assisted CSR
 * compare-and-swap through the controller's CSRData/CSRCompareData/
 * CSRControl registers, polling (bounded by OHCI_LOOP_COUNT) for the
 * done bit before reading back the result. */
1905 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
1906 quadlet_t data, quadlet_t compare)
1908 struct ti_ohci *ohci = host->hostdata;
1911 reg_write(ohci, OHCI1394_CSRData, data);
1912 reg_write(ohci, OHCI1394_CSRCompareData, compare);
/* Low 2 bits of CSRControl select which CSR register to operate on. */
1913 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
1915 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1916 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
1922 return reg_read(ohci, OHCI1394_CSRData);
/* Operations table handed to the ieee1394 core via hpsb_alloc_host(). */
1925 static struct hpsb_host_driver ohci1394_driver = {
1926 .name = OHCI1394_DRIVER_NAME,
1927 .get_rom = ohci_get_rom,
1928 .transmit_packet = ohci_transmit,
1929 .devctl = ohci_devctl,
1930 .hw_csr_reg = ohci_hw_csr_reg,
1935 /***********************************
1936 * PCI Driver Interface functions *
1937 ***********************************/
/* Probe-failure helper: log, unwind everything done so far (the remove
 * routine keys off ohci->init_state), and return the error code. */
1939 #define FAIL(err, fmt, args...) \
1941 PRINT_G(KERN_ERR, fmt , ## args); \
1942 ohci1394_pci_remove(dev); \
/* PCI probe: bring up one OHCI-1394 controller.  Allocates the hpsb host,
 * maps the register window, allocates config-ROM/self-ID/async/iso DMA
 * resources, hooks the IRQ, then initializes the controller and registers
 * the host with the ieee1394 core.  ohci->init_state records how far setup
 * got so that FAIL() -> ohci1394_pci_remove() can unwind precisely. */
1946 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
1947 const struct pci_device_id *ent)
1949 static unsigned int card_id_counter = 0;
1950 static int version_printed = 0;
1952 struct hpsb_host *host;
1953 struct ti_ohci *ohci; /* shortcut to currently handled device */
1954 unsigned long ohci_base;
1956 if (version_printed++ == 0)
1957 PRINT_G(KERN_INFO, "%s", version);
1959 if (pci_enable_device(dev))
1960 FAIL(-ENXIO, "Failed to enable OHCI hardware %d",
1962 pci_set_master(dev);
1964 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci));
1965 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
1967 ohci = host->hostdata;
1968 ohci->id = card_id_counter++;
1971 ohci->init_state = OHCI_INIT_ALLOC_HOST;
1973 pci_set_drvdata(dev, ohci);
1975 /* We don't want hardware swapping */
1976 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
1978 /* Some oddball Apple controllers do not order the selfid
1979 * properly, so we make up for it here. */
1980 #ifndef __LITTLE_ENDIAN
1981 /* XXX: Need a better way to check this. I'm wondering if we can
1982 * read the values of the OHCI1394_PCI_HCI_Control and the
1983 * noByteSwapData registers to see if they were not cleared to
1984 * zero. Should this work? Obviously it's not defined what these
1985 * registers will read when they aren't supported. Bleh! */
1986 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
1987 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
1988 ohci->no_swap_incoming = 1;
1989 ohci->selfid_swap = 0;
1991 ohci->selfid_swap = 1;
1994 /* We hardwire the MMIO length, since some CardBus adaptors
1995 * fail to report the right length. Anyway, the ohci spec
1996 * clearly says it's 2kb, so this shouldn't be a problem. */
1997 ohci_base = pci_resource_start(dev, 0);
1998 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
1999 PRINT(KERN_WARNING, ohci->id, "Unexpected PCI resource length of %lx!",
2000 pci_resource_len(dev, 0));
2002 /* Seems PCMCIA handles this internally. Not sure why. Seems
2003 * pretty bogus to force a driver to special case this. */
2005 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
2006 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
2007 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
2009 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
2011 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
2012 if (ohci->registers == NULL)
2013 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
2014 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
2015 DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p", ohci->registers);
2017 /* csr_config rom allocation */
2018 ohci->csr_config_rom_cpu =
2019 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
2020 &ohci->csr_config_rom_bus);
2021 OHCI_DMA_ALLOC("consistent csr_config_rom");
2022 if (ohci->csr_config_rom_cpu == NULL)
2023 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
2024 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
2026 /* self-id dma buffer allocation */
2027 ohci->selfid_buf_cpu =
2028 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
2029 &ohci->selfid_buf_bus);
2030 OHCI_DMA_ALLOC("consistent selfid_buf");
2032 if (ohci->selfid_buf_cpu == NULL)
2033 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
2034 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
2036 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
2037 PRINT(KERN_INFO, ohci->id, "SelfID buffer %p is not aligned on "
2038 "8Kb boundary... may cause problems on some CXD3222 chip",
2039 ohci->selfid_buf_cpu);
2041 /* No self-id errors at startup */
2042 ohci->self_id_errors = 0;
2044 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
2045 /* AR DMA request context allocation */
2046 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
2047 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
2048 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
2049 OHCI1394_AsReqRcvContextBase) < 0)
2050 FAIL(-ENOMEM, "Failed to allocate AR Req context");
2052 /* AR DMA response context allocation */
2053 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
2054 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
2055 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
2056 OHCI1394_AsRspRcvContextBase) < 0)
2057 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
2059 /* AT DMA request context */
2060 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
2061 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
2062 OHCI1394_AsReqTrContextBase) < 0)
2063 FAIL(-ENOMEM, "Failed to allocate AT Req context");
2065 /* AT DMA response context */
2066 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
2067 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
2068 OHCI1394_AsRspTrContextBase) < 0)
2069 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
2071 /* Start off with a soft reset, to clear everything to a sane
2073 ohci_soft_reset(ohci);
2075 /* Now enable LPS, which we need in order to start accessing
2076 * most of the registers. In fact, on some cards (ALI M5251),
2077 * accessing registers in the SClk domain without LPS enabled
2078 * will lock up the machine. Wait 50msec to make sure we have
2079 * full link enabled. */
2080 reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
2083 /* Determine the number of available IR and IT contexts. */
2084 ohci->nb_iso_rcv_ctx =
2085 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
2086 DBGMSG(ohci->id, "%d iso receive contexts available",
2087 ohci->nb_iso_rcv_ctx);
2089 ohci->nb_iso_xmit_ctx =
2090 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
2091 DBGMSG(ohci->id, "%d iso transmit contexts available",
2092 ohci->nb_iso_xmit_ctx);
2094 /* Set the usage bits for non-existent contexts so they can't
2096 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
2097 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
2099 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
2100 spin_lock_init(&ohci->iso_tasklet_list_lock);
2101 ohci->ISO_channel_usage = 0;
2102 spin_lock_init(&ohci->IR_channel_lock);
2104 /* IR DMA context */
2105 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_context,
2106 DMA_CTX_ISO, 0, IR_NUM_DESC,
2107 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
2108 OHCI1394_IsoRcvContextBase) < 0)
2109 FAIL(-ENOMEM, "Failed to allocate IR context");
2112 /* IT DMA context allocation */
2113 if (alloc_dma_trm_ctx(ohci, &ohci->it_context,
2114 DMA_CTX_ISO, 0, IT_NUM_DESC,
2115 OHCI1394_IsoXmitContextBase) < 0)
2116 FAIL(-ENOMEM, "Failed to allocate IT context");
2118 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
2119 OHCI1394_DRIVER_NAME, ohci))
2120 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
2122 ohci->init_state = OHCI_INIT_HAVE_IRQ;
2123 ohci_initialize(ohci);
2125 /* Tell the highlevel this host is ready */
2126 hpsb_add_host(host);
2127 ohci->init_state = OHCI_INIT_DONE;
/* PCI remove / probe-failure unwind.  The switch on init_state deliberately
 * falls through: each case releases the resource acquired at that stage,
 * so starting from the recorded state tears down exactly what was set up,
 * in reverse order of acquisition. */
2133 static void ohci1394_pci_remove(struct pci_dev *pdev)
2135 struct ti_ohci *ohci;
2137 ohci = pci_get_drvdata(pdev);
2141 switch (ohci->init_state) {
2142 case OHCI_INIT_DONE:
2143 hpsb_remove_host(ohci->host);
/* fallthrough */
2145 case OHCI_INIT_HAVE_IRQ:
2146 /* Soft reset before we start - this disables
2147 * interrupts and clears linkEnable and LPS. */
2148 ohci_soft_reset(ohci);
2149 free_irq(ohci->dev->irq, ohci);
/* fallthrough */
2151 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
/* The free_* helpers tolerate contexts that were never allocated. */
2153 free_dma_rcv_ctx(&ohci->ar_req_context);
2154 free_dma_rcv_ctx(&ohci->ar_resp_context);
2157 free_dma_trm_ctx(&ohci->at_req_context);
2158 free_dma_trm_ctx(&ohci->at_resp_context);
2161 free_dma_rcv_ctx(&ohci->ir_context);
2164 free_dma_trm_ctx(&ohci->it_context);
/* fallthrough */
2166 case OHCI_INIT_HAVE_SELFID_BUFFER:
2167 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
2168 ohci->selfid_buf_cpu,
2169 ohci->selfid_buf_bus);
2170 OHCI_DMA_FREE("consistent selfid_buf");
/* fallthrough */
2172 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
2173 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
2174 ohci->csr_config_rom_cpu,
2175 ohci->csr_config_rom_bus);
2176 OHCI_DMA_FREE("consistent csr_config_rom");
/* fallthrough */
2178 case OHCI_INIT_HAVE_IOMAPPING:
2179 iounmap(ohci->registers);
/* fallthrough */
2181 case OHCI_INIT_HAVE_MEM_REGION:
2183 release_mem_region(pci_resource_start(ohci->dev, 0),
2184 OHCI1394_REGISTER_SIZE);
2187 #ifdef CONFIG_ALL_PPC
2188 /* On UniNorth, power down the cable and turn off the chip
2189 * clock when the module is removed to save power on
2190 * laptops. Turning it back ON is done by the arch code when
2191 * pci_enable_device() is called */
2193 struct device_node* of_node;
2195 of_node = pci_device_to_OF_node(ohci->dev);
2197 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
2198 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
2201 #endif /* CONFIG_ALL_PPC */
/* fallthrough */
2203 case OHCI_INIT_ALLOC_HOST:
2204 pci_set_drvdata(ohci->dev, NULL);
2205 hpsb_unref_host(ohci->host);
/* Match any PCI device whose class code is serial-bus/FireWire with the
 * OHCI programming interface (0x10) -- vendor/device IDs are wildcards,
 * so every spec-compliant OHCI-1394 controller binds to this driver. */
2209 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
2211 static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
2213 .class = PCI_CLASS_FIREWIRE_OHCI,
2214 .class_mask = 0x00ffffff,
2215 .vendor = PCI_ANY_ID,
2216 .device = PCI_ANY_ID,
2217 .subvendor = PCI_ANY_ID,
2218 .subdevice = PCI_ANY_ID,
2223 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
/* PCI driver glue registered from ohci1394_init(). */
2225 static struct pci_driver ohci1394_pci_driver = {
2226 .name = OHCI1394_DRIVER_NAME,
2227 .id_table = ohci1394_pci_tbl,
2228 .probe = ohci1394_pci_probe,
2229 .remove = ohci1394_pci_remove,
2234 /***********************************
2235 * OHCI1394 Video Interface *
2236 ***********************************/
2238 /* essentially the only purpose of this code is to allow another
2239 module to hook into ohci's interrupt handler */
/* Halt a DMA context: clear the run bit (0x8000) through its ctrlClear
 * register, then poll until the active bit (0x400) drops.  The loop is
 * bounded; a runaway context is logged rather than spun on forever.
 * 'msg', if non-NULL, prefixes the "dma prg stopped" log line. */
2241 void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
2245 /* stop the channel program if it's still running */
2246 reg_write(ohci, reg, 0x8000);
2248 /* Wait until it effectively stops */
2249 while (reg_read(ohci, reg) & 0x400) {
2252 PRINT(KERN_ERR, ohci->id,
2253 "Runaway loop while stopping context...");
2257 if (msg) PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
/* Prepare an iso tasklet descriptor (type = OHCI_ISO_RECEIVE or
 * OHCI_ISO_TRANSMIT) without claiming a hardware context yet -- that
 * happens in ohci1394_register_iso_tasklet(). */
2260 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
2261 void (*func)(unsigned long), unsigned long data)
2263 tasklet_init(&tasklet->tasklet, func, data);
2264 tasklet->type = type;
2265 /* We init the tasklet->link field, so we can list_del() it
2266 * without worrying wether it was added to the list or not. */
2267 INIT_LIST_HEAD(&tasklet->link);
/* Claim a free hardware iso context (IT or IR usage bitmap, chosen by the
 * tasklet's type) and enlist the tasklet so the interrupt handler can
 * schedule it.  Returns the context number claimed, or -EBUSY when all
 * contexts of that direction are in use.  Usage bits for contexts the
 * hardware doesn't implement were pre-set at probe time. */
2270 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
2271 struct ohci1394_iso_tasklet *tasklet)
2273 unsigned long flags, *usage;
2274 int n, i, r = -EBUSY;
2276 if (tasklet->type == OHCI_ISO_TRANSMIT) {
2277 n = ohci->nb_iso_xmit_ctx;
2278 usage = &ohci->it_ctx_usage;
2281 n = ohci->nb_iso_rcv_ctx;
2282 usage = &ohci->ir_ctx_usage;
2285 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2287 for (i = 0; i < n; i++)
2288 if (!test_and_set_bit(i, usage)) {
2289 tasklet->context = i;
2290 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
2295 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Release a claimed iso context: kill the tasklet, clear its usage bit
 * in the appropriate (IT or IR) bitmap, and delist it.  list_del() is
 * safe even if registration never happened, because the link field was
 * self-initialized in ohci1394_init_iso_tasklet(). */
2300 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
2301 struct ohci1394_iso_tasklet *tasklet)
2303 unsigned long flags;
2305 tasklet_kill(&tasklet->tasklet);
2307 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2309 if (tasklet->type == OHCI_ISO_TRANSMIT)
2310 clear_bit(tasklet->context, &ohci->it_ctx_usage);
2312 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
2314 list_del(&tasklet->link);
2316 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Exported for other 1394 modules that hook into this driver's interrupt
 * handling (see the comment above ohci1394_stop_context). */
2319 EXPORT_SYMBOL(ohci1394_stop_context);
2320 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
2321 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
2322 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
2325 /***********************************
2326 * General module initialization *
2327 ***********************************/
2329 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
2330 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
2331 MODULE_LICENSE("GPL");
/* Module exit: unregister the PCI driver; the core then invokes
 * ohci1394_pci_remove() for every bound device. */
2333 static void __exit ohci1394_cleanup (void)
2335 pci_unregister_driver(&ohci1394_pci_driver);
/* Module entry point: register with the PCI core, which probes all
 * matching OHCI-1394 controllers. */
2338 static int __init ohci1394_init(void)
2340 return pci_module_init(&ohci1394_pci_driver);
2343 module_init(ohci1394_init);
2344 module_exit(ohci1394_cleanup);