/* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *
 * based on work by Grant Guenther <grant@torque.net>
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
18 #undef PARPORT_DEBUG_SHARING /* undef for production */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>

#include <linux/spinlock.h>
37 #undef PARPORT_PARANOID
39 #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
41 unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
42 int parport_default_spintime = DEFAULT_SPIN_TIME;
44 static struct parport *portlist = NULL, *portlist_tail = NULL;
45 static spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;
47 static struct parport_driver *driver_chain = NULL;
48 static spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;
/* What you can do to a port that's gone away..  These no-op stubs are
 * installed in dead_ops when a port is unregistered, so device drivers
 * holding stale pointers fail harmlessly instead of touching freed
 * hardware state. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static void dead_noargs (void) { }
/* Block transfers on a dead port report zero bytes moved. */
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
63 static struct parport_operations dead_ops = {
64 dead_write_lines, /* data */
66 dead_write_lines, /* control */
69 dead_read_lines, /* status */
70 dead_onearg, /* enable_irq */
71 dead_onearg, /* disable_irq */
72 dead_onearg, /* data_forward */
73 dead_onearg, /* data_reverse */
74 dead_initstate, /* init_state */
77 dead_noargs, /* xxx_use_count */
86 dead_write, /* compat */
87 dead_read, /* nibble */
91 /* Call attach(port) for each registered driver. */
92 static void attach_driver_chain(struct parport *port)
94 struct parport_driver *drv;
95 void (**attach) (struct parport *);
98 /* This is complicated because attach() must be able to block,
99 * but we can't let it do that while we're holding a
102 spin_lock (&driverlist_lock);
103 for (drv = driver_chain; drv; drv = drv->next)
105 spin_unlock (&driverlist_lock);
107 /* Drivers can unregister here; that's okay. If they register
108 * they'll be given an attach during parport_register_driver,
109 * so that's okay too. The only worry is that someone might
110 * get given an attach twice if they registered just before
111 * this function gets called. */
113 /* Hmm, this could be fixed with a generation number..
116 attach = kmalloc (sizeof (void(*)(struct parport *)) * count,
119 printk (KERN_WARNING "parport: not enough memory to attach\n");
123 spin_lock (&driverlist_lock);
124 for (i = 0, drv = driver_chain; drv && i < count; drv = drv->next)
125 attach[i++] = drv->attach;
126 spin_unlock (&driverlist_lock);
128 for (count = 0; count < i; count++)
129 (*attach[count]) (port);
134 /* Call detach(port) for each registered driver. */
135 static void detach_driver_chain(struct parport *port)
137 struct parport_driver *drv;
139 spin_lock (&driverlist_lock);
140 for (drv = driver_chain; drv; drv = drv->next)
142 spin_unlock (&driverlist_lock);
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}
154 * parport_register_driver - register a parallel port device driver
155 * @drv: structure describing the driver
157 * This can be called by a parallel port device driver in order
158 * to receive notifications about ports being found in the
159 * system, as well as ports no longer available.
161 * The @drv structure is allocated by the caller and must not be
162 * deallocated until after calling parport_unregister_driver().
164 * The driver's attach() function may block. The port that
165 * attach() is given will be valid for the duration of the
166 * callback, but if the driver wants to take a copy of the
167 * pointer it must call parport_get_port() to do so. Calling
168 * parport_register_device() on that port will do this for you.
170 * The driver's detach() function may not block. The port that
171 * detach() is given will be valid for the duration of the
172 * callback, but if the driver wants to take a copy of the
173 * pointer it must call parport_get_port() to do so.
175 * Returns 0 on success. Currently it always succeeds.
178 int parport_register_driver (struct parport_driver *drv)
180 struct parport *port;
181 struct parport **ports;
185 get_lowlevel_driver ();
187 /* We have to take the portlist lock for this to be sure
188 * that port is valid for the duration of the callback. */
190 /* This is complicated by the fact that attach must be allowed
191 * to block, so we can't be holding any spinlocks when we call
192 * it. But we need to hold a spinlock to iterate over the
195 spin_lock (&parportlist_lock);
196 for (port = portlist; port; port = port->next)
198 spin_unlock (&parportlist_lock);
200 ports = kmalloc (sizeof (struct parport *) * count, GFP_KERNEL);
202 printk (KERN_WARNING "parport: not enough memory to attach\n");
204 spin_lock (&parportlist_lock);
205 for (i = 0, port = portlist; port && i < count;
208 spin_unlock (&parportlist_lock);
210 for (count = 0; count < i; count++)
211 drv->attach (ports[count]);
216 spin_lock (&driverlist_lock);
217 drv->next = driver_chain;
219 spin_unlock (&driverlist_lock);
225 * parport_unregister_driver - deregister a parallel port device driver
226 * @arg: structure describing the driver that was given to
227 * parport_register_driver()
229 * This should be called by a parallel port device driver that
230 * has registered itself using parport_register_driver() when it
231 * is about to be unloaded.
233 * When it returns, the driver's attach() routine will no longer
234 * be called, and for each port that attach() was called for, the
235 * detach() routine will have been called.
237 * If the caller's attach() function can block, it is their
238 * responsibility to make sure to wait for it to exit before
241 * All the driver's detach() calls are guaranteed to have
242 * finished by the time this function returns.
244 * The driver's detach() call is not allowed to block.
247 void parport_unregister_driver (struct parport_driver *arg)
249 struct parport_driver *drv = driver_chain, *olddrv = NULL;
253 struct parport *port;
255 spin_lock (&driverlist_lock);
257 olddrv->next = drv->next;
259 driver_chain = drv->next;
260 spin_unlock (&driverlist_lock);
262 /* Call the driver's detach routine for each
263 * port to clean up any resources that the
264 * attach routine acquired. */
265 spin_lock (&parportlist_lock);
266 for (port = portlist; port; port = port->next)
268 spin_unlock (&parportlist_lock);
277 static void free_port (struct parport *port)
280 for (d = 0; d < 5; d++) {
281 if (port->probe_info[d].class_name)
282 kfree (port->probe_info[d].class_name);
283 if (port->probe_info[d].mfr)
284 kfree (port->probe_info[d].mfr);
285 if (port->probe_info[d].model)
286 kfree (port->probe_info[d].model);
287 if (port->probe_info[d].cmdset)
288 kfree (port->probe_info[d].cmdset);
289 if (port->probe_info[d].description)
290 kfree (port->probe_info[d].description);
298 * parport_get_port - increment a port's reference count
301 * This ensure's that a struct parport pointer remains valid
302 * until the matching parport_put_port() call.
305 struct parport *parport_get_port (struct parport *port)
307 atomic_inc (&port->ref_count);
312 * parport_put_port - decrement a port's reference count
315 * This should be called once for each call to parport_get_port(),
316 * once the port is no longer needed.
319 void parport_put_port (struct parport *port)
321 if (atomic_dec_and_test (&port->ref_count))
322 /* Can destroy it now. */
329 * parport_enumerate - return a list of the system's parallel ports
331 * This returns the head of the list of parallel ports in the
332 * system, as a &struct parport. The structure that is returned
333 * describes the first port in the list, and its 'next' member
334 * points to the next port, or %NULL if it's the last port.
336 * If there are no parallel ports in the system,
337 * parport_enumerate() will return %NULL.
340 struct parport *parport_enumerate(void)
342 /* Don't use this: use parport_register_driver instead. */
345 get_lowlevel_driver ();
351 * parport_register_port - register a parallel port
352 * @base: base I/O address
355 * @ops: pointer to the port driver's port operations structure
357 * When a parallel port (lowlevel) driver finds a port that
358 * should be made available to parallel port device drivers, it
359 * should call parport_register_port(). The @base, @irq, and
360 * @dma parameters are for the convenience of port drivers, and
361 * for ports where they aren't meaningful needn't be set to
362 * anything special. They can be altered afterwards by adjusting
363 * the relevant members of the parport structure that is returned
364 * and represents the port. They should not be tampered with
365 * after calling parport_announce_port, however.
367 * If there are parallel port device drivers in the system that
368 * have registered themselves using parport_register_driver(),
369 * they are not told about the port at this time; that is done by
370 * parport_announce_port().
372 * The @ops structure is allocated by the caller, and must not be
373 * deallocated before calling parport_unregister_port().
375 * If there is no memory to allocate a new parport structure,
376 * this function will return %NULL.
379 struct parport *parport_register_port(unsigned long base, int irq, int dma,
380 struct parport_operations *ops)
387 tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
389 printk(KERN_WARNING "parport: memory squeeze\n");
393 /* Search for the lowest free parport number. */
395 spin_lock_irq (&parportlist_lock);
396 for (portnum = 0; ; portnum++) {
397 struct parport *itr = portlist;
399 if (itr->number == portnum)
400 /* No good, already used. */
407 /* Got to the end of the list. */
410 spin_unlock_irq (&parportlist_lock);
412 /* Init our structure */
413 memset(tmp, 0, sizeof(struct parport));
417 tmp->muxport = tmp->daisy = tmp->muxsel = -1;
420 tmp->devices = tmp->cad = NULL;
423 tmp->portnum = tmp->number = portnum;
425 memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
426 tmp->cad_lock = RW_LOCK_UNLOCKED;
427 spin_lock_init(&tmp->waitlist_lock);
428 spin_lock_init(&tmp->pardevice_lock);
429 tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
430 tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
431 init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
432 tmp->spintime = parport_default_spintime;
433 atomic_set (&tmp->ref_count, 1);
435 name = kmalloc(15, GFP_KERNEL);
437 printk(KERN_ERR "parport: memory squeeze\n");
441 sprintf(name, "parport%d", portnum);
445 * Chain the entry to our list.
447 * This function must not run from an irq handler so we don' t need
448 * to clear irq on the local CPU. -arca
451 spin_lock(&parportlist_lock);
453 /* We are locked against anyone else performing alterations, but
454 * because of parport_enumerate people can still _read_ the list
455 * while we are changing it; so be careful..
457 * It's okay to have portlist_tail a little bit out of sync
458 * since it's only used for changing the list, not for reading
463 portlist_tail->next = tmp;
467 spin_unlock(&parportlist_lock);
469 for (device = 0; device < 5; device++)
470 /* assume the worst */
471 tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
473 tmp->waithead = tmp->waittail = NULL;
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	if (parport_daisy_init (port) == 0) {
		/* No devices were detected.  Perhaps they are in some
		   funny state; let's try to reset them and see if
		   they wake up. */
		parport_daisy_fini (port);
		parport_write_control (port, PARPORT_CONTROL_SELECT);
		udelay (50);
		parport_write_control (port,
				       PARPORT_CONTROL_SELECT |
				       PARPORT_CONTROL_INIT);
		udelay (50);
		parport_daisy_init (port);
	}
#endif

	/* Let drivers know that a new port has arrived. */
	attach_driver_chain (port);
}
514 * parport_unregister_port - deregister a parallel port
515 * @port: parallel port to deregister
517 * When a parallel port driver is forcibly unloaded, or a
518 * parallel port becomes inaccessible, the port driver must call
519 * this function in order to deal with device drivers that still
522 * The parport structure associated with the port has its
523 * operations structure replaced with one containing 'null'
524 * operations that return errors or just don't do anything.
526 * Any drivers that have registered themselves using
527 * parport_register_driver() are notified that the port is no
528 * longer accessible by having their detach() routines called
529 * with @port as the parameter.
532 void parport_unregister_port(struct parport *port)
536 port->ops = &dead_ops;
538 /* Spread the word. */
539 detach_driver_chain (port);
541 #ifdef CONFIG_PARPORT_1284
542 /* Forget the IEEE1284.3 topology of the port. */
543 parport_daisy_fini (port);
546 spin_lock(&parportlist_lock);
548 /* We are protected from other people changing the list, but
549 * they can still see it (using parport_enumerate). So be
550 * careful about the order of writes.. */
551 if (portlist == port) {
552 if ((portlist = port->next) == NULL)
553 portlist_tail = NULL;
555 for (p = portlist; (p != NULL) && (p->next != port);
558 if ((p->next = port->next) == NULL)
561 else printk (KERN_WARNING
562 "%s not found in port list!\n", port->name);
564 spin_unlock(&parportlist_lock);
566 /* Yes, parport_enumerate _is_ unsafe. Don't use it. */
567 parport_put_port (port);
571 * parport_register_device - register a device on a parallel port
572 * @port: port to which the device is attached
573 * @name: a name to refer to the device
574 * @pf: preemption callback
575 * @kf: kick callback (wake-up)
576 * @irq_func: interrupt handler
577 * @flags: registration flags
578 * @handle: data for callback functions
580 * This function, called by parallel port device drivers,
581 * declares that a device is connected to a port, and tells the
582 * system all it needs to know.
584 * The @name is allocated by the caller and must not be
585 * deallocated until the caller calls @parport_unregister_device
588 * The preemption callback function, @pf, is called when this
589 * device driver has claimed access to the port but another
590 * device driver wants to use it. It is given @handle as its
591 * parameter, and should return zero if it is willing for the
592 * system to release the port to another driver on its behalf.
593 * If it wants to keep control of the port it should return
594 * non-zero, and no action will be taken. It is good manners for
595 * the driver to try to release the port at the earliest
596 * opportunity after its preemption callback rejects a preemption
597 * attempt. Note that if a preemption callback is happy for
598 * preemption to go ahead, there is no need to release the port;
599 * it is done automatically. This function may not block, as it
600 * may be called from interrupt context. If the device driver
601 * does not support preemption, @pf can be %NULL.
603 * The wake-up ("kick") callback function, @kf, is called when
604 * the port is available to be claimed for exclusive access; that
605 * is, parport_claim() is guaranteed to succeed when called from
606 * inside the wake-up callback function. If the driver wants to
607 * claim the port it should do so; otherwise, it need not take
608 * any action. This function may not block, as it may be called
609 * from interrupt context. If the device driver does not want to
610 * be explicitly invited to claim the port in this way, @kf can
613 * The interrupt handler, @irq_func, is called when an interrupt
614 * arrives from the parallel port. Note that if a device driver
615 * wants to use interrupts it should use parport_enable_irq(),
616 * and can also check the irq member of the parport structure
617 * representing the port.
619 * The parallel port (lowlevel) driver is the one that has called
620 * request_irq() and whose interrupt handler is called first.
621 * This handler does whatever needs to be done to the hardware to
622 * acknowledge the interrupt (for PC-style ports there is nothing
623 * special to be done). It then tells the IEEE 1284 code about
624 * the interrupt, which may involve reacting to an IEEE 1284
625 * event depending on the current IEEE 1284 phase. After this,
626 * it calls @irq_func. Needless to say, @irq_func will be called
627 * from interrupt context, and may not block.
629 * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
630 * so should only be used when sharing the port with other device
631 * drivers is impossible and would lead to incorrect behaviour.
632 * Use it sparingly! Normally, @flags will be zero.
634 * This function returns a pointer to a structure that represents
635 * the device on the port, or %NULL if there is not enough memory
636 * to allocate space for that structure.
640 parport_register_device(struct parport *port, const char *name,
641 int (*pf)(void *), void (*kf)(void *),
642 void (*irq_func)(int, void *, struct pt_regs *),
643 int flags, void *handle)
645 struct pardevice *tmp;
647 if (port->physport->flags & PARPORT_FLAG_EXCL) {
648 /* An exclusive device is registered. */
649 printk (KERN_DEBUG "%s: no more devices allowed\n",
654 if (flags & PARPORT_DEV_LURK) {
656 printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
661 /* We up our own module reference count, and that of the port
662 on which a device is to be registered, to ensure that
663 neither of us gets unloaded while we sleep in (e.g.)
664 kmalloc. To be absolutely safe, we have to require that
665 our caller doesn't sleep in between parport_enumerate and
666 parport_register_device.. */
668 port->ops->inc_use_count();
669 parport_get_port (port);
671 tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
673 printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
677 tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
678 if (tmp->state == NULL) {
679 printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
680 goto out_free_pardevice;
688 tmp->private = handle;
690 tmp->irq_func = irq_func;
692 tmp->timeout = 5 * HZ;
694 /* Chain this onto the list */
697 * This function must not run from an irq handler so we don' t need
698 * to clear irq on the local CPU. -arca
700 spin_lock(&port->physport->pardevice_lock);
702 if (flags & PARPORT_DEV_EXCL) {
703 if (port->physport->devices) {
704 spin_unlock (&port->physport->pardevice_lock);
706 "%s: cannot grant exclusive access for "
707 "device %s\n", port->name, name);
710 port->flags |= PARPORT_FLAG_EXCL;
713 tmp->next = port->physport->devices;
714 wmb(); /* Make sure that tmp->next is written before it's
715 added to the list; see comments marked 'no locking
717 if (port->physport->devices)
718 port->physport->devices->prev = tmp;
719 port->physport->devices = tmp;
720 spin_unlock(&port->physport->pardevice_lock);
722 init_waitqueue_head(&tmp->wait_q);
723 tmp->timeslice = parport_default_timeslice;
724 tmp->waitnext = tmp->waitprev = NULL;
727 * This has to be run as last thing since init_state may need other
728 * pardevice fields. -arca
730 port->ops->init_state(tmp, tmp->state);
731 parport_device_proc_register(tmp);
740 port->ops->dec_use_count();
741 parport_put_port (port);
746 * parport_unregister_device - deregister a device on a parallel port
747 * @dev: pointer to structure representing device
749 * This undoes the effect of parport_register_device().
752 void parport_unregister_device(struct pardevice *dev)
754 struct parport *port;
756 #ifdef PARPORT_PARANOID
758 printk(KERN_ERR "parport_unregister_device: passed NULL\n");
763 parport_device_proc_unregister(dev);
765 port = dev->port->physport;
767 if (port->cad == dev) {
768 printk(KERN_DEBUG "%s: %s forgot to release port\n",
769 port->name, dev->name);
770 parport_release (dev);
773 spin_lock(&port->pardevice_lock);
775 dev->next->prev = dev->prev;
777 dev->prev->next = dev->next;
779 port->devices = dev->next;
781 if (dev->flags & PARPORT_DEV_EXCL)
782 port->flags &= ~PARPORT_FLAG_EXCL;
784 spin_unlock(&port->pardevice_lock);
786 /* Make sure we haven't left any pointers around in the wait
788 spin_lock (&port->waitlist_lock);
789 if (dev->waitprev || dev->waitnext || port->waithead == dev) {
791 dev->waitprev->waitnext = dev->waitnext;
793 port->waithead = dev->waitnext;
795 dev->waitnext->waitprev = dev->waitprev;
797 port->waittail = dev->waitprev;
799 spin_unlock (&port->waitlist_lock);
805 port->ops->dec_use_count();
806 parport_put_port (port);
808 /* Yes, that's right, someone _could_ still have a pointer to
809 * port, if they used parport_enumerate. That's why they
810 * shouldn't use it (and use parport_register_driver instead)..
815 * parport_find_number - find a parallel port by number
816 * @number: parallel port number
818 * This returns the parallel port with the specified number, or
819 * %NULL if there is none.
821 * There is an implicit parport_get_port() done already; to throw
822 * away the reference to the port that parport_find_number()
823 * gives you, use parport_put_port().
826 struct parport *parport_find_number (int number)
828 struct parport *port, *result = NULL;
831 get_lowlevel_driver ();
833 spin_lock (&parportlist_lock);
834 for (port = portlist; port; port = port->next)
835 if (port->number == number) {
836 result = parport_get_port (port);
839 spin_unlock (&parportlist_lock);
844 * parport_find_base - find a parallel port by base address
845 * @base: base I/O address
847 * This returns the parallel port with the specified base
848 * address, or %NULL if there is none.
850 * There is an implicit parport_get_port() done already; to throw
851 * away the reference to the port that parport_find_base()
852 * gives you, use parport_put_port().
855 struct parport *parport_find_base (unsigned long base)
857 struct parport *port, *result = NULL;
860 get_lowlevel_driver ();
862 spin_lock (&parportlist_lock);
863 for (port = portlist; port; port = port->next)
864 if (port->base == base) {
865 result = parport_get_port (port);
868 spin_unlock (&parportlist_lock);
873 * parport_claim - claim access to a parallel port device
874 * @dev: pointer to structure representing a device on the port
876 * This function will not block and so can be used from interrupt
877 * context. If parport_claim() succeeds in claiming access to
878 * the port it returns zero and the port is available to use. It
879 * may fail (returning non-zero) if the port is in use by another
880 * driver and that driver is not willing to relinquish control of
884 int parport_claim(struct pardevice *dev)
886 struct pardevice *oldcad;
887 struct parport *port = dev->port->physport;
890 if (port->cad == dev) {
891 printk(KERN_INFO "%s: %s already owner\n",
892 dev->port->name,dev->name);
896 /* Preempt any current device */
897 write_lock_irqsave (&port->cad_lock, flags);
898 if ((oldcad = port->cad) != NULL) {
899 if (oldcad->preempt) {
900 if (oldcad->preempt(oldcad->private))
902 port->ops->save_state(port, dev->state);
906 if (port->cad != oldcad) {
907 /* I think we'll actually deadlock rather than
908 get here, but just in case.. */
910 "%s: %s released port when preempted!\n",
911 port->name, oldcad->name);
917 /* Can't fail from now on, so mark ourselves as no longer waiting. */
918 if (dev->waiting & 1) {
921 /* Take ourselves out of the wait list again. */
922 spin_lock_irq (&port->waitlist_lock);
924 dev->waitprev->waitnext = dev->waitnext;
926 port->waithead = dev->waitnext;
928 dev->waitnext->waitprev = dev->waitprev;
930 port->waittail = dev->waitprev;
931 spin_unlock_irq (&port->waitlist_lock);
932 dev->waitprev = dev->waitnext = NULL;
935 /* Now we do the change of devices */
938 #ifdef CONFIG_PARPORT_1284
939 /* If it's a mux port, select it. */
940 if (dev->port->muxport >= 0) {
942 port->muxsel = dev->port->muxport;
945 /* If it's a daisy chain device, select it. */
946 if (dev->daisy >= 0) {
947 /* This could be lazier. */
948 if (!parport_daisy_select (port, dev->daisy,
949 IEEE1284_MODE_COMPAT))
950 port->daisy = dev->daisy;
952 #endif /* IEEE1284.3 support */
954 /* Restore control registers */
955 port->ops->restore_state(port, dev->state);
956 write_unlock_irqrestore(&port->cad_lock, flags);
961 /* If this is the first time we tried to claim the port, register an
962 interest. This is only allowed for devices sleeping in
963 parport_claim_or_block(), or those with a wakeup function. */
965 /* The cad_lock is still held for writing here */
966 if (dev->waiting & 2 || dev->wakeup) {
967 spin_lock (&port->waitlist_lock);
968 if (test_and_set_bit(0, &dev->waiting) == 0) {
969 /* First add ourselves to the end of the wait list. */
970 dev->waitnext = NULL;
971 dev->waitprev = port->waittail;
972 if (port->waittail) {
973 port->waittail->waitnext = dev;
974 port->waittail = dev;
976 port->waithead = port->waittail = dev;
978 spin_unlock (&port->waitlist_lock);
980 write_unlock_irqrestore (&port->cad_lock, flags);
985 * parport_claim_or_block - claim access to a parallel port device
986 * @dev: pointer to structure representing a device on the port
988 * This behaves like parport_claim(), but will block if necessary
989 * to wait for the port to be free. A return value of 1
990 * indicates that it slept; 0 means that it succeeded without
991 * needing to sleep. A negative error code indicates failure.
994 int parport_claim_or_block(struct pardevice *dev)
998 /* Signal to parport_claim() that we can wait even without a
1002 /* Try to claim the port. If this fails, we need to sleep. */
1003 r = parport_claim(dev);
1005 unsigned long flags;
1006 #ifdef PARPORT_DEBUG_SHARING
1007 printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
1011 /* If dev->waiting is clear now, an interrupt
1012 gave us the port and we would deadlock if we slept. */
1014 interruptible_sleep_on (&dev->wait_q);
1015 if (signal_pending (current)) {
1016 restore_flags (flags);
1022 #ifdef PARPORT_DEBUG_SHARING
1023 printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
1027 restore_flags(flags);
1028 #ifdef PARPORT_DEBUG_SHARING
1029 if (dev->port->physport->cad != dev)
1030 printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
1031 "but %s owns port!\n", dev->name,
1032 dev->port->physport->cad ?
1033 dev->port->physport->cad->name:"nobody");
1041 * parport_release - give up access to a parallel port device
1042 * @dev: pointer to structure representing parallel port device
1044 * This function cannot fail, but it should not be called without
1045 * the port claimed. Similarly, if the port is already claimed
1046 * you should not try claiming it again.
1049 void parport_release(struct pardevice *dev)
1051 struct parport *port = dev->port->physport;
1052 struct pardevice *pd;
1053 unsigned long flags;
1055 /* Make sure that dev is the current device */
1056 write_lock_irqsave(&port->cad_lock, flags);
1057 if (port->cad != dev) {
1058 write_unlock_irqrestore (&port->cad_lock, flags);
1059 printk(KERN_WARNING "%s: %s tried to release parport "
1060 "when not owner\n", port->name, dev->name);
1064 #ifdef CONFIG_PARPORT_1284
1065 /* If this is on a mux port, deselect it. */
1066 if (dev->port->muxport >= 0) {
1071 /* If this is a daisy device, deselect it. */
1072 if (dev->daisy >= 0) {
1073 parport_daisy_deselect_all (port);
1079 write_unlock_irqrestore(&port->cad_lock, flags);
1081 /* Save control registers */
1082 port->ops->save_state(port, dev->state);
1084 /* If anybody is waiting, find out who's been there longest and
1085 then wake them up. (Note: no locking required) */
1086 /* !!! LOCKING IS NEEDED HERE */
1087 for (pd = port->waithead; pd; pd = pd->waitnext) {
1088 if (pd->waiting & 2) { /* sleeping in claim_or_block */
1090 if (waitqueue_active(&pd->wait_q))
1091 wake_up_interruptible(&pd->wait_q);
1093 } else if (pd->wakeup) {
1094 pd->wakeup(pd->private);
1095 if (dev->port->cad) /* racy but no matter */
1098 printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
1102 /* Nobody was waiting, so walk the list to see if anyone is
1103 interested in being woken up. (Note: no locking required) */
1104 /* !!! LOCKING IS NEEDED HERE */
1105 for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
1106 if (pd->wakeup && pd != dev)
1107 pd->wakeup(pd->private);
1111 static int parport_parse_params (int nports, const char *str[], int val[],
1112 int automatic, int none, int nofifo)
1115 for (i = 0; i < nports && str[i]; i++) {
1116 if (!strncmp(str[i], "auto", 4))
1118 else if (!strncmp(str[i], "none", 4))
1120 else if (nofifo && !strncmp(str[i], "nofifo", 4))
1124 unsigned long r = simple_strtoul(str[i], &ep, 0);
1128 printk(KERN_ERR "parport: bad specifier `%s'\n", str[i]);
1137 int parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
1139 return parport_parse_params (nports, irqstr, irqval, PARPORT_IRQ_AUTO,
1140 PARPORT_IRQ_NONE, 0);
1143 int parport_parse_dmas(int nports, const char *dmastr[], int dmaval[])
1145 return parport_parse_params (nports, dmastr, dmaval, PARPORT_DMA_AUTO,
1146 PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
1148 MODULE_LICENSE("GPL");