2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
4 * PCI Bus Services, see include/linux/pci.h for further explanation.
6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
9 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/string.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/ioport.h>
21 #include <linux/spinlock.h>
23 #include <linux/kmod.h> /* for hotplug_path */
24 #include <linux/bitops.h>
25 #include <linux/delay.h>
26 #include <linux/cache.h>
29 #include <asm/dma.h> /* isa_dma_bridge_buggy */
/* Always-compiled debug printout helper; messages go to the kernel log. */
#define DBG(x...) printk(x)

/* All PCI root buses in the system. */
LIST_HEAD(pci_root_buses);
/* Every PCI device known to the core, linked via dev->global_list. */
LIST_HEAD(pci_devices);
43 * pci_find_slot - locate PCI device from a given PCI slot
44 * @bus: number of PCI bus on which desired PCI device resides
45 * @devfn: encodes number of PCI slot in which the desired PCI
46 * device resides and the logical device number within that slot
47 * in case of multi-function devices.
49 * Given a PCI bus and slot/function number, the desired PCI device
50 * is located in system global list of PCI devices. If the device
51 * is found, a pointer to its data structure is returned. If no
52 * device is found, %NULL is returned.
55 pci_find_slot(unsigned int bus, unsigned int devfn)
59 pci_for_each_dev(dev) {
60 if (dev->bus->number == bus && dev->devfn == devfn)
67 * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
68 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
69 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
70 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
71 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
72 * @from: Previous PCI device found in search, or %NULL for new search.
74 * Iterates through the list of known PCI devices. If a PCI device is
75 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
76 * device structure is returned. Otherwise, %NULL is returned.
77 * A new search is initiated by passing %NULL to the @from argument.
78 * Otherwise if @from is not %NULL, searches continue from next device on the global list.
81 pci_find_subsys(unsigned int vendor, unsigned int device,
82 unsigned int ss_vendor, unsigned int ss_device,
83 const struct pci_dev *from)
85 struct list_head *n = from ? from->global_list.next : pci_devices.next;
87 while (n != &pci_devices) {
88 struct pci_dev *dev = pci_dev_g(n);
89 if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
90 (device == PCI_ANY_ID || dev->device == device) &&
91 (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) &&
92 (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device))
101 * pci_find_device - begin or continue searching for a PCI device by vendor/device id
102 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
103 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
104 * @from: Previous PCI device found in search, or %NULL for new search.
106 * Iterates through the list of known PCI devices. If a PCI device is
107 * found with a matching @vendor and @device, a pointer to its device structure is
108 * returned. Otherwise, %NULL is returned.
109 * A new search is initiated by passing %NULL to the @from argument.
110 * Otherwise if @from is not %NULL, searches continue from next device on the global list.
113 pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
115 return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
120 * pci_find_class - begin or continue searching for a PCI device by class
121 * @class: search for a PCI device with this class designation
122 * @from: Previous PCI device found in search, or %NULL for new search.
124 * Iterates through the list of known PCI devices. If a PCI device is
125 * found with a matching @class, a pointer to its device structure is
126 * returned. Otherwise, %NULL is returned.
127 * A new search is initiated by passing %NULL to the @from argument.
128 * Otherwise if @from is not %NULL, searches continue from next device
129 * on the global list.
132 pci_find_class(unsigned int class, const struct pci_dev *from)
134 struct list_head *n = from ? from->global_list.next : pci_devices.next;
136 while (n != &pci_devices) {
137 struct pci_dev *dev = pci_dev_g(n);
138 if (dev->class == class)
146 * pci_find_capability - query for devices' capabilities
147 * @dev: PCI device to query
148 * @cap: capability code
150 * Tell if a device supports a given PCI capability.
151 * Returns the address of the requested capability structure within the
152 * device's PCI configuration space or 0 in case the device does not
153 * support it. Possible values for @cap:
155 * %PCI_CAP_ID_PM Power Management
157 * %PCI_CAP_ID_AGP Accelerated Graphics Port
159 * %PCI_CAP_ID_VPD Vital Product Data
161 * %PCI_CAP_ID_SLOTID Slot Identification
163 * %PCI_CAP_ID_MSI Message Signalled Interrupts
165 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
167 * %PCI_CAP_ID_PCIX PCI-X
170 pci_find_capability(struct pci_dev *dev, int cap)
176 pci_read_config_word(dev, PCI_STATUS, &status);
177 if (!(status & PCI_STATUS_CAP_LIST))
179 switch (dev->hdr_type) {
180 case PCI_HEADER_TYPE_NORMAL:
181 case PCI_HEADER_TYPE_BRIDGE:
182 pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &pos);
184 case PCI_HEADER_TYPE_CARDBUS:
185 pci_read_config_byte(dev, PCI_CB_CAPABILITY_LIST, &pos);
190 while (ttl-- && pos >= 0x40) {
192 pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
197 pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
204 * pci_find_parent_resource - return resource region of parent bus of given region
205 * @dev: PCI device structure contains resources to be searched
206 * @res: child resource record for which parent is sought
208 * For given resource region of given device, return the resource
209 * region of parent bus the given region is contained in or where
210 * it should be allocated from.
213 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
215 const struct pci_bus *bus = dev->bus;
217 struct resource *best = NULL;
220 struct resource *r = bus->resource[i];
223 if (res->start && !(res->start >= r->start && res->end <= r->end))
224 continue; /* Not contained */
225 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
226 continue; /* Wrong type */
227 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
228 return r; /* Exact match */
229 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
230 best = r; /* Approximating prefetchable by non-prefetchable */
236 * pci_set_power_state - Set the power state of a PCI device
237 * @dev: PCI device to be suspended
238 * @state: Power state we're entering
240 * Transition a device to a new power state, using the Power Management
241 * Capabilities in the device's config space.
244 * -EINVAL if trying to enter a lower state than we're already in.
245 * 0 if we're already in the requested state.
246 * -EIO if device does not support PCI PM.
247 * 0 if we can successfully change the power state.
251 pci_set_power_state(struct pci_dev *dev, int state)
256 /* bound the state we're entering */
257 if (state > 3) state = 3;
259 /* Validate current state:
260 * Can enter D0 from any state, but if we can only go deeper
261 * to sleep if we're already in a low power state
263 if (state > 0 && dev->current_state > state)
265 else if (dev->current_state == state)
266 return 0; /* we're already there */
268 /* find PCI PM capability in list */
269 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
271 /* abort if the device doesn't support PM capabilities */
272 if (!pm) return -EIO;
274 /* check if this device supports the desired state */
275 if (state == 1 || state == 2) {
277 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
278 if (state == 1 && !(pmc & PCI_PM_CAP_D1)) return -EIO;
279 else if (state == 2 && !(pmc & PCI_PM_CAP_D2)) return -EIO;
282 /* If we're in D3, force entire word to 0.
283 * This doesn't affect PME_Status, disables PME_En, and
284 * sets PowerState to 0.
286 if (dev->current_state >= 3)
289 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
290 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
294 /* enter specified state */
295 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
297 /* Mandatory power management transition delays */
298 /* see PCI PM 1.1 5.6.1 table 18 */
299 if(state == 3 || dev->current_state == 3)
301 set_current_state(TASK_UNINTERRUPTIBLE);
302 schedule_timeout(HZ/100);
304 else if(state == 2 || dev->current_state == 2)
306 dev->current_state = state;
312 * pci_save_state - save the PCI configuration space of a device before suspending
313 * @dev: - PCI device that we're dealing with
314 * @buffer: - buffer to hold config space context
316 * @buffer must be large enough to hold the entire PCI 2.2 config space
320 pci_save_state(struct pci_dev *dev, u32 *buffer)
324 /* XXX: 100% dword access ok here? */
325 for (i = 0; i < 16; i++)
326 pci_read_config_dword(dev, i * 4,&buffer[i]);
332 * pci_restore_state - Restore the saved state of a PCI device
333 * @dev: - PCI device that we're dealing with
334 * @buffer: - saved PCI config space
338 pci_restore_state(struct pci_dev *dev, u32 *buffer)
343 for (i = 0; i < 16; i++)
344 pci_write_config_dword(dev,i * 4, buffer[i]);
347 * otherwise, write the context information we know from bootup.
348 * This works around a problem where warm-booting from Windows
349 * combined with a D3(hot)->D0 transition causes PCI config
350 * header data to be forgotten.
353 for (i = 0; i < 6; i ++)
354 pci_write_config_dword(dev,
355 PCI_BASE_ADDRESS_0 + (i * 4),
356 dev->resource[i].start);
357 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BAR's that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable selected I/O and memory resources. Wake up the device if it
 * was suspended. Beware, this function can fail.
 *
 * Returns 0 on success, or a negative error from pcibios_enable_device().
 */
int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
	int err;

	/* Force the device to D0 before touching its decoders. */
	pci_set_power_state(dev, 0);
	if ((err = pcibios_enable_device(dev, bars)) < 0)
		return err;
	return 0;
}
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
	/* 0x3F = all six standard BARs. */
	return pci_enable_device_bars(dev, 0x3F);
}
398 * pci_disable_device - Disable PCI device after use
399 * @dev: PCI device to be disabled
401 * Signal to the system that the PCI device is not in use by the system
402 * anymore. This only involves disabling PCI bus-mastering, if active.
405 pci_disable_device(struct pci_dev *dev)
409 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
410 if (pci_command & PCI_COMMAND_MASTER) {
411 pci_command &= ~PCI_COMMAND_MASTER;
412 pci_write_config_word(dev, PCI_COMMAND, pci_command);
417 * pci_enable_wake - enable device to generate PME# when suspended
418 * @dev: - PCI device to operate on
419 * @state: - Current state of device.
420 * @enable: - Flag to enable or disable generation
422 * Set the bits in the device's PM Capabilities to generate PME# when
423 * the system is suspended.
425 * -EIO is returned if device doesn't have PM Capabilities.
426 * -EINVAL is returned if device supports it, but can't generate wake events.
427 * 0 if operation is successful.
430 int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
435 /* find PCI PM capability in list */
436 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
438 /* If device doesn't support PM Capabilities, but request is to disable
439 * wake events, it's a nop; otherwise fail */
441 return enable ? -EIO : 0;
443 /* Check device's ability to generate PME# */
444 pci_read_config_word(dev,pm+PCI_PM_PMC,&value);
446 value &= PCI_PM_CAP_PME_MASK;
447 value >>= ffs(value); /* First bit of mask */
449 /* Check if it can generate PME# from requested state. */
450 if (!value || !(value & (1 << state)))
451 return enable ? -EINVAL : 0;
453 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
455 /* Clear PME_Status by writing 1 to it and enable PME# */
456 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
459 value &= ~PCI_PM_CTRL_PME_ENABLE;
461 pci_write_config_word(dev, pm + PCI_PM_CTRL, value);
467 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
471 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
475 while (dev->bus->self) {
476 pin = (pin + PCI_SLOT(dev->devfn)) % 4;
477 dev = dev->bus->self;
484 * pci_release_region - Release a PCI bar
485 * @pdev: PCI device whose resources were previously reserved by pci_request_region
486 * @bar: BAR to release
488 * Releases the PCI I/O and memory resources previously reserved by a
489 * successful call to pci_request_region. Call this function only
490 * after all use of the PCI regions has ceased.
492 void pci_release_region(struct pci_dev *pdev, int bar)
494 if (pci_resource_len(pdev, bar) == 0)
496 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
497 release_region(pci_resource_start(pdev, bar),
498 pci_resource_len(pdev, bar));
499 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
500 release_mem_region(pci_resource_start(pdev, bar),
501 pci_resource_len(pdev, bar));
505 * pci_request_region - Reserved PCI I/O and memory resource
506 * @pdev: PCI device whose resources are to be reserved
507 * @bar: BAR to be reserved
508 * @res_name: Name to be associated with resource.
510 * Mark the PCI region associated with PCI device @pdev BR @bar as
511 * being reserved by owner @res_name. Do not access any
512 * address inside the PCI regions unless this call returns
515 * Returns 0 on success, or %EBUSY on error. A warning
516 * message is also printed on failure.
518 int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
520 if (pci_resource_len(pdev, bar) == 0)
523 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
524 if (!request_region(pci_resource_start(pdev, bar),
525 pci_resource_len(pdev, bar), res_name))
528 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
529 if (!request_mem_region(pci_resource_start(pdev, bar),
530 pci_resource_len(pdev, bar), res_name))
537 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
538 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
539 bar + 1, /* PCI BAR # */
540 pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions. Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	int i;

	/* All six standard BARs. */
	for (i = 0; i < 6; i++)
		pci_release_region(pdev, i);
}
564 * pci_request_regions - Reserved PCI I/O and memory resources
565 * @pdev: PCI device whose resources are to be reserved
566 * @res_name: Name to be associated with resource.
568 * Mark all PCI regions associated with PCI device @pdev as
569 * being reserved by owner @res_name. Do not access any
570 * address inside the PCI regions unless this call returns
573 * Returns 0 on success, or %EBUSY on error. A warning
574 * message is also printed on failure.
576 int pci_request_regions(struct pci_dev *pdev, char *res_name)
580 for (i = 0; i < 6; i++)
581 if(pci_request_region(pdev, i, res_name))
586 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
587 pci_resource_flags(pdev, i) & IORESOURCE_IO ? "I/O" : "mem",
588 i + 1, /* PCI BAR # */
589 pci_resource_len(pdev, i), pci_resource_start(pdev, i),
592 pci_release_region(pdev, i);
/*
 * Registration of PCI drivers and handling of hot-pluggable devices.
 */

/* All currently registered PCI drivers, linked via pci_driver::node. */
static LIST_HEAD(pci_drivers);
605 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
606 * @ids: array of PCI device id structures to search in
607 * @dev: the PCI device structure to match against
609 * Used by a driver to check whether a PCI device present in the
610 * system is in its list of supported devices.Returns the matching
611 * pci_device_id structure or %NULL if there is no match.
613 const struct pci_device_id *
614 pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev)
616 while (ids->vendor || ids->subvendor || ids->class_mask) {
617 if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
618 (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
619 (ids->subvendor == PCI_ANY_ID || ids->subvendor == dev->subsystem_vendor) &&
620 (ids->subdevice == PCI_ANY_ID || ids->subdevice == dev->subsystem_device) &&
621 !((ids->class ^ dev->class) & ids->class_mask))
629 pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
631 const struct pci_device_id *id;
635 id = pci_match_device(drv->id_table, dev);
644 if (drv->probe(dev, id) >= 0) {
654 * pci_register_driver - register a new pci driver
655 * @drv: the driver structure to register
657 * Adds the driver structure to the list of registered drivers
658 * Returns the number of pci devices which were claimed by the driver
659 * during registration. The driver remains registered even if the
660 * return value is zero.
663 pci_register_driver(struct pci_driver *drv)
668 list_add_tail(&drv->node, &pci_drivers);
669 pci_for_each_dev(dev) {
670 if (!pci_dev_driver(dev))
671 count += pci_announce_device(drv, dev);
677 * pci_unregister_driver - unregister a pci driver
678 * @drv: the driver structure to unregister
680 * Deletes the driver structure from the list of registered PCI drivers,
681 * gives it a chance to clean up by calling its remove() function for
682 * each device it was responsible for, and marks those devices as
687 pci_unregister_driver(struct pci_driver *drv)
691 list_del(&drv->node);
692 pci_for_each_dev(dev) {
693 if (dev->driver == drv) {
#ifdef CONFIG_HOTPLUG
/* Boolean helper for run_sbin_hotplug().  NOTE(review): FALSE is
 * presumably defined (as 0) on a line not visible here -- confirm. */
#define TRUE (!FALSE)
709 run_sbin_hotplug(struct pci_dev *pdev, int insert)
712 char *argv[3], *envp[8];
713 char id[20], sub_id[24], bus_id[24], class_id[20];
715 if (!hotplug_path[0])
718 sprintf(class_id, "PCI_CLASS=%04X", pdev->class);
719 sprintf(id, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device);
720 sprintf(sub_id, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device);
721 sprintf(bus_id, "PCI_SLOT_NAME=%s", pdev->slot_name);
724 argv[i++] = hotplug_path;
729 /* minimal command environment */
730 envp[i++] = "HOME=/";
731 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
733 /* other stuff we want to pass to /sbin/hotplug */
734 envp[i++] = class_id;
739 envp[i++] = "ACTION=add";
741 envp[i++] = "ACTION=remove";
744 call_usermodehelper (argv [0], argv, envp);
748 * pci_announce_device_to_drivers - tell the drivers a new device has appeared
749 * @dev: the device that has shown up
751 * Notifys the drivers that a new device has appeared, and also notifys
752 * userspace through /sbin/hotplug.
755 pci_announce_device_to_drivers(struct pci_dev *dev)
757 struct list_head *ln;
759 for(ln=pci_drivers.next; ln != &pci_drivers; ln=ln->next) {
760 struct pci_driver *drv = list_entry(ln, struct pci_driver, node);
761 if (drv->remove && pci_announce_device(drv, dev))
765 /* notify userspace of new hotplug device */
766 run_sbin_hotplug(dev, TRUE);
770 * pci_insert_device - insert a hotplug device
771 * @dev: the device to insert
772 * @bus: where to insert it
774 * Add a new device to the device lists and notify userspace (/sbin/hotplug).
777 pci_insert_device(struct pci_dev *dev, struct pci_bus *bus)
779 list_add_tail(&dev->bus_list, &bus->devices);
780 list_add_tail(&dev->global_list, &pci_devices);
781 #ifdef CONFIG_PROC_FS
782 pci_proc_attach_device(dev);
784 pci_announce_device_to_drivers(dev);
788 pci_free_resources(struct pci_dev *dev)
792 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
793 struct resource *res = dev->resource + i;
795 release_resource(res);
800 * pci_remove_device - remove a hotplug device
801 * @dev: the device to remove
803 * Delete the device structure from the device lists and
804 * notify userspace (/sbin/hotplug).
807 pci_remove_device(struct pci_dev *dev)
810 if (dev->driver->remove)
811 dev->driver->remove(dev);
814 list_del(&dev->bus_list);
815 list_del(&dev->global_list);
816 pci_free_resources(dev);
817 #ifdef CONFIG_PROC_FS
818 pci_proc_detach_device(dev);
821 /* notify userspace of hotplug device removal */
822 run_sbin_hotplug(dev, FALSE);
827 static struct pci_driver pci_compat_driver = {
832 * pci_dev_driver - get the pci_driver of a device
833 * @dev: the device to query
835 * Returns the appropriate pci_driver structure or %NULL if there is no
836 * registered driver for the device.
839 pci_dev_driver(const struct pci_dev *dev)
845 for(i=0; i<=PCI_ROM_RESOURCE; i++)
846 if (dev->resource[i].flags & IORESOURCE_BUSY)
847 return &pci_compat_driver;
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space (serializes the PCI_OP wrappers below).
 */
static spinlock_t pci_lock = SPIN_LOCK_UNLOCKED;
861 * Wrappers for all PCI configuration access functions. They just check
862 * alignment, do locking and call the low-level functions pointed to
866 #define PCI_byte_BAD 0
867 #define PCI_word_BAD (pos & 1)
868 #define PCI_dword_BAD (pos & 3)
870 #define PCI_OP(rw,size,type) \
871 int pci_##rw##_config_##size (struct pci_dev *dev, int pos, type value) \
874 unsigned long flags; \
875 if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
876 spin_lock_irqsave(&pci_lock, flags); \
877 res = dev->bus->ops->rw##_##size(dev, pos, value); \
878 spin_unlock_irqrestore(&pci_lock, flags); \
882 PCI_OP(read, byte, u8 *)
883 PCI_OP(read, word, u16 *)
884 PCI_OP(read, dword, u32 *)
885 PCI_OP(write, byte, u8)
886 PCI_OP(write, word, u16)
887 PCI_OP(write, dword, u32)
890 * pci_set_master - enables bus-mastering for device dev
891 * @dev: the PCI device to enable
893 * Enables bus-mastering on the device and calls pcibios_set_master()
894 * to do the needed arch specific settings.
897 pci_set_master(struct pci_dev *dev)
901 pci_read_config_word(dev, PCI_COMMAND, &cmd);
902 if (! (cmd & PCI_COMMAND_MASTER)) {
903 DBG("PCI: Enabling bus mastering for device %s\n", dev->slot_name);
904 cmd |= PCI_COMMAND_MASTER;
905 pci_write_config_word(dev, PCI_COMMAND, cmd);
907 pcibios_set_master(dev);
910 #ifndef HAVE_ARCH_PCI_MWI
911 /* This can be overridden by arch code. */
912 u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
915 * pci_generic_prep_mwi - helper function for pci_set_mwi
916 * @dev: the PCI device for which MWI is enabled
918 * Helper function for implementation the arch-specific pcibios_set_mwi
919 * function. Originally copied from drivers/net/acenic.c.
920 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
922 * RETURNS: An appriopriate -ERRNO error value on eror, or zero for success.
925 pci_generic_prep_mwi(struct pci_dev *dev)
929 if (!pci_cache_line_size)
930 return -EINVAL; /* The system doesn't support MWI. */
932 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
933 equal to or multiple of the right value. */
934 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
935 if (cacheline_size >= pci_cache_line_size &&
936 (cacheline_size % pci_cache_line_size) == 0)
939 /* Write the correct value. */
940 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
942 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
943 if (cacheline_size == pci_cache_line_size)
946 printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
947 "by device %s\n", pci_cache_line_size << 2, dev->slot_name);
951 #endif /* !HAVE_ARCH_PCI_MWI */
954 * pci_set_mwi - enables memory-write-invalidate PCI transaction
955 * @dev: the PCI device for which MWI is enabled
957 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
958 * and then calls @pcibios_set_mwi to do the needed arch specific
959 * operations or a generic mwi-prep function.
961 * RETURNS: An appriopriate -ERRNO error value on eror, or zero for success.
964 pci_set_mwi(struct pci_dev *dev)
969 #ifdef HAVE_ARCH_PCI_MWI
970 rc = pcibios_set_mwi(dev);
972 rc = pci_generic_prep_mwi(dev);
978 pci_read_config_word(dev, PCI_COMMAND, &cmd);
979 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
980 DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", dev->slot_name);
981 cmd |= PCI_COMMAND_INVALIDATE;
982 pci_write_config_word(dev, PCI_COMMAND, cmd);
989 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
990 * @dev: the PCI device to disable
992 * Disables PCI Memory-Write-Invalidate transaction on the device
995 pci_clear_mwi(struct pci_dev *dev)
999 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1000 if (cmd & PCI_COMMAND_INVALIDATE) {
1001 cmd &= ~PCI_COMMAND_INVALIDATE;
1002 pci_write_config_word(dev, PCI_COMMAND, cmd);
1007 pci_set_dma_mask(struct pci_dev *dev, u64 mask)
1009 if (!pci_dma_supported(dev, mask))
1012 dev->dma_mask = mask;
1018 pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
1020 if (!pci_dac_dma_supported(dev, mask))
1023 dev->dma_mask = mask;
1029 * Translate the low bits of the PCI base
1030 * to the resource type
1032 static inline unsigned int pci_calc_resource_flags(unsigned int flags)
1034 if (flags & PCI_BASE_ADDRESS_SPACE_IO)
1035 return IORESOURCE_IO;
1037 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
1038 return IORESOURCE_MEM | IORESOURCE_PREFETCH;
1040 return IORESOURCE_MEM;
1044 * Find the extent of a PCI decode, do sanity checks.
1046 static u32 pci_size(u32 base, u32 maxbase, unsigned long mask)
1048 u32 size = mask & maxbase; /* Find the significant bits */
1051 size = size & ~(size-1); /* Get the lowest of them to find the decode size */
1052 size -= 1; /* extent = size - 1 */
1053 if (base == maxbase && ((base | size) & mask) != mask)
1054 return 0; /* base == maxbase can be valid only
1055 if the BAR has been already
1056 programmed with all 1s */
/*
 * Probe the base address registers of @dev: save each BAR, write all 1s
 * to size the decoder, read the mask back and restore the original
 * value; then fill in dev->resource[] accordingly.  When @rom is
 * nonzero it is the config offset of the expansion ROM BAR, which is
 * sized the same way.
 *
 * NOTE(review): this copy of the function is missing several lines
 * (declarations of l/sz, loop-control statements, else branches and
 * closing braces); the comments below describe the visible intent only.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
	unsigned int pos, reg, next;
	struct resource *res;

	/* One pass per BAR; a 64-bit BAR consumes two slots (via next). */
	for(pos=0; pos<howmany; pos = next) {
		res = &dev->resource[pos];
		res->name = dev->name;
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* Size the BAR: save, write ~0, read back mask, restore. */
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		/* Unimplemented BAR (reads back 0 or all 1s) -- skip it. */
		if (!sz || sz == 0xffffffff)
		if (l == 0xffffffff)
		/* Memory BAR: strip the low type bits off start/flags. */
		if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
			/* I/O BAR: only the low 16 size bits are valid. */
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		/* 64-bit memory BAR: the following dword holds bits 63:32. */
		if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
		    == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			pci_read_config_dword(dev, reg+4, &l);
#if BITS_PER_LONG == 64
			res->start |= ((unsigned long) l) << 32;
			res->end = res->start + sz;
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &sz);
			pci_write_config_dword(dev, reg+4, l);
			res->end = res->start + 0xffffffff +
				(((unsigned long) ~sz) << 32);
			/* 32-bit kernel cannot use an address above 4G. */
			printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", dev->slot_name);
	/* Expansion ROM BAR: bit 0 is the decode-enable, not a type bit. */
	dev->rom_base_reg = rom;
	res = &dev->resource[PCI_ROM_RESOURCE];
	res->name = dev->name;
	pci_read_config_dword(dev, rom, &l);
	pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
	pci_read_config_dword(dev, rom, &sz);
	pci_write_config_dword(dev, rom, l);
	if (l == 0xffffffff)
	if (sz && sz != 0xffffffff) {
		sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK);
		/* ROMs are always read-only, prefetchable memory. */
		res->flags = (l & PCI_ROM_ADDRESS_ENABLE) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		res->start = l & PCI_ROM_ADDRESS_MASK;
		res->end = res->start + (unsigned long) sz;
/*
 * Read the I/O, memory and prefetchable-memory windows of the bridge
 * leading to @child and record them in child->resource[0..2].
 * Transparent bridges simply inherit the parent's resources.
 *
 * NOTE(review): this copy of the function is missing several lines
 * (loop headers, else branches, res->start assignments and closing
 * braces); comments describe the visible intent only.
 */
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;

	if (!dev)		/* It's a host bus, nothing to read */
	if (dev->transparent) {
		/* Subtractive-decode bridge: forward the parent's windows. */
		printk("Transparent bridge - %s\n", dev->name);
		for(i = 0; i < 4; i++)
			child->resource[i] = child->parent->resource[i];
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	/* Window 0: I/O, in units of 256 bytes (bits 15:12 of the address
	 * come from the base/limit registers, low 12 bits are implied). */
	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	/* 32-bit I/O window: upper 16 bits live in separate registers. */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);

	if (base && base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		res->end = limit + 0xfff;	/* limit covers a full 4K block */

	/* Window 1: non-prefetchable memory, 1MB granularity. */
	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->end = limit + 0xfffff;	/* limit covers a full 1M block */

	/* Window 2: prefetchable memory, possibly 64-bit. */
	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
#if BITS_PER_LONG == 64
		base |= ((long) mem_base_hi) << 32;
		limit |= ((long) mem_limit_hi) << 32;
		/* 32-bit kernel: can't represent a window above 4G. */
		if (mem_base_hi || mem_limit_hi) {
			printk(KERN_ERR "PCI: Unable to handle 64-bit address space for %s\n", child->name);
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->end = limit + 0xfffff;
1219 static struct pci_bus * __devinit pci_alloc_bus(void)
1223 b = kmalloc(sizeof(*b), GFP_KERNEL);
1225 memset(b, 0, sizeof(*b));
1226 INIT_LIST_HEAD(&b->children);
1227 INIT_LIST_HEAD(&b->devices);
/*
 * Create a child bus under @parent behind bridge device @dev, numbered
 * @busnr.  The child inherits the parent's config-access ops and
 * sysdata, is linked onto the parent's child list, and is pointed at
 * the bridge's window resources.  (Some intermediate lines are elided
 * from this view.)
 */
1232 struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
1234 struct pci_bus *child;
1238 * Allocate a new bus, and inherit stuff from the parent..
1240 child = pci_alloc_bus();
1244 list_add_tail(&child->node, &parent->children);
1246 dev->subordinate = child;
1247 child->parent = parent;
1248 child->ops = parent->ops;
1249 child->sysdata = parent->sysdata;
1252 * Set up the primary, secondary and subordinate
/* Subordinate starts pessimistically at 0xff; the caller shrinks it
 * once the scan behind the bridge knows the real highest bus number. */
1255 child->number = child->secondary = busnr;
1256 child->primary = parent->secondary;
1257 child->subordinate = 0xff;
1259 /* Set up default resource pointers and names.. */
/* The four bridge windows live in the bridge device's resource table
 * starting at PCI_BRIDGE_RESOURCES; the bus just aliases them. */
1260 for (i = 0; i < 4; i++) {
1261 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
1262 child->resource[i]->name = child->name;
1268 unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus);
1271 * If it's a bridge, configure it and scan the bus behind it.
1272 * For CardBus bridges, we don't scan behind as the devices will
1273 * be handled by the bridge driver itself.
1275 * We need to process bridges in two passes -- first we scan those
1276 * already configured by the BIOS and after we are done with all of
1277 * them, we proceed to assigning numbers to the remaining buses in
1278 * order to avoid overlaps between old and new bus numbers.
/*
 * Scan the bus behind a PCI-to-PCI (or CardBus) bridge.
 *
 * Two-pass scheme (see comment above): pass 0 handles bridges the
 * firmware already configured (non-zero secondary/subordinate fields);
 * pass 1 assigns fresh bus numbers to the rest.  Returns the highest
 * bus number found so far via @max.  NOTE(review): several closing
 * braces / early returns are elided from this view.
 */
1280 static int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
1284 struct pci_bus *child;
1285 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
1287 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
1288 DBG("Scanning behind PCI bridge %s, config %06x, pass %d\n", dev->slot_name, buses & 0xffffff, pass);
/* Secondary/subordinate bytes non-zero => firmware configured it. */
1289 if ((buses & 0xffff00) && !pcibios_assign_all_busses()) {
1291 * Bus already configured by firmware, process it in the first
1292 * pass and just note the configuration.
1297 child = pci_add_new_bus(bus, dev, 0);
/* PCI_PRIMARY_BUS dword layout: primary | secondary<<8 | subordinate<<16 */
1301 child->primary = buses & 0xFF;
1302 child->secondary = (buses >> 8) & 0xFF;
1303 child->subordinate = (buses >> 16) & 0xFF;
1304 child->number = child->secondary;
1306 unsigned int cmax = pci_do_scan_bus(child);
1307 if (cmax > max) max = cmax;
1309 unsigned int cmax = child->subordinate;
1310 if (cmax > max) max = cmax;
1314 * We need to assign a number to this bus which we always
1315 * do in the second pass. We also keep all address decoders
1316 * on the bridge disabled during scanning. FIXME: Why?
/* Save PCI_COMMAND so decoders can be restored after the scan. */
1320 pci_read_config_word(dev, PCI_COMMAND, &cr);
1321 pci_write_config_word(dev, PCI_COMMAND, 0x0000);
1322 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1324 child = pci_add_new_bus(bus, dev, ++max);
1328 buses = (buses & 0xff000000)
1329 | ((unsigned int)(child->primary) << 0)
1330 | ((unsigned int)(child->secondary) << 8)
1331 | ((unsigned int)(child->subordinate) << 16);
1333 * We need to blast all three values with a single write.
1335 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1337 /* Now we can scan all subordinate buses... */
1338 max = pci_do_scan_bus(child);
1341 * For CardBus bridges, we leave 4 bus numbers
1342 * as cards with a PCI-to-PCI bridge can be
1348 * Set the subordinate bus number to its real value.
1350 child->subordinate = max;
1351 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1352 pci_write_config_word(dev, PCI_COMMAND, cr);
1354 sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
1359 * Read interrupt line and base address registers.
1360 * The architecture-dependent code can tweak these, of course.
/*
 * Read the interrupt pin and line registers from config space.
 * NOTE(review): the stores into dev->irq (and any pin check between the
 * two reads) are elided from this view.
 */
1362 static void pci_read_irq(struct pci_dev *dev)
1366 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1368 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1373 * pci_setup_device - fill in class and map information of a device
1374 * @dev: the device structure to fill
1376 * Initialize the device structure with information about the device's
1377 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1378 * Called at initialisation of the PCI subsystem and by CardBus services.
1379 * Returns 0 on success and -1 if unknown type of device (not normal, bridge
/*
 * Fill in slot name, device name, class, BARs, and subsystem IDs for
 * @dev according to its config-space header type.  Sanity-checks that
 * the class code matches the header type; on mismatch the class is
 * cleared to PCI_CLASS_NOT_DEFINED.  NOTE(review): the per-case
 * break/goto statements and the final return are elided from this view.
 */
1382 int pci_setup_device(struct pci_dev * dev)
1386 sprintf(dev->slot_name, "%02x:%02x.%d", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
1387 sprintf(dev->name, "PCI device %04x:%04x", dev->vendor, dev->device);
1389 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1390 class >>= 8; /* upper 3 bytes */
1394 DBG("Found %02x:%02x [%04x/%04x] %06x %02x\n", dev->bus->number, dev->devfn, dev->vendor, dev->device, class, dev->hdr_type);
1396 /* "Unknown power state" */
1397 dev->current_state = 4;
1399 switch (dev->hdr_type) { /* header type */
1400 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1401 if (class == PCI_CLASS_BRIDGE_PCI)
/* Normal header: 6 BARs plus expansion ROM at PCI_ROM_ADDRESS. */
1404 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1405 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1406 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1409 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1410 if (class != PCI_CLASS_BRIDGE_PCI)
1412 /* The PCI-to-PCI bridge spec requires that subtractive
1413 decoding (i.e. transparent) bridge must have programming
1414 interface code of 0x01. */
1415 dev->transparent = ((dev->class & 0xff) == 1);
/* Bridge header: only 2 BARs; ROM register is at PCI_ROM_ADDRESS1. */
1416 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1419 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1420 if (class != PCI_CLASS_BRIDGE_CARDBUS)
/* CardBus header: 1 BAR, no expansion ROM register. */
1423 pci_read_bases(dev, 1, 0);
1424 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1425 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1428 default: /* unknown header */
1429 printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
1430 dev->slot_name, dev->hdr_type);
1434 printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
1435 dev->slot_name, class, dev->hdr_type);
1436 dev->class = PCI_CLASS_NOT_DEFINED;
1439 /* We found a fine healthy device, go go go... */
1444 * Read the config data for a PCI device, sanity-check it
1445 * and fill in the dev structure...
/*
 * Probe one function: read the vendor/device dword from the template
 * @temp, reject empty/broken slots, then allocate a real pci_dev as a
 * copy of the template and finish it with pci_setup_device().
 * NOTE(review): the NULL returns on failed reads/allocations and the
 * final return of dev are elided from this view.
 */
1447 struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
1449 struct pci_dev *dev;
1452 if (pci_read_config_dword(temp, PCI_VENDOR_ID, &l))
1455 /* some broken boards return 0 or ~0 if a slot is empty: */
1456 if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
1459 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
1463 memcpy(dev, temp, sizeof(*dev));
1464 dev->vendor = l & 0xffff;
1465 dev->device = (l >> 16) & 0xffff;
1467 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1468 set this higher, assuming the system even supports it. */
1469 dev->dma_mask = 0xffffffff;
1470 if (pci_setup_device(dev) < 0) {
/*
 * Scan all eight functions of one slot using template @temp (whose
 * devfn is advanced per function).  Each discovered device is named,
 * linked onto the global and per-bus device lists, and run through the
 * header fixups.  Single-function devices stop the scan after function
 * 0 (unless pcibios_scan_all_fns()).  NOTE(review): several control-
 * flow lines (continue/break, first_dev bookkeeping, return) are
 * elided from this view.
 */
1477 struct pci_dev * __devinit pci_scan_slot(struct pci_dev *temp)
1479 struct pci_bus *bus = temp->bus;
1480 struct pci_dev *dev;
1481 struct pci_dev *first_dev = NULL;
1486 for (func = 0; func < 8; func++, temp->devfn++) {
1487 if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
/* Bit 7 of the header type is the multi-function flag; mask it off. */
1489 temp->hdr_type = hdr_type & 0x7f;
1491 dev = pci_scan_device(temp);
1492 if (!pcibios_scan_all_fns() && func == 0) {
1501 pci_name_device(dev);
1503 is_multi = hdr_type & 0x80;
1508 * Link the device to both the global PCI device chain and
1509 * the per-bus list of devices.
1511 list_add_tail(&dev->global_list, &pci_devices);
1512 list_add_tail(&dev->bus_list, &bus->devices);
1514 /* Fix up broken headers */
1515 pci_fixup_device(PCI_FIXUP_HEADER, dev);
1518 * If this is a single function device
1519 * don't scan past the first function.
/*
 * Scan every slot on @bus using a stack-allocated template device,
 * run the arch fixups, then walk all bridges in two passes (see
 * pci_scan_bridge).  Returns the highest subordinate bus number
 * reached.  NOTE(review): the lines setting dev0.bus/devfn and the
 * final return are elided from this view.
 */
1528 unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
1530 unsigned int devfn, max, pass;
1531 struct list_head *ln;
1532 struct pci_dev *dev, dev0;
1534 DBG("Scanning bus %02x\n", bus->number)
1535 max = bus->secondary;
1537 /* Create a device template */
1538 memset(&dev0, 0, sizeof(dev0));
1540 dev0.sysdata = bus->sysdata;
1542 /* Go find them, Rover! */
/* devfn steps by 8: one slot (device) per iteration, functions inside. */
1543 for (devfn = 0; devfn < 0x100; devfn += 8) {
1545 pci_scan_slot(&dev0);
1549 * After performing arch-dependent fixup of the bus, look behind
1550 * all PCI-to-PCI bridges on this bus.
1552 DBG("Fixups for bus %02x\n", bus->number);
1553 pcibios_fixup_bus(bus);
1554 for (pass=0; pass < 2; pass++)
1555 for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
1556 dev = pci_dev_b(ln);
1557 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1558 max = pci_scan_bridge(bus, dev, max, pass);
1562 * We've scanned the bus and so we know all about what's on
1563 * the other side of any bridges that may be on this bus plus
1566 * Return how far we've got finding sub-buses.
1568 DBG("Bus scan for %02x returning with max=%02x\n", bus->number, max);
/*
 * Recursively test whether bus number @nr already appears anywhere in
 * the bus tree rooted at @list.  NOTE(review): the "return 1" /
 * "return 0" lines are elided from this view.
 */
1572 int __devinit pci_bus_exists(const struct list_head *list, int nr)
1574 const struct list_head *l;
1576 for(l=list->next; l != list; l = l->next) {
1577 const struct pci_bus *b = pci_bus_b(l);
1578 if (b->number == nr || pci_bus_exists(&b->children, nr))
/*
 * Allocate a root bus numbered @bus, link it onto pci_root_buses, and
 * point its I/O and memory resources at the global ioport/iomem
 * resources.  Buses already reachable through another bridge are
 * skipped.  NOTE(review): the returns (NULL on duplicate, b on
 * success) are elided from this view.
 */
1584 struct pci_bus * __devinit pci_alloc_primary_bus(int bus)
1588 if (pci_bus_exists(&pci_root_buses, bus)) {
1589 /* If we already got to this bus through a different bridge, ignore it */
1590 DBG("PCI: Bus %02x already known\n", bus);
1594 b = pci_alloc_bus();
1595 list_add_tail(&b->node, &pci_root_buses);
1597 b->number = b->secondary = bus;
1598 b->resource[0] = &ioport_resource;
1599 b->resource[1] = &iomem_resource;
/*
 * Allocate root bus @bus with the given config-access @ops and
 * @sysdata, then scan it; subordinate records the highest bus number
 * found behind it.  NOTE(review): the ops assignment, NULL check and
 * return are elided from this view.
 */
1603 struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
1605 struct pci_bus *b = pci_alloc_primary_bus(bus);
1607 b->sysdata = sysdata;
1609 b->subordinate = pci_do_scan_bus(b);
1617 * PCI Power management..
1619 * This needs to be done centralized, so that we power manage PCI
1620 * devices in the right order: we should not shut down PCI bridges
1621 * before we've shut down the devices behind them, and we should
1622 * not wake up devices before we've woken up the bridge to the
1625 * We do not touch devices that don't have a driver that exports
1626 * a suspend/resume function. That is just too dangerous. If the default
1627 * PCI suspend/resume functions work for a device, the driver can
1628 * easily implement them (ie just have a suspend function that calls
1629 * the pci_set_power_state() function).
/* Ask the device's driver (if it has a save_state hook) to save state
 * for power transition @state; devices without a hook are skipped. */
1632 static int pci_pm_save_state_device(struct pci_dev *dev, u32 state)
1636 struct pci_driver *driver = dev->driver;
1637 if (driver && driver->save_state)
1638 error = driver->save_state(dev,state);
/* Invoke the driver's suspend hook, if any, for target state @state.
 * Drivers without a hook are deliberately left untouched (see the
 * policy comment above these helpers). */
1643 static int pci_pm_suspend_device(struct pci_dev *dev, u32 state)
1647 struct pci_driver *driver = dev->driver;
1648 if (driver && driver->suspend)
1649 error = driver->suspend(dev,state);
/* Invoke the driver's resume hook, if any; mirror of
 * pci_pm_suspend_device(). */
1654 static int pci_pm_resume_device(struct pci_dev *dev)
1658 struct pci_driver *driver = dev->driver;
1659 if (driver && driver->resume)
1660 error = driver->resume(dev);
/*
 * Recursively save state for the whole subtree under @bus: child buses
 * first, then the devices on this bus.  Stops and propagates the first
 * error encountered.
 */
1665 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
1667 struct list_head *list;
1670 list_for_each(list, &bus->children) {
1671 error = pci_pm_save_state_bus(pci_bus_b(list),state);
1672 if (error) return error;
1674 list_for_each(list, &bus->devices) {
1675 error = pci_pm_save_state_device(pci_dev_b(list),state);
1676 if (error) return error;
/*
 * Recursively suspend the subtree under @bus.  Child buses are walked
 * before the devices on this bus, so devices behind a bridge go down
 * before the bridge itself (per the ordering comment above).  Errors
 * from individual devices are ignored here (best effort).
 */
1681 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
1683 struct list_head *list;
1685 /* Walk the bus children list */
1686 list_for_each(list, &bus->children)
1687 pci_pm_suspend_bus(pci_bus_b(list),state);
1689 /* Walk the device children list */
1690 list_for_each(list, &bus->devices)
1691 pci_pm_suspend_device(pci_dev_b(list),state);
/*
 * Recursively resume the subtree under @bus, in the opposite order to
 * suspend: devices on this bus first, then child buses, so a bridge is
 * awake before anything behind it is resumed.
 */
1695 static int pci_pm_resume_bus(struct pci_bus *bus)
1697 struct list_head *list;
1699 /* Walk the device children list */
1700 list_for_each(list, &bus->devices)
1701 pci_pm_resume_device(pci_dev_b(list));
1703 /* And then walk the bus children */
1704 list_for_each(list, &bus->children)
1705 pci_pm_resume_bus(pci_bus_b(list));
/*
 * Save state across every root bus: the subtree first, then the root
 * bridge device itself (bus->self).  NOTE(review): the error check
 * between the two calls and the final return are elided from this
 * view.
 */
1709 static int pci_pm_save_state(u32 state)
1711 struct list_head *list;
1712 struct pci_bus *bus;
1715 list_for_each(list, &pci_root_buses) {
1716 bus = pci_bus_b(list);
1717 error = pci_pm_save_state_bus(bus,state);
1719 error = pci_pm_save_state_device(bus->self,state);
/*
 * Suspend every root bus: subtree first, then the root bridge device
 * itself — children always go down before their bridge.
 */
1724 static int pci_pm_suspend(u32 state)
1726 struct list_head *list;
1727 struct pci_bus *bus;
1729 list_for_each(list, &pci_root_buses) {
1730 bus = pci_bus_b(list);
1731 pci_pm_suspend_bus(bus,state);
1732 pci_pm_suspend_device(bus->self,state);
/*
 * Resume every root bus in the reverse order of suspend: wake the root
 * bridge device first, then the subtree behind it.
 */
1737 int pci_pm_resume(void)
1739 struct list_head *list;
1740 struct pci_bus *bus;
1742 list_for_each(list, &pci_root_buses) {
1743 bus = pci_bus_b(list);
1744 pci_pm_resume_device(bus->self);
1745 pci_pm_resume_bus(bus);
/*
 * pm_register() callback: dispatch PM requests to the save/suspend/
 * resume walkers above.  NOTE(review): the switch on @rqst and the
 * surrounding case labels are elided from this view; the target state
 * is passed through @data as an integer.
 */
1751 pci_pm_callback(struct pm_dev *pm_device, pm_request_t rqst, void *data)
1757 error = pci_pm_save_state((unsigned long)data);
1760 error = pci_pm_suspend((unsigned long)data);
1763 error = pci_pm_resume();
1773 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
1774 * small blocks are easily used by drivers for bus mastering controllers.
1775 * This should probably be sharing the guts of the slab allocator.
/* Pool of same-sized DMA-consistent blocks carved out of pages from
 * pci_alloc_consistent().  NOTE(review): several fields (name, size,
 * allocation, lock, flags) are elided from this view. */
1778 struct pci_pool { /* the pool */
1779 struct list_head page_list;
1781 size_t blocks_per_page;
1784 struct pci_dev *dev;
/* Sleepers in pci_pool_alloc(SLAB_KERNEL) wait here for a free block. */
1787 wait_queue_head_t waitq;
/* Per-page header; the trailing bitmap tracks free blocks (bit set ==
 * free, see pool_alloc_page()).  Zero-length array is the pre-C99
 * flexible-array idiom. */
1790 struct pci_page { /* cacheable header for 'allocation' bytes */
1791 struct list_head page_list;
1794 unsigned long bitmap [0];
1797 #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
1798 #define POOL_POISON_BYTE 0xa7
1800 // #define CONFIG_PCIPOOL_DEBUG
1804 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
1805 * @name: name of pool, for diagnostics
1806 * @pdev: pci device that will be doing the DMA
1807 * @size: size of the blocks in this pool.
1808 * @align: alignment requirement for blocks; must be a power of two
1809 * @allocation: returned blocks won't cross this boundary (or zero)
1810 * @flags: SLAB_* flags (not all are supported).
1812 * Returns a pci allocation pool with the requested characteristics, or
1813 * null if one can't be created. Given one of these pools, pci_pool_alloc()
1814 * may be used to allocate memory. Such memory will all have "consistent"
1815 * DMA mappings, accessible by the device and its driver without using
1816 * cache flushing primitives. The actual size of blocks allocated may be
1817 * larger than requested because of alignment.
1819 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
1820 * cross that size boundary. This is useful for devices which have
1821 * addressing restrictions on individual DMA transfers, such as not crossing
1822 * boundaries of 4KBytes.
/*
 * See the kernel-doc above: build a pci_pool with the requested block
 * size/alignment/boundary.  Size is rounded to the alignment; if no
 * boundary is given, one page is used.  NOTE(review): several early
 * returns (bad parameters, failed kmalloc) and intermediate lines are
 * elided from this view.
 */
1825 pci_pool_create (const char *name, struct pci_dev *pdev,
1826 size_t size, size_t align, size_t allocation, int flags)
1828 struct pci_pool *retval;
1834 else if (size < align)
1836 else if ((size % align) != 0) {
1838 size &= ~(align - 1);
1841 if (allocation == 0) {
1842 if (PAGE_SIZE < size)
1845 allocation = PAGE_SIZE;
1846 // FIXME: round up for less fragmentation
1847 } else if (allocation < size)
1850 if (!(retval = kmalloc (sizeof *retval, flags)))
1853 #ifdef CONFIG_PCIPOOL_DEBUG
1854 flags |= SLAB_POISON;
/* strncpy does not guarantee termination; the next line forces it. */
1857 strncpy (retval->name, name, sizeof retval->name);
1858 retval->name [sizeof retval->name - 1] = 0;
1861 INIT_LIST_HEAD (&retval->page_list);
1862 spin_lock_init (&retval->lock);
1863 retval->size = size;
1864 retval->flags = flags;
1865 retval->allocation = allocation;
1866 retval->blocks_per_page = allocation / size;
1867 init_waitqueue_head (&retval->waitq);
1869 #ifdef CONFIG_PCIPOOL_DEBUG
1870 printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
1871 pdev ? pdev->slot_name : NULL, retval->name, size,
1872 retval->blocks_per_page, allocation);
/*
 * Allocate one backing page for @pool: a kmalloc'd header + free-block
 * bitmap, plus pool->allocation bytes of DMA-consistent memory.  The
 * page is linked onto the pool's page list.  NOTE(review): the failure
 * paths and the return of the new page are elided from this view.
 */
1879 static struct pci_page *
1880 pool_alloc_page (struct pci_pool *pool, int mem_flags)
1882 struct pci_page *page;
/* Bitmap length: blocks_per_page bits rounded up to whole longs. */
1885 mapsize = pool->blocks_per_page;
1886 mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
1887 mapsize *= sizeof (long);
1889 page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
1892 page->vaddr = pci_alloc_consistent (pool->dev,
1896 memset (page->bitmap, 0xff, mapsize); // bit set == free
1897 if (pool->flags & SLAB_POISON)
1898 memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1899 list_add (&page->page_list, &pool->page_list);
/*
 * A page is busy if any block is allocated, i.e. any bitmap word has a
 * cleared bit (set bit == free).  NOTE(review): the return statements
 * are elided from this view.
 */
1909 is_page_busy (int blocks, unsigned long *bitmap)
1911 while (blocks > 0) {
1912 if (*bitmap++ != ~0UL)
1914 blocks -= BITS_PER_LONG;
/*
 * Release one backing page: poison it if requested, return the
 * consistent memory to the DMA allocator, and unlink the header.
 * NOTE(review): the kfree of the header is elided from this view.
 */
1920 pool_free_page (struct pci_pool *pool, struct pci_page *page)
1922 dma_addr_t dma = page->dma;
1924 if (pool->flags & SLAB_POISON)
1925 memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
1926 pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
1927 list_del (&page->page_list);
1933 * pci_pool_destroy - destroys a pool of pci memory blocks.
1934 * @pool: pci pool that will be destroyed
1936 * Caller guarantees that no more memory from the pool is in use,
1937 * and that nothing will try to use the pool after this call.
/*
 * Tear down @pool (see kernel-doc above).  Pages still marked busy are
 * reported and deliberately leaked rather than freed out from under a
 * user.  NOTE(review): the kfree of the pool itself is elided from
 * this view.
 */
1940 pci_pool_destroy (struct pci_pool *pool)
1942 unsigned long flags;
1944 #ifdef CONFIG_PCIPOOL_DEBUG
1945 printk (KERN_DEBUG "pcipool destroy %s/%s\n",
1946 pool->dev ? pool->dev->slot_name : NULL,
1950 spin_lock_irqsave (&pool->lock, flags);
1951 while (!list_empty (&pool->page_list)) {
1952 struct pci_page *page;
1953 page = list_entry (pool->page_list.next,
1954 struct pci_page, page_list);
1955 if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
1956 printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
1957 pool->dev ? pool->dev->slot_name : NULL,
1958 pool->name, page->vaddr);
1959 /* leak the still-in-use consistent memory */
1960 list_del (&page->page_list);
1963 pool_free_page (pool, page);
1965 spin_unlock_irqrestore (&pool->lock, flags);
1971 * pci_pool_alloc - get a block of consistent memory
1972 * @pool: pci pool that will produce the block
1973 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
1974 * @handle: pointer to dma address of block
1976 * This returns the kernel virtual address of a currently unused block,
1977 * and reports its dma address through the handle.
1978 * If such a memory block can't be allocated, null is returned.
/*
 * Allocate one block (see kernel-doc above): scan each page's bitmap
 * for a set (free) bit, claim it, and return vaddr/dma offsets.  If no
 * block is free, grow the pool with a new page; SLAB_KERNEL callers
 * sleep on the pool waitqueue and retry when a page can't be added.
 * NOTE(review): the goto/retry control flow connecting these pieces is
 * elided from this view.
 */
1981 pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
1983 unsigned long flags;
1984 struct list_head *entry;
1985 struct pci_page *page;
1991 spin_lock_irqsave (&pool->lock, flags);
1992 list_for_each (entry, &pool->page_list) {
1994 page = list_entry (entry, struct pci_page, page_list);
1995 /* only cachable accesses here ... */
1996 for (map = 0, i = 0;
1997 i < pool->blocks_per_page;
1998 i += BITS_PER_LONG, map++) {
1999 if (page->bitmap [map] == 0)
/* ffz(~x) finds the lowest set bit of x, i.e. the first free block. */
2001 block = ffz (~ page->bitmap [map]);
2002 if ((i + block) < pool->blocks_per_page) {
2003 clear_bit (block, &page->bitmap [map]);
2004 offset = (BITS_PER_LONG * map) + block;
2005 offset *= pool->size;
2010 if (!(page = pool_alloc_page (pool, mem_flags))) {
2011 if (mem_flags == SLAB_KERNEL) {
/* Can't grow now: wait (bounded) for someone to free a block. */
2012 DECLARE_WAITQUEUE (wait, current);
2014 current->state = TASK_INTERRUPTIBLE;
2015 add_wait_queue (&pool->waitq, &wait);
2016 spin_unlock_irqrestore (&pool->lock, flags);
2018 schedule_timeout (POOL_TIMEOUT_JIFFIES);
2020 current->state = TASK_RUNNING;
2021 remove_wait_queue (&pool->waitq, &wait);
/* Fresh page: claim its first block. */
2028 clear_bit (0, &page->bitmap [0]);
2031 retval = offset + page->vaddr;
2032 *handle = offset + page->dma;
2034 spin_unlock_irqrestore (&pool->lock, flags);
/*
 * Find the pool page whose DMA range [page->dma, page->dma +
 * allocation) contains @dma; pages are assumed dma-ordered, so a dma
 * below the current page means it isn't in the pool.  NOTE(review):
 * the done/return lines are elided from this view.
 */
2039 static struct pci_page *
2040 pool_find_page (struct pci_pool *pool, dma_addr_t dma)
2042 unsigned long flags;
2043 struct list_head *entry;
2044 struct pci_page *page;
2046 spin_lock_irqsave (&pool->lock, flags);
2047 list_for_each (entry, &pool->page_list) {
2048 page = list_entry (entry, struct pci_page, page_list);
2049 if (dma < page->dma)
2051 if (dma < (page->dma + pool->allocation))
2056 spin_unlock_irqrestore (&pool->lock, flags);
2062 * pci_pool_free - put block back into pci pool
2063 * @pool: the pci pool holding the block
2064 * @vaddr: virtual address of block
2065 * @dma: dma address of block
2067 * Caller promises neither device nor driver will again touch this block
2068 * unless it is first re-allocated.
/*
 * Return a block to @pool (see kernel-doc above): locate its page,
 * compute block/word indices from the dma offset, optionally poison,
 * set the free bit, and wake any allocator sleeping on the waitqueue.
 * Debug builds also sanity-check vaddr consistency and double-frees.
 * NOTE(review): the early returns after the error printks are elided
 * from this view.
 */
2071 pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
2073 struct pci_page *page;
2074 unsigned long flags;
2077 if ((page = pool_find_page (pool, dma)) == 0) {
2078 printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
2079 pool->dev ? pool->dev->slot_name : NULL,
2080 pool->name, vaddr, (int) (dma & 0xffffffff));
2083 #ifdef CONFIG_PCIPOOL_DEBUG
2084 if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
2085 printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
2086 pool->dev ? pool->dev->slot_name : NULL,
2087 pool->name, vaddr, (int) (dma & 0xffffffff));
/* Block index = offset into page / block size; split into word+bit. */
2092 block = dma - page->dma;
2093 block /= pool->size;
2094 map = block / BITS_PER_LONG;
2095 block %= BITS_PER_LONG;
2097 #ifdef CONFIG_PCIPOOL_DEBUG
2098 if (page->bitmap [map] & (1UL << block)) {
2099 printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
2100 pool->dev ? pool->dev->slot_name : NULL,
2105 if (pool->flags & SLAB_POISON)
2106 memset (vaddr, POOL_POISON_BYTE, pool->size);
2108 spin_lock_irqsave (&pool->lock, flags);
2109 set_bit (block, &page->bitmap [map]);
2110 if (waitqueue_active (&pool->waitq))
2111 wake_up (&pool->waitq);
2113 * Resist a temptation to do
2114 * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
2115 * it is not interrupt safe. Better have empty pages hang around.
2117 spin_unlock_irqrestore (&pool->lock, flags);
/*
 * Late PCI init: run the FINAL-stage fixups over every known device,
 * then register the power-management callback.
 */
2121 void __devinit pci_init(void)
2123 struct pci_dev *dev;
2127 pci_for_each_dev(dev) {
2128 pci_fixup_device(PCI_FIXUP_FINAL, dev);
2132 pm_register(PM_PCI_DEV, 0, pci_pm_callback);
/*
 * Handler for the "pci=" kernel command-line option: each comma-
 * separated token is first offered to the arch code via
 * pcibios_setup(); anything it does not consume is reported as
 * unknown.  NOTE(review): the loop over tokens and the return are
 * elided from this view.
 */
2136 static int __devinit pci_setup(char *str)
2139 char *k = strchr(str, ',');
2142 if (*str && (str = pcibios_setup(str)) && *str) {
2143 /* PCI layer options should be handled here */
2144 printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
2151 __setup("pci=", pci_setup);
2153 EXPORT_SYMBOL(pci_read_config_byte);
2154 EXPORT_SYMBOL(pci_read_config_word);
2155 EXPORT_SYMBOL(pci_read_config_dword);
2156 EXPORT_SYMBOL(pci_write_config_byte);
2157 EXPORT_SYMBOL(pci_write_config_word);
2158 EXPORT_SYMBOL(pci_write_config_dword);
2159 EXPORT_SYMBOL(pci_devices);
2160 EXPORT_SYMBOL(pci_root_buses);
2161 EXPORT_SYMBOL(pci_enable_device_bars);
2162 EXPORT_SYMBOL(pci_enable_device);
2163 EXPORT_SYMBOL(pci_disable_device);
2164 EXPORT_SYMBOL(pci_find_capability);
2165 EXPORT_SYMBOL(pci_release_regions);
2166 EXPORT_SYMBOL(pci_request_regions);
2167 EXPORT_SYMBOL(pci_release_region);
2168 EXPORT_SYMBOL(pci_request_region);
2169 EXPORT_SYMBOL(pci_find_class);
2170 EXPORT_SYMBOL(pci_find_device);
2171 EXPORT_SYMBOL(pci_find_slot);
2172 EXPORT_SYMBOL(pci_find_subsys);
2173 EXPORT_SYMBOL(pci_set_master);
2174 EXPORT_SYMBOL(pci_set_mwi);
2175 EXPORT_SYMBOL(pci_clear_mwi);
2176 EXPORT_SYMBOL(pci_set_dma_mask);
2177 EXPORT_SYMBOL(pci_dac_set_dma_mask);
2178 EXPORT_SYMBOL(pci_assign_resource);
2179 EXPORT_SYMBOL(pci_register_driver);
2180 EXPORT_SYMBOL(pci_unregister_driver);
2181 EXPORT_SYMBOL(pci_dev_driver);
2182 EXPORT_SYMBOL(pci_match_device);
2183 EXPORT_SYMBOL(pci_find_parent_resource);
2185 #ifdef CONFIG_HOTPLUG
2186 EXPORT_SYMBOL(pci_setup_device);
2187 EXPORT_SYMBOL(pci_insert_device);
2188 EXPORT_SYMBOL(pci_remove_device);
2189 EXPORT_SYMBOL(pci_announce_device_to_drivers);
2190 EXPORT_SYMBOL(pci_add_new_bus);
2191 EXPORT_SYMBOL(pci_do_scan_bus);
2192 EXPORT_SYMBOL(pci_scan_slot);
2193 EXPORT_SYMBOL(pci_scan_bus);
2194 EXPORT_SYMBOL(pci_scan_device);
2195 EXPORT_SYMBOL(pci_read_bridge_bases);
2196 #ifdef CONFIG_PROC_FS
2197 EXPORT_SYMBOL(pci_proc_attach_device);
2198 EXPORT_SYMBOL(pci_proc_detach_device);
2199 EXPORT_SYMBOL(pci_proc_attach_bus);
2200 EXPORT_SYMBOL(pci_proc_detach_bus);
2201 EXPORT_SYMBOL(proc_bus_pci_dir);
2205 EXPORT_SYMBOL(pci_set_power_state);
2206 EXPORT_SYMBOL(pci_save_state);
2207 EXPORT_SYMBOL(pci_restore_state);
2208 EXPORT_SYMBOL(pci_enable_wake);
2210 /* Obsolete functions */
2212 EXPORT_SYMBOL(pcibios_present);
2213 EXPORT_SYMBOL(pcibios_read_config_byte);
2214 EXPORT_SYMBOL(pcibios_read_config_word);
2215 EXPORT_SYMBOL(pcibios_read_config_dword);
2216 EXPORT_SYMBOL(pcibios_write_config_byte);
2217 EXPORT_SYMBOL(pcibios_write_config_word);
2218 EXPORT_SYMBOL(pcibios_write_config_dword);
2219 EXPORT_SYMBOL(pcibios_find_class);
2220 EXPORT_SYMBOL(pcibios_find_device);
2224 EXPORT_SYMBOL(isa_dma_bridge_buggy);
2225 EXPORT_SYMBOL(pci_pci_problems);
2227 /* Pool allocator */
2229 EXPORT_SYMBOL (pci_pool_create);
2230 EXPORT_SYMBOL (pci_pool_destroy);
2231 EXPORT_SYMBOL (pci_pool_alloc);
2232 EXPORT_SYMBOL (pci_pool_free);