static int __init
efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
{
- memory_present(0, start, end);
+ memory_present(0, PFN_UP(start), PFN_DOWN(end));
return 0;
}
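
The rounding in this fix is the whole point: memory_present() takes page frame numbers, not byte addresses, so the start is rounded up and the end rounded down to keep partially covered pages out of the map. A minimal user-space sketch of the two macros (the 4 KiB PAGE_SHIFT is an assumption for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x1234, end = 0x8000;

	/* only pages fully inside [start, end) are counted */
	printf("pfn %lu..%lu\n", PFN_UP(start), PFN_DOWN(end));
	return 0;
}
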
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static struct hw_interrupt_type cobalt_irq_type = {
+static struct irq_chip cobalt_irq_type = {
.typename = "Cobalt-APIC",
.startup = startup_cobalt_irq,
.shutdown = disable_cobalt_irq,
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static struct hw_interrupt_type piix4_master_irq_type = {
+static struct irq_chip piix4_master_irq_type = {
.typename = "PIIX4-master",
.startup = startup_piix4_master_irq,
.ack = ack_cobalt_irq,
};
-static struct hw_interrupt_type piix4_virtual_irq_type = {
+static struct irq_chip piix4_virtual_irq_type = {
.typename = "PIIX4-virtual",
- .startup = startup_8259A_irq,
.shutdown = disable_8259A_irq,
.enable = enable_8259A_irq,
.disable = disable_8259A_irq,
__setup_end = .;
__initcall_start = .;
.initcall.init : {
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
__setup_end = .;
__initcall_start = .;
.initcall.init : {
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
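
Background for the INITCALLS change (here and in the identical hunk above): the per-level section list was repeated in every arch linker script, so it was centralized into a shared INITCALLS macro. The C side that feeds those sections looks roughly like this sketch, modeled on include/linux/init.h of this era rather than quoted from it:

typedef int (*initcall_t)(void);

/* each level gets a function pointer in .initcall<N>.init; the linker
 * script concatenates the levels in order between __initcall_start
 * and __initcall_end */
#define __define_initcall(level, fn) \
	static initcall_t __initcall_##fn \
	__attribute__((__used__, __section__(".initcall" level ".init"))) = fn

#define core_initcall(fn)	__define_initcall("1", fn)
#define device_initcall(fn)	__define_initcall("6", fn)

static int hello_init(void) { return 0; }
device_initcall(hello_init);
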
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
return -EFAULT;
}
+ interval = 0;
sscanf(buf, "%i", &interval);
if (interval <= 0) {
P_ERROR("Timer CPU interval has to be > 0!\n");
put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)))
error = -EFAULT;
}
- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+ if (copy_to_user(args->__unused, tmp.__unused,
+ sizeof(tmp.__unused)))
+ error = -EFAULT;
}
return error;
}
compat_old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(sa_handler, &act->sa_handler) ||
- __get_user(sa_restorer, &act->sa_restorer))
+ __get_user(sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(sa_handler, &oact->sa_handler) ||
- __put_user(sa_restorer, &oact->sa_restorer))
+ __put_user(sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
llgtr %r2,%r2 # unsigned *
llgtr %r3,%r3 # unsigned *
llgtr %r4,%r4 # struct getcpu_cache *
- jg sys_tee
+ jg sys_getcpu
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
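
Both sigaction hunks apply the same pattern: fold every faultable access into one short-circuit chain behind the access_ok() check, so no __get_user()/__put_user() return value is silently dropped. A user-space sketch of the shape, with fetch_u32() as a made-up stand-in for __get_user():

#include <errno.h>

struct act { unsigned int handler, restorer, flags, mask; };

/* stand-in for __get_user(): the real thing returns nonzero on fault */
static int fetch_u32(unsigned int *dst, const unsigned int *src)
{
	*dst = *src;
	return 0;
}

int read_act(struct act *out, const struct act *uact)
{
	if (fetch_u32(&out->handler,  &uact->handler) ||
	    fetch_u32(&out->restorer, &uact->restorer) ||
	    fetch_u32(&out->flags,    &uact->flags) ||
	    fetch_u32(&out->mask,     &uact->mask))
		return -EFAULT;	/* any single failure aborts the copy */
	return 0;
}
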
local_irq_enable();
if (regs->psw.mask & PSW_MASK_PSTATE) {
- get_user(*((__u16 *) opcode), (__u16 __user *) location);
+ if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
+ return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
if (current->ptrace & PT_PTRACED)
force_sig(SIGTRAP, current);
signal = SIGILL;
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
- get_user(*((__u16 *) (opcode+2)), location+1);
+ if (get_user(*((__u16 *) (opcode+2)), location+1))
+ return;
signal = math_emu_b3(opcode, regs);
} else if (opcode[0] == 0xed) {
- get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1));
+ if (get_user(*((__u32 *) (opcode+2)),
+ (__u32 __user *)(location+1)))
+ return;
signal = math_emu_ed(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb299) {
- get_user(*((__u16 *) (opcode+2)), location+1);
+ if (get_user(*((__u16 *) (opcode+2)), location+1))
+ return;
signal = math_emu_srnm(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb29c) {
- get_user(*((__u16 *) (opcode+2)), location+1);
+ if (get_user(*((__u16 *) (opcode+2)), location+1))
+ return;
signal = math_emu_stfpc(opcode, regs);
} else if (*((__u16 *) opcode) == 0xb29d) {
- get_user(*((__u16 *) (opcode+2)), location+1);
+ if (get_user(*((__u16 *) (opcode+2)), location+1))
+ return;
signal = math_emu_lfpc(opcode, regs);
#endif
} else
If you are unsure about this, say N here.
-endmenu
-
config SYS_HYPERVISOR
bool
default n
+
+endmenu
*block_size = BLOCK_SIZE;
}
if (*total_size != (__u32) 0)
- printk(KERN_INFO " blocks= %lld block_size= %d\n",
- *total_size, *block_size);
+ printk(KERN_INFO " blocks= %llu block_size= %d\n",
+ (unsigned long long)*total_size, *block_size);
kfree(buf);
return;
}
*total_size = 0;
*block_size = BLOCK_SIZE;
}
- printk(KERN_INFO " blocks= %lld block_size= %d\n",
- *total_size, *block_size);
+ printk(KERN_INFO " blocks= %llu block_size= %d\n",
+ (unsigned long long)*total_size, *block_size);
kfree(buf);
return;
}
if (io == -1) {
printk(KERN_ERR PFX "io parameter must be specified\n");
ret = -EINVAL;
- goto out_clean;
+ goto out_pnp;
}
#if defined CONFIG_PNP
if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
printk(KERN_ERR PFX "Unable to register IO port %#x\n", io);
ret = -EBUSY;
- goto out_clean;
+ goto out_pnp;
}
ret = sc1200wdt_probe();
out_io:
release_region(io, io_len);
+out_pnp:
+#if defined CONFIG_PNP
+ if (isapnp)
+ pnp_unregister_driver(&scl200wdt_pnp_driver);
+#endif
goto out_clean;
}
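
The watchdog fix rewires the error path into the usual unwind ladder: each failure jumps to the label that releases exactly what had been acquired so far, in reverse order of acquisition. A generic sketch of the idiom:

#include <stdlib.h>

int setup(void)
{
	char *a, *b;
	int ret = -1;

	a = malloc(32);
	if (!a)
		goto out;
	b = malloc(64);
	if (!b)
		goto out_free_a;	/* undo only what succeeded */

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

out_free_a:
	free(a);
out:
	return ret;
}
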
(!(PCI_FUNC(dev->devfn) & 1)))
goto out;
- if (dev->vendor == PCI_VENDOR_ID_JMICRON && PCI_FUNC(dev->devfn) != 1)
- goto out;
+ if (dev->vendor == PCI_VENDOR_ID_JMICRON) {
+ if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 && PCI_FUNC(dev->devfn) != 1)
+ goto out;
+ }
if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
pci_read_config_word(dev, PCI_COMMAND, &command);
{
int err;
+ printk(KERN_INFO "%s does not fully support suspend and resume yet\n",
+ OHCI1394_DRIVER_NAME);
+
err = pci_save_state(pdev);
- if (err)
- goto out;
+ if (err) {
+ printk(KERN_ERR "%s: pci_save_state failed with %d\n",
+ OHCI1394_DRIVER_NAME, err);
+ return err;
+ }
err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+#ifdef OHCI1394_DEBUG
if (err)
- goto out;
+ printk(KERN_DEBUG "%s: pci_set_power_state failed with %d\n",
+ OHCI1394_DRIVER_NAME, err);
+#endif /* OHCI1394_DEBUG */
/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
}
#endif /* CONFIG_PPC_PMAC */
-out:
- return err;
+
+ return 0;
}
#endif /* CONFIG_PM */
kobject_init(&rdev->kobj);
rdev->desc_nr = -1;
+ rdev->saved_raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
return -EBUSY;
ITERATE_RDEV(mddev,rdev,tmp) {
sector_t avail;
- if (rdev->sb_offset > rdev->data_offset)
- avail = (rdev->sb_offset*2) - rdev->data_offset;
- else
- avail = get_capacity(rdev->bdev->bd_disk)
- - rdev->data_offset;
+ avail = rdev->size * 2;
+
if (fit && (size == 0 || size > avail/2))
size = avail/2;
if (avail < ((sector_t)size << 1))
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
conf->working_disks--;
+ mddev->degraded++;
printk(KERN_ALERT "multipath: IO failure on %s,"
" disabling IO path. \n Operation continuing"
" on %d IO paths.\n",
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
+ mddev->degraded--;
rdev->raid_disk = path;
set_bit(In_sync, &rdev->flags);
rcu_assign_pointer(p->rdev, rdev);
"raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
- (unsigned long long)sect +
- rdev->data_offset,
+ (unsigned long long)(sect +
+ rdev->data_offset),
bdevname(rdev->bdev, b));
}
}
"raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
- (unsigned long long)sect+
- rdev->data_offset,
+ (unsigned long long)(sect+
+ rdev->data_offset),
bdevname(rdev->bdev, b));
rdev_dec_pending(rdev, mddev);
do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
printk(KERN_DEBUG
- "IOC4 %s: PCI clock is %lld ns.\n",
- pci_name(idd->idd_pdev), ns);
+ "IOC4 %s: PCI clock is %llu ns.\n",
+ pci_name(idd->idd_pdev), (unsigned long long)ns);
}
/* Remember results. We store the extint clock period rather
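
The cast fixes in this and the md/raid hunks above all close the same portability trap: %llu consumes exactly an unsigned long long from the varargs, while sector_t, u64 and friends may be plain unsigned long on some configs, so the explicit cast keeps format and argument in step on every arch. A runnable illustration (the sector_t typedef here is a stand-in):

#include <stdio.h>

typedef unsigned long sector_t;	/* 32 or 64 bit depending on the build */

int main(void)
{
	sector_t sect = 123456;

	/* without the cast, a 32-bit sector_t would desynchronize the
	 * varargs where %llu expects 64 bits */
	printf("sector %llu\n", (unsigned long long)sect);
	return 0;
}
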
PCMCIA_DEVICE_PROD_ID123(
"U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
0xc7b8df9d, 0x1700d087, 0x4b74baa0),
+ PCMCIA_DEVICE_PROD_ID123(
+ "Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio",
+ "Ver. 1.00",
+ 0x5cd01705, 0x4271660f, 0x9d08ee12),
+ PCMCIA_DEVICE_PROD_ID123(
+ "corega", "WL PCCL-11", "ISL37300P",
+ 0xa21501a, 0x59868926, 0xc9049a39),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
csa = at91_sys_read(AT91_EBI_CSA);
at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
- /* force poweron defaults for these pins ... */
- (void) at91_set_A_periph(AT91_PIN_PC9, 0); /* A25/CFRNW */
- (void) at91_set_A_periph(AT91_PIN_PC10, 0); /* NCS4/CFCS */
- (void) at91_set_A_periph(AT91_PIN_PC11, 0); /* NCS5/CFCE1 */
- (void) at91_set_A_periph(AT91_PIN_PC12, 0); /* NCS6/CFCE2 */
-
/* nWAIT is _not_ a default setting */
(void) at91_set_A_periph(AT91_PIN_PC6, 1); /* nWAIT */
return 0;
fail2:
- iounmap((void __iomem *) cf->socket.io_offset);
release_mem_region(io->start, io->end + 1 - io->start);
fail1:
+ if (cf->socket.io_offset)
+ iounmap((void __iomem *) cf->socket.io_offset);
if (board->irq_pin)
free_irq(board->irq_pin, cf);
fail0a:
+ device_init_wakeup(&pdev->dev, 0);
free_irq(board->det_pin, cf);
device_init_wakeup(&pdev->dev, 0);
fail0:
struct at91_cf_data *board = cf->board;
pcmcia_socket_dev_suspend(&pdev->dev, mesg);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev)) {
enable_irq_wake(board->det_pin);
- else {
+ if (board->irq_pin)
+ enable_irq_wake(board->irq_pin);
+ } else {
disable_irq_wake(board->det_pin);
- disable_irq(board->det_pin);
+ if (board->irq_pin)
+ disable_irq_wake(board->irq_pin);
}
- if (board->irq_pin)
- disable_irq(board->irq_pin);
return 0;
}
static int at91_cf_resume(struct platform_device *pdev)
{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
- struct at91_cf_data *board = cf->board;
-
- if (board->irq_pin)
- enable_irq(board->irq_pin);
- if (!device_may_wakeup(&pdev->dev))
- enable_irq(board->det_pin);
pcmcia_socket_dev_resume(&pdev->dev);
return 0;
}
int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr)
{
struct skt_dev_info *sinfo;
+ struct au1000_pcmcia_socket *skt;
int ret, i;
sinfo = kzalloc(sizeof(struct skt_dev_info), GFP_KERNEL);
* Initialise the per-socket structure.
*/
for (i = 0; i < nr; i++) {
- struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);
+ skt = PCMCIA_SOCKET(i);
memset(skt, 0, sizeof(*skt));
skt->socket.resource_ops = &pccard_static_ops;
dev_set_drvdata(dev, sinfo);
return 0;
- do {
- struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);
+
+out_err:
+ flush_scheduled_work();
+ ops->hw_shutdown(skt);
+ while (i-- > 0) {
+ skt = PCMCIA_SOCKET(i);
del_timer_sync(&skt->poll_timer);
pcmcia_unregister_socket(&skt->socket);
-out_err:
flush_scheduled_work();
+ if (i == 0) {
+ iounmap(skt->virt_io + (u32)mips_io_port_base);
+ skt->virt_io = NULL;
+ }
+#ifndef CONFIG_MIPS_XXS1500
+ else {
+ iounmap(skt->virt_io + (u32)mips_io_port_base);
+ skt->virt_io = NULL;
+ }
+#endif
ops->hw_shutdown(skt);
- i--;
- } while (i > 0);
+ }
kfree(sinfo);
out:
return ret;
static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
{
int no_devices=0;
+ int ret = 0;
unsigned long flags;
/* must be called with skt_mutex held */
* missing resource information or other trouble, we need to
* do this now. */
if (no_devices) {
- int ret = pcmcia_card_add(skt);
+ ret = pcmcia_card_add(skt);
if (ret)
return;
}
/* we re-scan all devices, not just the ones connected to this
* socket. This does not matter, though. */
- bus_rescan_devices(&pcmcia_bus_type);
+ ret = bus_rescan_devices(&pcmcia_bus_type);
+ if (ret)
+ printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n");
}
static inline int pcmcia_devmatch(struct pcmcia_device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ int ret;
if (!count)
return -EINVAL;
p_dev->allow_func_id_match = 1;
mutex_unlock(&p_dev->socket->skt_mutex);
- bus_rescan_devices(&pcmcia_bus_type);
+ ret = bus_rescan_devices(&pcmcia_bus_type);
+ if (ret)
+ printk(KERN_INFO "pcmcia: bus_rescan_devices failed after "
+ "allowing func_id matches\n");
return count;
}
static int __init init_pcmcia_bus(void)
{
+ int ret;
+
spin_lock_init(&pcmcia_dev_list_lock);
- bus_register(&pcmcia_bus_type);
- class_interface_register(&pcmcia_bus_interface);
+ ret = bus_register(&pcmcia_bus_type);
+ if (ret < 0) {
+ printk(KERN_WARNING "pcmcia: bus_register error: %d\n", ret);
+ return ret;
+ }
+ ret = class_interface_register(&pcmcia_bus_interface);
+ if (ret < 0) {
+ printk(KERN_WARNING
+ "pcmcia: class_interface_register error: %d\n", ret);
+ bus_unregister(&pcmcia_bus_type);
+ return ret;
+ }
pcmcia_setup_ioctl();
};
MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
+#ifdef CONFIG_PM
static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
{
return pcmcia_socket_dev_suspend(&dev->dev, state);
{
return pcmcia_socket_dev_resume(&dev->dev);
}
+#endif
static struct pci_driver i82092aa_pci_drv = {
.name = "i82092aa",
.id_table = i82092aa_pci_ids,
.probe = i82092aa_pci_probe,
.remove = __devexit_p(i82092aa_pci_remove),
+#ifdef CONFIG_PM
.suspend = i82092aa_socket_suspend,
.resume = i82092aa_socket_resume,
+#endif
};
static int i82092aa_module_init(void)
{
- enter("i82092aa_module_init");
- pci_register_driver(&i82092aa_pci_drv);
- leave("i82092aa_module_init");
- return 0;
+ return pci_register_driver(&i82092aa_pci_drv);
}
static void i82092aa_module_exit(void)
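
The CONFIG_PM guards added here (and in the pd6729 and yenta hunks below) keep the suspend/resume callbacks and the struct slots that reference them compiled in or out together, so neither side is left referring to a missing symbol. The same shape in miniature, with a made-up driver struct:

#ifdef CONFIG_PM
static int foo_suspend(void) { return 0; }
static int foo_resume(void)  { return 0; }
#endif

static int foo_probe(void) { return 0; }

struct foo_driver_ops {
	int (*probe)(void);
	int (*suspend)(void);
	int (*resume)(void);
};

static struct foo_driver_ops foo_driver = {
	.probe   = foo_probe,
#ifdef CONFIG_PM
	/* both sides of the reference disappear together */
	.suspend = foo_suspend,
	.resume  = foo_resume,
#endif
};
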
reg |= BCSR1_PCCVCC1;
break;
default:
- return 1;
+ goto out_unmap;
}
switch(vpp) {
if(vcc == vpp)
reg |= BCSR1_PCCVPP1;
else
- return 1;
+ goto out_unmap;
break;
case 120:
if ((vcc == 33) || (vcc == 50))
reg |= BCSR1_PCCVPP0;
else
- return 1;
+ goto out_unmap;
+ break;
default:
- return 1;
+ goto out_unmap;
}
/* first, turn off all power */
iounmap(bcsr_io);
return 0;
+
+out_unmap:
+ iounmap(bcsr_io);
+ return 1;
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
return 0;
fail2:
- iounmap((void __iomem *) cf->socket.io_offset);
release_mem_region(cf->phys_cf, SZ_8K);
fail1:
+ if (cf->socket.io_offset)
+ iounmap((void __iomem *) cf->socket.io_offset);
free_irq(irq, cf);
fail0:
kfree(cf);
int count, int *eof, void *data)
{
char *p = buf;
+ int rc;
- bus_for_each_drv(&pcmcia_bus_type, NULL,
- (void *) &p, proc_read_drivers_callback);
+ rc = bus_for_each_drv(&pcmcia_bus_type, NULL,
+ (void *) &p, proc_read_drivers_callback);
+ if (rc < 0)
+ return rc;
return (p - buf);
}
* Prevent this racing with a card insertion.
*/
mutex_lock(&s->skt_mutex);
- bus_rescan_devices(&pcmcia_bus_type);
+ ret = bus_rescan_devices(&pcmcia_bus_type);
mutex_unlock(&s->skt_mutex);
+ if (ret)
+ goto err_put_module;
/* check whether the driver indeed matched. I don't care if this
* is racy or not, because it can only happen on cardmgr access
* potential conflicts, just the most obvious ones.
*/
for (i = 0; i < MAX_IO_WIN; i++)
- if ((s->io[i].res) &&
+ if ((s->io[i].res) && *base &&
((s->io[i].res->start & (align-1)) == *base))
return 1;
for (i = 0; i < MAX_IO_WIN; i++) {
kfree(socket);
}
+#ifdef CONFIG_PM
static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
{
return pcmcia_socket_dev_suspend(&dev->dev, state);
{
return pcmcia_socket_dev_resume(&dev->dev);
}
+#endif
static struct pci_device_id pd6729_pci_ids[] = {
{
.id_table = pd6729_pci_ids,
.probe = pd6729_pci_probe,
.remove = __devexit_p(pd6729_pci_remove),
+#ifdef CONFIG_PM
.suspend = pd6729_socket_suspend,
.resume = pd6729_socket_resume,
+#endif
};
static int pd6729_module_init(void)
return 0;
}
+EXPORT_SYMBOL(soc_common_drv_pcmcia_remove);
ret = pcmcia_register_socket(&socket->socket);
if (ret == 0) {
/* Add the yenta register attributes */
- device_create_file(&dev->dev, &dev_attr_yenta_registers);
- goto out;
+ ret = device_create_file(&dev->dev, &dev_attr_yenta_registers);
+ if (ret == 0)
+ goto out;
+
+ /* error path... */
+ pcmcia_unregister_socket(&socket->socket);
}
unmap:
return ret;
}
-
+#ifdef CONFIG_PM
static int yenta_dev_suspend (struct pci_dev *dev, pm_message_t state)
{
struct yenta_socket *socket = pci_get_drvdata(dev);
struct yenta_socket *socket = pci_get_drvdata(dev);
if (socket) {
+ int rc;
+
pci_set_power_state(dev, 0);
/* FIXME: pci_restore_state needs to have a better interface */
pci_restore_state(dev);
pci_write_config_dword(dev, 16*4, socket->saved_state[0]);
pci_write_config_dword(dev, 17*4, socket->saved_state[1]);
- pci_enable_device(dev);
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
pci_set_master(dev);
if (socket->type && socket->type->restore_state)
return pcmcia_socket_dev_resume(&dev->dev);
}
-
+#endif
#define CB_ID(vend,dev,type) \
{ \
.id_table = yenta_table,
.probe = yenta_probe,
.remove = __devexit_p(yenta_close),
+#ifdef CONFIG_PM
.suspend = yenta_dev_suspend,
.resume = yenta_dev_resume,
+#endif
};
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
-
- /* Probe if necessary. */
- if (action == UNREGISTER_PROBE)
- ret = css_probe_device(sch->schid);
break;
case REPROBE:
device_trigger_reprobe(sch);
break;
}
spin_unlock_irqrestore(&sch->lock, flags);
+ /* Probe if necessary. */
+ if (action == UNREGISTER_PROBE)
+ ret = css_probe_device(sch->schid);
return ret;
}
/* this is a simple abstraction for device_register that sets the
* correct bus type and adds the bus specific files */
-int
-ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_register(struct ccw_device *cdev)
{
struct device *dev = &cdev->dev;
int ret;
int ccw_device_cancel_halt_clear(struct ccw_device *);
-int ccw_device_register(struct ccw_device *);
void ccw_device_do_unreg_rereg(void *);
void ccw_device_call_sch_unregister(void *);
dev = bus_find_device(&ap_bus_type, NULL,
(void *)(unsigned long)qid,
__ap_scan_bus);
+ rc = ap_query_queue(qid, &queue_depth, &device_type);
+ if (dev && rc) {
+ put_device(dev);
+ device_unregister(dev);
+ continue;
+ }
if (dev) {
put_device(dev);
continue;
}
- rc = ap_query_queue(qid, &queue_depth, &device_type);
if (rc)
continue;
rc = ap_init_queue(qid);
filp->f_flags |= O_LARGEFILE;
bdev = bd_acquire(inode);
+ if (bdev == NULL)
+ return -ENOMEM;
res = do_open(bdev, filp, BD_MUTEX_NORMAL);
if (res)
up_read(s_umount);
}
spin_unlock(&dentry->d_lock);
- /* Cannot remove the first dentry, and it isn't appropriate
- * to move it to the head of the list, so give up, and try
- * later
+ /*
+ * Insert dentry at the head of the list as inserting at the
+ * tail leads to a cycle.
*/
- break;
+ list_add(&dentry->d_lru, &dentry_unused);
+ dentry_stat.nr_unused++;
}
spin_unlock(&dcache_lock);
}
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
struct dentry *parent;
+ unsigned detached = 0;
BUG_ON(!IS_ROOT(dentry));
atomic_dec(&parent->d_count);
list_del(&dentry->d_u.d_child);
- dentry_stat.nr_dentry--; /* For d_free, below */
+ detached++;
inode = dentry->d_inode;
if (inode) {
* otherwise we ascend to the parent and move to the
* next sibling if there is one */
if (!parent)
- return;
+ goto out;
dentry = parent;
dentry = list_entry(dentry->d_subdirs.next,
struct dentry, d_u.d_child);
}
+out:
+ /* several dentries were freed, need to correct nr_dentry */
+ spin_lock(&dcache_lock);
+ dentry_stat.nr_dentry -= detached;
+ spin_unlock(&dcache_lock);
}
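
The detached counter turns one dentry_stat update per dentry into a single locked adjustment at the end of the walk. The same idiom in plain C, with a pthread mutex standing in for dcache_lock:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_objects;

void free_list(struct node *n)
{
	unsigned long detached = 0;

	while (n) {
		struct node *next = n->next;

		free(n);
		detached++;		/* count locally, lock-free */
		n = next;
	}

	/* one lock round trip instead of one per node */
	pthread_mutex_lock(&stat_lock);
	nr_objects -= detached;
	pthread_mutex_unlock(&stat_lock);
}
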
/*
else if (outside(input->block_bitmap, start, end))
ext4_warning(sb, __FUNCTION__,
"Block bitmap not in group (block %llu)",
- input->block_bitmap);
+ (unsigned long long)input->block_bitmap);
else if (outside(input->inode_bitmap, start, end))
ext4_warning(sb, __FUNCTION__,
"Inode bitmap not in group (block %llu)",
- input->inode_bitmap);
+ (unsigned long long)input->inode_bitmap);
else if (outside(input->inode_table, start, end) ||
outside(itend - 1, start, end))
ext4_warning(sb, __FUNCTION__,
"Inode table not in group (blocks %llu-%llu)",
- input->inode_table, itend - 1);
+ (unsigned long long)input->inode_table, itend - 1);
else if (input->inode_bitmap == input->block_bitmap)
ext4_warning(sb, __FUNCTION__,
"Block bitmap same as inode bitmap (%llu)",
- input->block_bitmap);
+ (unsigned long long)input->block_bitmap);
else if (inside(input->block_bitmap, input->inode_table, itend))
ext4_warning(sb, __FUNCTION__,
"Block bitmap (%llu) in inode table (%llu-%llu)",
- input->block_bitmap, input->inode_table, itend-1);
+ (unsigned long long)input->block_bitmap,
+ (unsigned long long)input->inode_table, itend - 1);
else if (inside(input->inode_bitmap, input->inode_table, itend))
ext4_warning(sb, __FUNCTION__,
"Inode bitmap (%llu) in inode table (%llu-%llu)",
- input->inode_bitmap, input->inode_table, itend-1);
+ (unsigned long long)input->inode_bitmap,
+ (unsigned long long)input->inode_table, itend - 1);
else if (inside(input->block_bitmap, start, metaend))
ext4_warning(sb, __FUNCTION__,
"Block bitmap (%llu) in GDT table"
" (%llu-%llu)",
- input->block_bitmap, start, metaend - 1);
+ (unsigned long long)input->block_bitmap,
+ start, metaend - 1);
else if (inside(input->inode_bitmap, start, metaend))
ext4_warning(sb, __FUNCTION__,
"Inode bitmap (%llu) in GDT table"
" (%llu-%llu)",
- input->inode_bitmap, start, metaend - 1);
+ (unsigned long long)input->inode_bitmap,
+ start, metaend - 1);
else if (inside(input->inode_table, start, metaend) ||
inside(itend - 1, start, metaend))
ext4_warning(sb, __FUNCTION__,
"Inode table (%llu-%llu) overlaps"
"GDT table (%llu-%llu)",
- input->inode_table, itend - 1, start, metaend - 1);
+ (unsigned long long)input->inode_table,
+ itend - 1, start, metaend - 1);
else
err = 0;
brelse(bh);
hugetlbfs_forget_inode(inode);
}
-/*
- * h_pgoff is in HPAGE_SIZE units.
- * vma->vm_pgoff is in PAGE_SIZE units.
- */
static inline void
-hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
+hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
{
struct vm_area_struct *vma;
struct prio_tree_iter iter;
- vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
- unsigned long h_vm_pgoff;
+ vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
unsigned long v_offset;
- h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
- v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
/*
- * Is this VMA fully outside the truncation point?
+ * Can the expression below overflow on 32-bit arches?
+ * No, because the prio_tree returns us only those vmas
+ * which overlap the truncated area starting at pgoff,
+ * and no vma on a 32-bit arch can span beyond the 4GB.
*/
- if (h_vm_pgoff >= h_pgoff)
+ if (vma->vm_pgoff < pgoff)
+ v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
+ else
v_offset = 0;
__unmap_hugepage_range(vma,
*/
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
- unsigned long pgoff;
+ pgoff_t pgoff;
struct address_space *mapping = inode->i_mapping;
if (offset > inode->i_size)
return -EINVAL;
BUG_ON(offset & ~HPAGE_MASK);
- pgoff = offset >> HPAGE_SHIFT;
+ pgoff = offset >> PAGE_SHIFT;
inode->i_size = offset;
spin_lock(&mapping->i_mmap_lock);
do_div(size, 100);
rest++;
}
- size &= HPAGE_MASK;
pconfig->nr_blocks = (size >> HPAGE_SHIFT);
value = rest;
} else if (!strcmp(opt,"nr_inodes")) {
*/
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
+
+ /* Now that we have bh_state locked, are we really still mapped? */
+ if (!buffer_mapped(bh)) {
+ JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
+ goto no_journal;
+ }
+
if (jh->b_transaction) {
JBUFFER_TRACE(jh, "has transaction");
if (jh->b_transaction != handle->h_transaction) {
sync_dirty_buffer(bh);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
+ /* Since we dropped the lock... */
+ if (!buffer_mapped(bh)) {
+ JBUFFER_TRACE(jh, "buffer got unmapped");
+ goto no_journal;
+ }
/* The buffer may become locked again at any
time if it is redirtied */
}
}
}
} else if (transaction == journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "on committing transaction");
if (jh->b_jlist == BJ_Locked) {
/*
* The buffer is on the committing transaction's locked
* can remove it's next_transaction pointer from the
* running transaction if that is set, but nothing
* else. */
- JBUFFER_TRACE(jh, "on committing transaction");
set_buffer_freed(bh);
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction ==
* i_size already for this truncate so recovery will not
* expose the disk blocks we are discarding here.) */
J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
+ JBUFFER_TRACE(jh, "on running transaction");
may_free = __dispose_buffer(jh, transaction);
}
*/
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
+
+ /* Now that we have bh_state locked, are we really still mapped? */
+ if (!buffer_mapped(bh)) {
+ JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
+ goto no_journal;
+ }
+
if (jh->b_transaction) {
JBUFFER_TRACE(jh, "has transaction");
if (jh->b_transaction != handle->h_transaction) {
sync_dirty_buffer(bh);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
+ /* Since we dropped the lock... */
+ if (!buffer_mapped(bh)) {
+ JBUFFER_TRACE(jh, "buffer got unmapped");
+ goto no_journal;
+ }
/* The buffer may become locked again at any
time if it is redirtied */
}
}
}
} else if (transaction == journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "on committing transaction");
if (jh->b_jlist == BJ_Locked) {
/*
* The buffer is on the committing transaction's locked
* can remove it's next_transaction pointer from the
* running transaction if that is set, but nothing
* else. */
- JBUFFER_TRACE(jh, "on committing transaction");
set_buffer_freed(bh);
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction ==
* i_size already for this truncate so recovery will not
* expose the disk blocks we are discarding here.) */
J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
+ JBUFFER_TRACE(jh, "on running transaction");
may_free = __dispose_buffer(jh, transaction);
}
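
Both journal hunks install the same rule: a condition checked under a lock must be re-checked after the lock has been dropped and retaken, because another CPU may have invalidated it in the window. A compact pthread sketch, with buffer_mapped reduced to a plain flag:

#include <pthread.h>

struct buf {
	pthread_mutex_t lock;
	int mapped;
};

int work_on(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	if (!b->mapped) {
		pthread_mutex_unlock(&b->lock);
		return -1;
	}
	pthread_mutex_unlock(&b->lock);

	/* ... sleepable work with the lock dropped ... */

	pthread_mutex_lock(&b->lock);
	if (!b->mapped) {	/* since we dropped the lock... */
		pthread_mutex_unlock(&b->lock);
		return -1;
	}
	/* state revalidated; safe to continue under the lock */
	pthread_mutex_unlock(&b->lock);
	return 0;
}
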
wait_on_page_writeback(page);
if (PagePrivate(page))
- try_to_release_page(page, mapping_gfp_mask(mapping));
+ try_to_release_page(page, GFP_KERNEL);
/*
* If we succeeded in removing the mapping, set LRU flag
break;
error = add_to_page_cache_lru(page, mapping, index,
- mapping_gfp_mask(mapping));
+ GFP_KERNEL);
if (unlikely(error)) {
page_cache_release(page);
if (error == -EEXIST)
{
struct file *file = sd->file;
struct address_space *mapping = file->f_mapping;
- gfp_t gfp_mask = mapping_gfp_mask(mapping);
unsigned int offset, this_len;
struct page *page;
pgoff_t index;
goto find_page;
page = buf->page;
- if (add_to_page_cache(page, mapping, index, gfp_mask)) {
+ if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
unlock_page(page);
goto find_page;
}
* This will also lock the page
*/
ret = add_to_page_cache_lru(page, mapping, index,
- gfp_mask);
+ GFP_KERNEL);
if (unlikely(ret))
goto out;
}
+++ /dev/null
-/* defines for inline arch setup functions */
-
-#include <asm/fixmap.h>
-#include <asm/i8259.h>
-#include "cobalt.h"
-
-static inline void do_timer_interrupt_hook(void)
-{
- /* Clear the interrupt */
- co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);
-
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode_vm(irq_regs));
-#endif
-/*
- * In the SMP case we use the local APIC timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifndef CONFIG_X86_LOCAL_APIC
- profile_tick(CPU_PROFILING);
-#else
- if (!using_apic_timer)
- smp_local_timer_interrupt();
-#endif
-}
-
-static inline int do_timer_overflow(int count)
-{
- int i;
-
- spin_lock(&i8259A_lock);
- /*
- * This is tricky when I/O APICs are used;
- * see do_timer_interrupt().
- */
- i = inb(0x20);
- spin_unlock(&i8259A_lock);
-
- /* assumption about timer being IRQ0 */
- if (i & 0x01) {
- /*
- * We cannot detect lost timer interrupts ...
- * well, that's why we call them lost, don't we? :)
- * [hmm, on the Pentium and Alpha we can ... sort of]
- */
- count -= LATCH;
- } else {
- printk("do_slow_gettimeoffset(): hardware timer problem?\n");
- }
- return count;
-}
{
}
+static inline int apicid_to_node(int logical_apicid)
+{
+ return 0;
+}
+
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#define BITS_TO_COMPAT_LONGS(bits) \
(((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG)
-long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
* under - it drives the swappiness decision: whether to unmap mapped
* pages.
*
- * temp_priority is used to remember the scanning priority at which
- * this zone was successfully refilled to free_pages == pages_high.
- *
- * Access to both these fields is quite racy even on uniprocessor. But
+ * Access to this field is quite racy even on uniprocessor. But
* it is expected to average out OK.
*/
- int temp_priority;
int prev_priority;
void release_pages(struct page **pages, int nr, int cold);
#ifdef CONFIG_NUMA
-extern struct page *page_cache_alloc(struct address_space *x);
-extern struct page *page_cache_alloc_cold(struct address_space *x);
+extern struct page *__page_cache_alloc(gfp_t gfp);
#else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return alloc_pages(gfp, 0);
+}
+#endif
+
static inline struct page *page_cache_alloc(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x), 0);
+ return __page_cache_alloc(mapping_gfp_mask(x));
}
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+ return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
-#endif
typedef int filler_t(void *, struct page *);
struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
- spinlock_t stats_lock;
struct taskstats *stats;
#endif
};
static inline void taskstats_tgid_init(struct signal_struct *sig)
{
- spin_lock_init(&sig->stats_lock);
sig->stats = NULL;
}
-static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+static inline void taskstats_tgid_alloc(struct task_struct *tsk)
{
+ struct signal_struct *sig = tsk->signal;
struct taskstats *stats;
- unsigned long flags;
- stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
- if (!stats)
+ if (sig->stats != NULL)
return;
- spin_lock_irqsave(&sig->stats_lock, flags);
+ /* No problem if kmem_cache_zalloc() fails */
+ stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
+
+ spin_lock_irq(&tsk->sighand->siglock);
if (!sig->stats) {
sig->stats = stats;
stats = NULL;
}
- spin_unlock_irqrestore(&sig->stats_lock, flags);
+ spin_unlock_irq(&tsk->sighand->siglock);
if (stats)
kmem_cache_free(taskstats_cache, stats);
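
The rewritten taskstats_tgid_alloc() is the classic allocate-outside-the-lock shape: do the possibly-sleeping allocation first, publish it under the lock only if nobody raced ahead, and free the loser's copy afterwards. A user-space rendering of the same shape:

#include <pthread.h>
#include <stdlib.h>

struct stats { long counter[16]; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct stats *shared;

void stats_alloc_once(void)
{
	struct stats *fresh;

	if (shared)			/* racy pre-check; re-tested below */
		return;

	fresh = calloc(1, sizeof(*fresh));	/* may sleep: done unlocked */

	pthread_mutex_lock(&lock);
	if (!shared) {
		shared = fresh;		/* we won the race */
		fresh = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(fresh);			/* no-op unless we lost */
}
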
static inline void taskstats_tgid_free(struct signal_struct *sig)
{
- struct taskstats *stats = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&sig->stats_lock, flags);
- if (sig->stats) {
- stats = sig->stats;
- sig->stats = NULL;
- }
- spin_unlock_irqrestore(&sig->stats_lock, flags);
- if (stats)
- kmem_cache_free(taskstats_cache, stats);
+ if (sig->stats)
+ kmem_cache_free(taskstats_cache, sig->stats);
}
extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
extern void taskstats_init_early(void);
-extern void taskstats_tgid_alloc(struct signal_struct *);
#else
static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{}
{}
static inline void taskstats_tgid_init(struct signal_struct *sig)
{}
-static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+static inline void taskstats_tgid_alloc(struct task_struct *tsk)
{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *get_vm_area_node(unsigned long size,
- unsigned long flags, int node);
+ unsigned long flags, int node,
+ gfp_t gfp_mask);
extern struct vm_struct *remove_vm_area(void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
? -EFAULT : 0;
}
-long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size)
{
int i, j;
p = __stop_machine_run(take_cpu_down, NULL, cpu);
mutex_unlock(&cpu_bitmask_lock);
- if (IS_ERR(p)) {
+ if (IS_ERR(p) || cpu_online(cpu)) {
/* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
(void *)(long)cpu) == NOTIFY_BAD)
BUG();
- err = PTR_ERR(p);
- goto out_allowed;
- }
-
- if (cpu_online(cpu))
+ if (IS_ERR(p)) {
+ err = PTR_ERR(p);
+ goto out_allowed;
+ }
goto out_thread;
+ }
/* Wait for it to sleep (leaving idle task). */
while (!idle_cpu(cpu))
flush_sigqueue(&tsk->pending);
if (sig) {
flush_sigqueue(&sig->shared_pending);
+ taskstats_tgid_free(sig);
__cleanup_signal(sig);
}
}
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live);
- taskstats_tgid_alloc(current->signal);
+ taskstats_tgid_alloc(current);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
void __cleanup_signal(struct signal_struct *sig)
{
exit_thread_group_keys(sig);
- taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
if (!license_is_gpl_compatible(license)) {
if (!(tainted & TAINT_PROPRIETARY_MODULE))
- printk(KERN_WARNING "%s: module license '%s' taints"
+ printk(KERN_WARNING "%s: module license '%s' taints "
"kernel.\n", mod->name, license);
add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
}
/*
* If new attributes are added, please revisit this allocation
*/
- skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL);
+ size = nlmsg_total_size(genlmsg_total_size(size));
+ skb = nlmsg_new(size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
up_write(&listeners->sem);
}
-static int fill_pid(pid_t pid, struct task_struct *pidtsk,
+static int fill_pid(pid_t pid, struct task_struct *tsk,
struct taskstats *stats)
{
int rc = 0;
- struct task_struct *tsk = pidtsk;
- if (!pidtsk) {
- read_lock(&tasklist_lock);
+ if (!tsk) {
+ rcu_read_lock();
tsk = find_task_by_pid(pid);
- if (!tsk) {
- read_unlock(&tasklist_lock);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
return -ESRCH;
- }
- get_task_struct(tsk);
- read_unlock(&tasklist_lock);
} else
get_task_struct(tsk);
}
-static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
+static int fill_tgid(pid_t tgid, struct task_struct *first,
struct taskstats *stats)
{
- struct task_struct *tsk, *first;
+ struct task_struct *tsk;
unsigned long flags;
+ int rc = -ESRCH;
/*
* Add additional stats from live tasks except zombie thread group
* leaders who are already counted with the dead tasks
*/
- first = tgidtsk;
- if (!first) {
- read_lock(&tasklist_lock);
+ rcu_read_lock();
+ if (!first)
first = find_task_by_pid(tgid);
- if (!first) {
- read_unlock(&tasklist_lock);
- return -ESRCH;
- }
- get_task_struct(first);
- read_unlock(&tasklist_lock);
- } else
- get_task_struct(first);
- /* Start with stats from dead tasks */
- spin_lock_irqsave(&first->signal->stats_lock, flags);
+ if (!first || !lock_task_sighand(first, &flags))
+ goto out;
+
if (first->signal->stats)
memcpy(stats, first->signal->stats, sizeof(*stats));
- spin_unlock_irqrestore(&first->signal->stats_lock, flags);
tsk = first;
- read_lock(&tasklist_lock);
do {
- if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
+ if (tsk->exit_state)
continue;
/*
* Accounting subsystem can call its functions here to
delayacct_add_tsk(stats, tsk);
} while_each_thread(first, tsk);
- read_unlock(&tasklist_lock);
- stats->version = TASKSTATS_VERSION;
+ unlock_task_sighand(first, &flags);
+ rc = 0;
+out:
+ rcu_read_unlock();
+
+ stats->version = TASKSTATS_VERSION;
/*
* Accounting subsytems can also add calls here to modify
* fields of taskstats.
*/
-
- return 0;
+ return rc;
}
{
unsigned long flags;
- spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
if (!tsk->signal->stats)
goto ret;
*/
delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
- spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
return;
}
return send_reply(rep_skb, info->snd_pid);
nla_put_failure:
- return genlmsg_cancel(rep_skb, reply);
+ rc = genlmsg_cancel(rep_skb, reply);
err:
nlmsg_free(rep_skb);
return rc;
size_t size;
int is_thread_group;
struct nlattr *na;
- unsigned long flags;
if (!family_registered || !tidstats)
return;
- spin_lock_irqsave(&tsk->signal->stats_lock, flags);
- is_thread_group = tsk->signal->stats ? 1 : 0;
- spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
rc = 0;
/*
* Size includes space for nested attributes
size = nla_total_size(sizeof(u32)) +
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+ is_thread_group = (tsk->signal->stats != NULL);
if (is_thread_group)
size = 2 * size; /* PID + STATS + TGID + STATS */
nla_put_failure:
genlmsg_cancel(rep_skb, reply);
- goto ret;
err_skb:
nlmsg_free(rep_skb);
ret:
time_adjust += MAX_TICKADJ;
tick_length -= MAX_TICKADJ_SCALED;
} else {
- time_adjust = 0;
tick_length += (s64)(time_adjust * NSEC_PER_USEC /
HZ) << TICK_LENGTH_SHIFT;
+ time_adjust = 0;
}
}
}
/* calculate task elapsed time in timespec */
do_posix_clock_monotonic_gettime(&uptime);
- ts = timespec_sub(uptime, current->group_leader->start_time);
+ ts = timespec_sub(uptime, tsk->start_time);
/* rebase elapsed time to usec */
ac_etime = timespec_to_ns(&ts);
do_div(ac_etime, NSEC_PER_USEC);
stats->ac_uid = tsk->uid;
stats->ac_gid = tsk->gid;
stats->ac_pid = tsk->pid;
- stats->ac_ppid = (tsk->parent) ? tsk->parent->pid : 0;
+ rcu_read_lock();
+ stats->ac_ppid = pid_alive(tsk) ?
+ rcu_dereference(tsk->real_parent)->tgid : 0;
+ rcu_read_unlock();
stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
stats->ac_minflt = tsk->min_flt;
* @wq: workqueue to use
* @work: work to queue
*
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
*
* We queue the work to the CPU it was submitted, but there is no
* guarantee that it will be processed by that CPU.
* @work: work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int fastcall queue_delayed_work(struct workqueue_struct *wq,
struct work_struct *work, unsigned long delay)
* @work: work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work, unsigned long delay)
return s;
end = s + size - 1;
- while (end != s && isspace(*end))
+ while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
}
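
The bound change from end != s to end >= s lets the scan step one position in front of the string, so input that is nothing but whitespace trims to empty instead of keeping its first blank. A runnable version of the fixed trim, mirroring the size == 0 guard visible above:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char *trim_end(char *s)
{
	size_t size = strlen(s);
	char *end;

	if (!size)
		return s;
	end = s + size - 1;
	while (end >= s && isspace((unsigned char)*end))
		end--;		/* may stop just before s for all-blank input */
	*(end + 1) = '\0';
	return s;
}

int main(void)
{
	char buf[] = "   ";

	printf("[%s]\n", trim_end(buf));	/* prints [] */
	return 0;
}
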
#ifdef CONFIG_NUMA
-struct page *page_cache_alloc(struct address_space *x)
+struct page *__page_cache_alloc(gfp_t gfp)
{
if (cpuset_do_page_mem_spread()) {
int n = cpuset_mem_spread_node();
- return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+ return alloc_pages_node(n, gfp, 0);
}
- return alloc_pages(mapping_gfp_mask(x), 0);
+ return alloc_pages(gfp, 0);
}
-EXPORT_SYMBOL(page_cache_alloc);
-
-struct page *page_cache_alloc_cold(struct address_space *x)
-{
- if (cpuset_do_page_mem_spread()) {
- int n = cpuset_mem_spread_node();
- return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
- }
- return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
-}
-EXPORT_SYMBOL(page_cache_alloc_cold);
+EXPORT_SYMBOL(__page_cache_alloc);
#endif
static int __sleep_on_page_lock(void *word)
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
struct page *page = find_get_page(mapping, index);
- gfp_t gfp_mask;
if (page) {
if (!TestSetPageLocked(page))
page_cache_release(page);
return NULL;
}
- gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
- page = alloc_pages(gfp_mask, 0);
- if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
+ page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+ if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
page_cache_release(page);
page = NULL;
}
retry:
page = find_lock_page(mapping, idx);
if (!page) {
+ size = i_size_read(mapping->host) >> HPAGE_SHIFT;
+ if (idx >= size)
+ goto out;
if (hugetlb_get_quota(mapping))
goto out;
page = alloc_huge_page(vma, address);
/* Account for ranges past physical memory on this node */
if (range_end_pfn > prev_end_pfn)
- hole_pages = range_end_pfn -
+ hole_pages += range_end_pfn -
max(range_start_pfn, prev_end_pfn);
return hole_pages;
zone->zone_pgdat = pgdat;
zone->free_pages = 0;
- zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+ zone->prev_priority = DEF_PRIORITY;
zone_pcp_init(zone);
INIT_LIST_HEAD(&zone->active_list);
struct page *page, *ret;
unsigned long memmap_size = sizeof(struct page) * nr_pages;
- page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
+ page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
if (page)
goto got_map_page;
return err;
}
-struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end, int node)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+ int node, gfp_t gfp_mask)
{
struct vm_struct **p, *tmp, *area;
unsigned long align = 1;
unsigned long addr;
+ BUG_ON(in_interrupt());
if (flags & VM_IOREMAP) {
int bit = fls(size);
addr = ALIGN(start, align);
size = PAGE_ALIGN(size);
- area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
+ area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
if (unlikely(!area))
return NULL;
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, flags, start, end, -1);
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
/**
return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+ int node, gfp_t gfp_mask)
{
- return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+ gfp_mask);
}
/* Caller must hold vmlist_lock */
if (!size || (size >> PAGE_SHIFT) > num_physpages)
return NULL;
- area = get_vm_area_node(size, VM_ALLOC, node);
+ area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
if (!area)
return NULL;
return nr_reclaimed;
}
+/*
+ * We are about to scan this zone at a certain priority level. If that priority
+ * level is smaller (ie: more urgent) than the previous priority, then note
+ * that priority level within the zone. This is done so that when the next
+ * process comes in to scan this zone, it will immediately start out at this
+ * priority level rather than having to build up its own scanning priority.
+ * Here, this priority affects only the reclaim-mapped threshold.
+ */
+static inline void note_zone_scanning_priority(struct zone *zone, int priority)
+{
+ if (priority < zone->prev_priority)
+ zone->prev_priority = priority;
+}
+
static inline int zone_is_near_oom(struct zone *zone)
{
return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
* But we had to alter page->flags anyway.
*/
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
- struct scan_control *sc)
+ struct scan_control *sc, int priority)
{
unsigned long pgmoved;
int pgdeactivate = 0;
* `distress' is a measure of how much trouble we're having
* reclaiming pages. 0 -> no problems. 100 -> great trouble.
*/
- distress = 100 >> zone->prev_priority;
+ distress = 100 >> min(zone->prev_priority, priority);
/*
* The point of this algorithm is to decide when to start
nr_to_scan = min(nr_active,
(unsigned long)sc->swap_cluster_max);
nr_active -= nr_to_scan;
- shrink_active_list(nr_to_scan, zone, sc);
+ shrink_active_list(nr_to_scan, zone, sc, priority);
}
if (nr_inactive) {
if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
- zone->temp_priority = priority;
- if (zone->prev_priority > priority)
- zone->prev_priority = priority;
+ note_zone_scanning_priority(zone, priority);
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
- zone->temp_priority = DEF_PRIORITY;
lru_pages += zone->nr_active + zone->nr_inactive;
}
if (!sc.all_unreclaimable)
ret = 1;
out:
+ /*
+ * Now that we've scanned all the zones at this priority level, note
+ * that level within the zone so that the next thread which performs
+ * scanning of this zone will immediately start out at this priority
+ * level. This affects only the decision whether or not to bring
+ * mapped pages onto the inactive list.
+ */
+ if (priority < 0)
+ priority = 0;
for (i = 0; zones[i] != 0; i++) {
struct zone *zone = zones[i];
if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
- zone->prev_priority = zone->temp_priority;
+ zone->prev_priority = priority;
}
return ret;
}
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
};
+ /*
+ * temp_priority is used to remember the scanning priority at which
+ * this zone was successfully refilled to free_pages == pages_high.
+ */
+ int temp_priority[MAX_NR_ZONES];
loop_again:
total_scanned = 0;
sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
- for (i = 0; i < pgdat->nr_zones; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- zone->temp_priority = DEF_PRIORITY;
- }
+ for (i = 0; i < pgdat->nr_zones; i++)
+ temp_priority[i] = DEF_PRIORITY;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
if (!zone_watermark_ok(zone, order, zone->pages_high,
end_zone, 0))
all_zones_ok = 0;
- zone->temp_priority = priority;
- if (zone->prev_priority > priority)
- zone->prev_priority = priority;
+ temp_priority[i] = priority;
sc.nr_scanned = 0;
+ note_zone_scanning_priority(zone, priority);
nr_reclaimed += shrink_zone(priority, zone, &sc);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
break;
}
out:
+ /*
+ * Note within each zone the priority level at which this zone was
+ * brought into a happy state. So that the next thread which scans this
+ * zone will start out at that priority level.
+ */
for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i;
- zone->prev_priority = zone->temp_priority;
+ zone->prev_priority = temp_priority[i];
}
if (!all_zones_ok) {
cond_resched();
if (zone->nr_scan_active >= nr_pages || pass > 3) {
zone->nr_scan_active = 0;
nr_to_scan = min(nr_pages, zone->nr_active);
- shrink_active_list(nr_to_scan, zone, sc);
+ shrink_active_list(nr_to_scan, zone, sc, prio);
}
}
*/
priority = ZONE_RECLAIM_PRIORITY;
do {
+ note_zone_scanning_priority(zone, priority);
nr_reclaimed += shrink_zone(priority, zone, &sc);
priority--;
} while (priority >= 0 && nr_reclaimed < nr_pages);
seq_printf(m,
"\n all_unreclaimable: %u"
"\n prev_priority: %i"
- "\n temp_priority: %i"
"\n start_pfn: %lu",
zone->all_unreclaimable,
zone->prev_priority,
- zone->temp_priority,
zone->zone_start_pfn);
spin_unlock_irqrestore(&zone->lock, flags);
seq_putc(m, '\n');