* considered part of another segment, since that might
* change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
if (high || highprv)
goto new_hw_segment;
if (cluster) {
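
This hunk, together with the blk_dev_init() change further down, treats
q->bounce_pfn as an inclusive limit: the highest page frame the device can
address directly. A page sitting exactly at the limit is fine, and only pages
strictly above it need a bounce buffer. A minimal sketch of the intended
semantics, using a hypothetical helper name (not kernel API):

    static inline int needs_bounce(unsigned long pfn, unsigned long bounce_pfn)
    {
            return pfn > bounce_pfn;    /* '>=' would also bounce the last
                                           directly addressable page */
    }
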
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->unplug_timer);
- kblockd_flush();
}
EXPORT_SYMBOL(blk_sync_queue);
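
With kblockd_flush() gone, blk_sync_queue() no longer drains every item on the
shared kblockd workqueue, only its own timer; a caller that must also wait for
a specific pending work item would use the new kblockd_flush_work() introduced
below. A hedged sketch of that pattern, assuming the unplug_work member that
struct request_queue carries in this era:

    static void example_sync_queue(struct request_queue *q)
    {
            del_timer_sync(&q->unplug_timer);
            kblockd_flush_work(&q->unplug_work);    /* this queue's item only */
    }
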
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+ q->sg_reserved_size = INT_MAX;
+
/*
* all done
*/
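
q->sg_reserved_size backs the SG_GET_RESERVED_SIZE / SG_SET_RESERVED_SIZE
ioctls of the SG_IO path; starting it at INT_MAX means the queue imposes no
artificial cap until userspace installs one. A hedged userspace sketch that
reads the value back (the device path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    int main(void)
    {
            int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */
            int sz = 0;

            if (fd < 0)
                    return 1;
            if (ioctl(fd, SG_GET_RESERVED_SIZE, &sz) == 0)
                    printf("reserved size: %d bytes\n", sz);
            close(fd);
            return 0;
    }
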
bio->bi_rw |= (1 << BIO_RW);
blk_rq_bio_prep(q, rq, bio);
+ blk_queue_bounce(q, &rq->bio);
rq->buffer = rq->data = NULL;
return 0;
}
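
Buffers handed to blk_rq_map_kern() come from kmalloc() and friends and may
land above the queue's bounce limit, so the bio built over them has to pass
through blk_queue_bounce() before the request is issued. A hedged sketch of a
typical caller, assuming the blk_get_request()/blk_execute_rq() signatures of
this era (command setup elided):

    static int example_send(struct request_queue *q, struct gendisk *disk,
                            void *buf, unsigned int len)
    {
            struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
            int err;

            if (!rq)
                    return -ENOMEM;
            err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
            if (!err)
                    err = blk_execute_rq(q, disk, rq, 0);   /* waits for completion */
            blk_put_request(rq);
            return err;
    }
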
* If a CPU goes away, splice its entries to the current CPU
* and trigger a run of the softirq
*/
- if (action == CPU_DEAD) {
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
int cpu = (unsigned long) hcpu;
local_irq_disable();
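
Since the suspend/resume rework of CPU hotplug, notifiers receive _FROZEN
variants of each event while tasks are frozen; a callback that matches only
CPU_DEAD silently skips this teardown during suspend. The common pattern,
sketched with illustrative names:

    static int example_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
    {
            switch (action) {
            case CPU_DEAD:
            case CPU_DEAD_FROZEN:
                    /* migrate per-cpu state away from CPU (unsigned long)hcpu */
                    break;
            }
            return NOTIFY_OK;
    }
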
EXPORT_SYMBOL(kblockd_schedule_work);
-void kblockd_flush(void)
+void kblockd_flush_work(struct work_struct *work)
{
- flush_workqueue(kblockd_workqueue);
+ cancel_work_sync(work);
}
-EXPORT_SYMBOL(kblockd_flush);
+EXPORT_SYMBOL(kblockd_flush_work);
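
The replacement changes semantics as well as the name: flush_workqueue()
waited for every item queued on kblockd, which invites deadlock when the
caller holds a lock that some unrelated work item also wants, while
cancel_work_sync() dequeues the one given item, or waits only for its
in-flight invocation. A self-contained sketch with illustrative names:

    static void example_fn(struct work_struct *work)
    {
            /* ... per-item processing ... */
    }

    static DECLARE_WORK(example_work, example_fn);

    static void example_stop(void)
    {
            cancel_work_sync(&example_work);    /* this item only, not the
                                                   whole kblockd queue */
    }
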
int __init blk_dev_init(void)
{
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
register_hotcpu_notifier(&blk_cpu_notifier);
- blk_max_low_pfn = max_low_pfn;
- blk_max_pfn = max_pfn;
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
return 0;
}
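
max_pfn and max_low_pfn count pages, i.e. they sit one past the last valid
page frame number, so using them directly as inclusive limits let one
out-of-range page slip through; the -1 here pairs with the '>=' to '>' change
in the first hunk. The arithmetic, as a standalone illustration with made-up
sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned long max_pfn = 0x100000;           /* 4 GiB of 4 KiB pages */
            unsigned long blk_max_pfn = max_pfn - 1;    /* highest valid pfn */

            /* pfn == blk_max_pfn is addressable, so the '>' test keeps
             * the last page in place instead of bouncing it */
            printf("valid pfns: 0 .. %#lx\n", blk_max_pfn);
            return 0;
    }
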
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic_root.rb_node = NULL;
+ ret->ioc_data = NULL;
/* make sure set_task_ioprio() sees the settings above */
smp_wmb();
tsk->io_context = ret;
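
ioc_data is a last-hit lookup cache hung off the io_context (presumably for
CFQ's cic lookup); like aic and cic_root before it, it must be NULLed before
the smp_wmb() publishes the context, or another CPU could follow a garbage
pointer. A hedged sketch of the publish pattern, with illustrative names:

    struct example_ctx {
            void *cache;    /* last-hit lookup cache, may be NULL */
    };

    static void example_publish(struct example_ctx **slot, struct example_ctx *ctx)
    {
            ctx->cache = NULL;      /* never publish an uninitialised field */
            smp_wmb();              /* order the init stores ...            */
            *slot = ctx;            /* ... before making ctx visible        */
    }
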