[S390] Missing initialization in common i/o layer.
[powerpc.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/jiffies.h>
14 #include <linux/string.h>
15
16 #include <asm/ccwdev.h>
17 #include <asm/cio.h>
18
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "css.h"
22 #include "device.h"
23 #include "chsc.h"
24 #include "ioasm.h"
25
26 int
27 device_is_online(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_ONLINE);
35 }
36
37 int
38 device_is_disconnected(struct subchannel *sch)
39 {
40         struct ccw_device *cdev;
41
42         if (!sch->dev.driver_data)
43                 return 0;
44         cdev = sch->dev.driver_data;
45         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47 }
48
49 void
50 device_set_disconnected(struct subchannel *sch)
51 {
52         struct ccw_device *cdev;
53
54         if (!sch->dev.driver_data)
55                 return;
56         cdev = sch->dev.driver_data;
57         ccw_device_set_timeout(cdev, 0);
58         cdev->private->flags.fake_irb = 0;
59         cdev->private->state = DEV_STATE_DISCONNECTED;
60 }
61
62 void
63 device_set_waiting(struct subchannel *sch)
64 {
65         struct ccw_device *cdev;
66
67         if (!sch->dev.driver_data)
68                 return;
69         cdev = sch->dev.driver_data;
70         ccw_device_set_timeout(cdev, 10*HZ);
71         cdev->private->state = DEV_STATE_WAIT4IO;
72 }
73
74 /*
75  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
76  */
77 static void
78 ccw_device_timeout(unsigned long data)
79 {
80         struct ccw_device *cdev;
81
82         cdev = (struct ccw_device *) data;
83         spin_lock_irq(cdev->ccwlock);
84         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
85         spin_unlock_irq(cdev->ccwlock);
86 }
87
/*
 * Set timeout
 *
 * expires == 0 cancels a pending timer. Otherwise (re)arm the device's
 * private timer to call ccw_device_timeout() after 'expires' jiffies.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns non-zero if the timer was still
		 * pending and has now been re-armed - nothing left to do. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	/* Timer was not pending (or expired between the two checks
	 * above): set it up from scratch. */
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
107
108 /* Kill any pending timers after machine check. */
109 void
110 device_kill_pending_timer(struct subchannel *sch)
111 {
112         struct ccw_device *cdev;
113
114         if (!sch->dev.driver_data)
115                 return;
116         cdev = sch->dev.driver_data;
117         ccw_device_set_timeout(cdev, 0);
118 }
119
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel status block before looking at it. */
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				/* 0 means halt accepted: an interrupt is
				 * expected, so report -EBUSY to the caller. */
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All 255 clear attempts used up - give up. */
	panic("Can't stop i/o on subchannel.\n");
}
169
/*
 * Finish recognition of a device that became operational again after a
 * disconnect. Returns 1 if it is still the same device (driver will be
 * notified later via ccw_device_done), 0 if another device was plugged
 * in and de-/re-registration has been queued.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	/* Same device: request oper notification from ccw_device_done(). */
	cdev->private->flags.donotify = 1;
	return 1;
}
195
196 /*
197  * The machine won't give us any notification by machine check if a chpid has
198  * been varied online on the SE so we have to find out by magic (i. e. driving
199  * the channel subsystem to device selection and updating our path masks).
200  */
201 static inline void
202 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
203 {
204         int mask, i;
205
206         for (i = 0; i<8; i++) {
207                 mask = 0x80 >> i;
208                 if (!(sch->lpm & mask))
209                         continue;
210                 if (old_lpm & mask)
211                         continue;
212                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
213         }
214 }
215
/*
 * Stop device recognition.
 *
 * Called with the final recognition result in 'state' (OFFLINE, BOXED
 * or NOT_OPER). Refreshes the path information, handles the special
 * case of recognition after a disconnect, logs the result and moves the
 * device into its new state.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still gone: stay disconnected, keep the device. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared: check whether it is still the
			 * same one and whether the driver wants it back. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->ssid, cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
304
305 /*
306  * Function called from device_id.c after sense id has completed.
307  */
308 void
309 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
310 {
311         switch (err) {
312         case 0:
313                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
314                 break;
315         case -ETIME:            /* Sense id stopped by timeout. */
316                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
317                 break;
318         default:
319                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
320                 break;
321         }
322 }
323
324 static void
325 ccw_device_oper_notify(void *data)
326 {
327         struct ccw_device *cdev;
328         struct subchannel *sch;
329         int ret;
330
331         cdev = (struct ccw_device *)data;
332         sch = to_subchannel(cdev->dev.parent);
333         ret = (sch->driver && sch->driver->notify) ?
334                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
335         if (!ret)
336                 /* Driver doesn't want device back. */
337                 ccw_device_do_unreg_rereg((void *)cdev);
338         else {
339                 /* Reenable channel measurements, if needed. */
340                 cmf_reenable(cdev);
341                 wake_up(&cdev->private->wait_q);
342         }
343 }
344
/*
 * Finished with online/offline processing.
 *
 * Move the device to 'state', reset the accumulated irb, queue the
 * oper-notify work if it was requested, and wake up waiters. When the
 * device does not end up online, the reference taken in
 * ccw_device_online() is dropped again.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Balance the get_device() from ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
380
381 static inline int cmp_pgid(struct pgid *p1, struct pgid *p2)
382 {
383         char *c1;
384         char *c2;
385
386         c1 = (char *)p1;
387         c2 = (char *)p2;
388
389         return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
390 }
391
/*
 * Derive one common path group id from the pgids sensed on the
 * individual paths and store it in pgid[0]. If two non-reset pgids
 * conflict, path grouping is disabled for this device. If no path
 * returned a pgid at all, the global pgid of css 0 is used.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;	/* index of the first non-reset pgid seen so far */

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->ssid, cdev->private->devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}
430
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. On (partial) success the common pgid is computed and path
 * verification is started; timeouts and reservations box the device.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	sch->vpm = 0;	/* Start with no path groups set. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
462
/*
 * Start device recognition.
 *
 * Only valid from the NOT_OPER or BOXED states. Enables the subchannel
 * for i/o, arms a 60 second timeout and kicks off sense id. Returns 0
 * on success or the error from cio_enable_subchannel().
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
497
498 /*
499  * Handle timeout in device recognition.
500  */
501 static void
502 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
503 {
504         int ret;
505
506         ret = ccw_device_cancel_halt_clear(cdev);
507         switch (ret) {
508         case 0:
509                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
510                 break;
511         case -ENODEV:
512                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
513                 break;
514         default:
515                 ccw_device_set_timeout(cdev, 3*HZ);
516         }
517 }
518
519
/*
 * Workqueue function, run when no path to the device is left. Asks the
 * subchannel driver whether it wants to keep the device; if not, the
 * subchannel is disabled and unregistration is queued, otherwise the
 * device is parked in the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		/* Reference on sch keeps it alive until unregistration. */
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			/* Reference on cdev for the workqueue function. */
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* No cdev reference: give sch back. */
				put_device(&sch->dev);
		}
	} else {
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
555
/*
 * Called when path verification has finished. On success the device
 * goes online and a pending fake irb (from a start request issued while
 * disconnected) is delivered to the driver. Timeouts box the device,
 * everything else makes it not operational.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path at all: notify and go not-oper. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
590
/*
 * Get device online.
 *
 * Only valid from the OFFLINE or BOXED states. Takes a device
 * reference (dropped again in ccw_device_done() if the device does not
 * reach the online state), enables the subchannel and starts either
 * sense pgid or, if path grouping is disabled, path verification.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
625
626 void
627 ccw_device_disband_done(struct ccw_device *cdev, int err)
628 {
629         switch (err) {
630         case 0:
631                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
632                 break;
633         case -ETIME:
634                 ccw_device_done(cdev, DEV_STATE_BOXED);
635                 break;
636         default:
637                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
638                 break;
639         }
640 }
641
642 /*
643  * Shutdown device.
644  */
645 int
646 ccw_device_offline(struct ccw_device *cdev)
647 {
648         struct subchannel *sch;
649
650         sch = to_subchannel(cdev->dev.parent);
651         if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
652                 return -ENODEV;
653         if (cdev->private->state != DEV_STATE_ONLINE) {
654                 if (sch->schib.scsw.actl != 0)
655                         return -EBUSY;
656                 return -EINVAL;
657         }
658         if (sch->schib.scsw.actl != 0)
659                 return -EBUSY;
660         /* Are we doing path grouping? */
661         if (!cdev->private->options.pgroup) {
662                 /* No, set state offline immediately. */
663                 sch->vpm = 0;
664                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
665                 return 0;
666         }
667         /* Start Set Path Group commands. */
668         cdev->private->state = DEV_STATE_DISBAND_PGID;
669         ccw_device_disband_start(cdev);
670         return 0;
671 }
672
673 /*
674  * Handle timeout in device online/offline process.
675  */
676 static void
677 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
678 {
679         int ret;
680
681         ret = ccw_device_cancel_halt_clear(cdev);
682         switch (ret) {
683         case 0:
684                 ccw_device_done(cdev, DEV_STATE_BOXED);
685                 break;
686         case -ENODEV:
687                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
688                 break;
689         default:
690                 ccw_device_set_timeout(cdev, 3*HZ);
691         }
692 }
693
/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Device vanished mid-recognition; finish with not-oper result. */
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
702
/*
 * Handle not operational event while offline.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	/* Take a reference so cdev stays valid while the unregister work
	 * is pending; only queue the work if that succeeds. */
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
720
721 /*
722  * Handle not operational event while online.
723  */
724 static void
725 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
726 {
727         struct subchannel *sch;
728
729         sch = to_subchannel(cdev->dev.parent);
730         if (sch->driver->notify &&
731             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
732                         ccw_device_set_timeout(cdev, 0);
733                         cdev->private->flags.fake_irb = 0;
734                         cdev->private->state = DEV_STATE_DISCONNECTED;
735                         wake_up(&cdev->private->wait_q);
736                         return;
737         }
738         cdev->private->state = DEV_STATE_NOT_OPER;
739         cio_disable_subchannel(sch);
740         if (sch->schib.scsw.actl != 0) {
741                 // FIXME: not-oper indication to device driver ?
742                 ccw_device_call_handler(cdev);
743         }
744         if (get_device(&cdev->dev)) {
745                 PREPARE_WORK(&cdev->private->kick_work,
746                              ccw_device_call_sch_unregister, (void *)cdev);
747                 queue_work(ccw_device_work, &cdev->private->kick_work);
748         }
749         wake_up(&cdev->private->wait_q);
750 }
751
/*
 * Handle path verification event.
 *
 * Starts path verification if the device is idle; otherwise only sets
 * the doverify flag so that verification is retried once the final
 * status has been delivered to the driver.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense still outstanding: postpone verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
786
/*
 * Got an interrupt for a normal io (state online).
 *
 * Reads the irb from lowcore, handles unsolicited interrupts (possibly
 * starting a basic sense for a unit check without sense data),
 * accumulates status and calls the driver's handler when a final status
 * is available.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The hardware stores the irb in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Save the irb; sense result is merged in later. */
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Deliver the unsolicited interrupt with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
828
/*
 * Got an timeout in online state.
 *
 * Tries to cancel the running i/o; if that is still in progress, the
 * fsm moves to TIMEOUT_KILL and retries in 3 seconds. On -ENODEV the
 * nopath/notoper machinery is triggered, otherwise the driver gets an
 * -ETIMEDOUT indication.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear pending: wait for the interrupt. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left: let the driver decide. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
859
860 /*
861  * Got an interrupt for a basic sense.
862  */
863 void
864 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
865 {
866         struct irb *irb;
867
868         irb = (struct irb *) __LC_IRB;
869         /* Check for unsolicited interrupt. */
870         if (irb->scsw.stctl ==
871                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
872                 if (irb->scsw.cc == 1)
873                         /* Basic sense hasn't started. Try again. */
874                         ccw_device_do_sense(cdev, irb);
875                 else {
876                         printk("Huh? %s(%s): unsolicited interrupt...\n",
877                                __FUNCTION__, cdev->dev.bus_id);
878                         if (cdev->handler)
879                                 cdev->handler (cdev, 0, irb);
880                 }
881                 return;
882         }
883         /*
884          * Check if a halt or clear has been issued in the meanwhile. If yes,
885          * only deliver the halt/clear interrupt to the device driver as if it
886          * had killed the original request.
887          */
888         if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
889                 cdev->private->flags.dosense = 0;
890                 memset(&cdev->private->irb, 0, sizeof(struct irb));
891                 ccw_device_accumulate_irb(cdev, irb);
892                 goto call_handler;
893         }
894         /* Add basic sense info to irb. */
895         ccw_device_accumulate_basic_sense(cdev, irb);
896         if (cdev->private->flags.dosense) {
897                 /* Another basic sense is needed. */
898                 ccw_device_do_sense(cdev, irb);
899                 return;
900         }
901 call_handler:
902         cdev->private->state = DEV_STATE_ONLINE;
903         /* Call the handler. */
904         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
905                 /* Start delayed path verification. */
906                 ccw_device_online_verify(cdev, 0);
907 }
908
909 static void
910 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
911 {
912         struct irb *irb;
913
914         irb = (struct irb *) __LC_IRB;
915         /* Accumulate status. We don't do basic sense. */
916         ccw_device_accumulate_irb(cdev, irb);
917         /* Remember to clear irb to avoid residuals. */
918         memset(&cdev->private->irb, 0, sizeof(struct irb));
919         /* Try to start delayed device verification. */
920         ccw_device_online_verify(cdev, 0);
921         /* Note: Don't call handler for cio initiated clear! */
922 }
923
/*
 * Interrupt arrived while we were killing a timed-out i/o request via
 * cancel/halt/clear: the i/o is now dead.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No path left - notify the driver via the workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
944
/*
 * Timeout while killing an i/o request: retry the cancel/halt/clear,
 * or give up if the device went away.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Subchannel still busy; retry in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left - notify the driver from process
			 * context via the notify workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
974
/*
 * Got an interrupt while waiting for outstanding i/o to terminate:
 * accumulate the status, run basic sense if required and finally pass
 * the result to the device driver.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->schid, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No path left - notify the driver via the workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
1008
/*
 * Timeout while waiting for outstanding i/o to terminate: try to kill
 * it via cancel/halt/clear and report -ETIMEDOUT to the driver.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Subchannel still busy; retry the kill in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			/* No path left - notify the driver from process
			 * context via the notify workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/o is dead now; report the timeout to the driver. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No path left - notify the driver via the workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
1044
/*
 * A verify event arrived while i/o is still outstanding; just record
 * it so verification starts once the i/o has terminated.
 */
static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* When the I/O has terminated, we have to start verification. */
	cdev->private->flags.doverify = 1;
}
1051
/*
 * The steal-lock operation finished (interrupt or timeout); wake up
 * the process sleeping on the device's wait queue.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1077
/*
 * Restart device recognition for a disconnected device: enable the
 * subchannel for i/o and kick off sense id.
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
1094
/*
 * Trigger renewed device recognition for a disconnected device, e.g.
 * after a channel path became available again. Only acts if the device
 * is currently in state disconnected.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* Enable multipath mode if more than one path is installed. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1128
/*
 * Got an interrupt in state offline.
 */
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}
1141
/*
 * Retry a pending channel measurement facility state change, then go
 * back to state online and re-deliver the triggering event.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1149
/*
 * Retry a pending copy of the channel measurement block, then go back
 * to state online and re-deliver the triggering event.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1157
1158 static void
1159 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1160 {
1161         ccw_device_set_timeout(cdev, 0);
1162         if (dev_event == DEV_EVENT_NOTOPER)
1163                 cdev->private->state = DEV_STATE_NOT_OPER;
1164         else
1165                 cdev->private->state = DEV_STATE_OFFLINE;
1166         wake_up(&cdev->private->wait_q);
1167 }
1168
1169 static void
1170 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1171 {
1172         int ret;
1173
1174         ret = ccw_device_cancel_halt_clear(cdev);
1175         switch (ret) {
1176         case 0:
1177                 cdev->private->state = DEV_STATE_OFFLINE;
1178                 wake_up(&cdev->private->wait_q);
1179                 break;
1180         case -ENODEV:
1181                 cdev->private->state = DEV_STATE_NOT_OPER;
1182                 wake_up(&cdev->private->wait_q);
1183                 break;
1184         default:
1185                 ccw_device_set_timeout(cdev, HZ/10);
1186         }
1187 }
1188
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline. Intentionally empty.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
1197
/*
 * Bug operation action. Installed for state/event combinations that
 * must never occur; reports the combination and crashes.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
	BUG();
}
1208
/*
 * device statemachine: for each device state, the action to perform
 * for each of the four event types (not operational, interrupt,
 * timeout, verify).
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states for retrying channel measurement operations */
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};
1318
1319 /*
1320  * io_subchannel_irq is called for "real" interrupts or for status
1321  * pending conditions on msch.
1322  */
1323 void
1324 io_subchannel_irq (struct device *pdev)
1325 {
1326         struct ccw_device *cdev;
1327
1328         cdev = to_subchannel(pdev)->dev.driver_data;
1329
1330         CIO_TRACE_EVENT (3, "IRQ");
1331         CIO_TRACE_EVENT (3, pdev->bus_id);
1332         if (cdev)
1333                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1334 }
1335
1336 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);