/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)
#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10
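/*
 * Illustrative note: NVME_DEFAULT_KATO is in seconds.  The fabrics
 * connect path is expected to advertise the keep-alive timeout with the
 * grace period added, i.e. roughly (kato + NVME_KATO_GRACE) * 1000
 * milliseconds on the wire, so a target does not give up on a host
 * whose keep-alive command arrives slightly late.
 */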
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),
};
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			flags;
	u16			status;
};
/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
};
static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
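/*
 * Illustrative sketch (hypothetical "demo" transport, not part of this
 * header): drivers lead their per-request PDU with struct nvme_request
 * so that nvme_req() can recover it from a generic struct request:
 *
 *	struct demo_request {
 *		struct nvme_request	req;		// must be first
 *		dma_addr_t		first_dma;	// driver-private
 *	};
 *
 * With .cmd_size = sizeof(struct demo_request) in the blk-mq tag set,
 * nvme_req(rq) and blk_mq_rq_to_pdu(rq) name the same memory.
 */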
/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
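/*
 * Illustrative use (the real quirk table lives in the PCI transport):
 *
 *	{ PCI_DEVICE(0x1c58, 0x0003),
 *	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *
 * With the quirk set, the controller enable/disable path is expected to
 * msleep(NVME_QUIRK_DELAY_AMOUNT) before polling NVME_CSTS_RDY.
 */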
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};
struct nvme_ctrl {
	enum nvme_ctrl_state state;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	atomic_t abort_limit;
	unsigned int shutdown_timeout;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;

	/* Power saving configuration */
	u64 ps_max_latency_us;

	struct nvmf_ctrl_options *opts;
};
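/*
 * Illustrative sketch (hypothetical "demo" transport): drivers embed
 * struct nvme_ctrl in their own controller structure and convert back
 * with container_of():
 *
 *	struct demo_ctrl {
 *		struct demo_queue	*queues;
 *		struct nvme_ctrl	ctrl;
 *	};
 *
 *	static inline struct demo_ctrl *to_demo_ctrl(struct nvme_ctrl *ctrl)
 *	{
 *		return container_of(ctrl, struct demo_ctrl, ctrl);
 *	}
 */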
struct nvme_subsystem {
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			firmware_rev[8];
};
/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};
/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, i.e.
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct nvme_ns __rcu	*current_path;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
#endif
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
};
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct list_head siblings;
	struct nvm_dev *ndev;

	struct nvme_ns_head *head;

	int lba_shift;
	unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
};
struct nvme_ctrl_ops {
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	int (*reinit_request)(void *data, struct request *rq);
};
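/*
 * Illustrative sketch (hypothetical "demo" transport; the real ops
 * tables live in the transport drivers): a memory-mapped transport
 * fills in the register accessors, while fabrics transports also set
 * NVME_F_FABRICS and ->get_address:
 *
 *	static const struct nvme_ctrl_ops demo_ctrl_ops = {
 *		.module			= THIS_MODULE,
 *		.flags			= NVME_F_METADATA_SUPPORTED,
 *		.reg_read32		= demo_reg_read32,
 *		.reg_write32		= demo_reg_write32,
 *		.reg_read64		= demo_reg_read64,
 *		.free_ctrl		= demo_free_ctrl,
 *		.submit_async_event	= demo_submit_async_event,
 *	};
 */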
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;

	/* The NSSR value is "NVMe" in ASCII, as required by the spec */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
/* Convert a 512-byte sector number to the namespace's native LBA unit */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
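/*
 * Example: for a namespace formatted with 4096-byte LBAs, lba_shift is
 * 12, so 512-byte sector 24 maps to LBA 24 >> (12 - 9) = 3.
 */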
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}
static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	/* Bit 0 of the completion status field is the phase tag; strip it */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	blk_mq_complete_request(req);
}
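/*
 * Illustrative sketch (hypothetical completion-queue handler, mirroring
 * what the transports do when reaping a completion queue entry):
 *
 *	struct nvme_completion *cqe = &cq[head];
 *	struct request *req = blk_mq_tag_to_rq(tags, cqe->command_id);
 *
 *	nvme_end_request(req, cqe->status, cqe->result);
 */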
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
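/*
 * Illustrative sketch (hypothetical caller): issuing an Identify
 * Controller command through the synchronous helper, much like the
 * core's own identify handling:
 *
 *	struct nvme_command c = { };
 *	struct nvme_id_ctrl *id = kmalloc(sizeof(*id), GFP_KERNEL);
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */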
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head && ns == srcu_dereference(head->current_path, &head->srcu))
		rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}
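/*
 * Illustrative sketch of path selection under SRCU, mirroring how the
 * mpath device node is expected to submit I/O (abbreviated):
 *
 *	int srcu_idx = srcu_read_lock(&head->srcu);
 *	struct nvme_ns *ns = nvme_find_path(head);
 *
 *	if (ns)
 *		// submit the bio to ns->queue
 *	else
 *		// park the bio on head->requeue_list
 *	srcu_read_unlock(&head->srcu, srcu_idx);
 */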
#else /* CONFIG_NVME_MULTIPATH */
static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req,
					   blk_status_t error)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
							unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
int __init nvme_core_init(void);
void nvme_core_exit(void);