int desc; /* the current descriptor */
struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
+ struct device *dev;
};
enum {
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_zalloc_coherent(NULL,
+ ch->desc_base = dma_zalloc_coherent(ch->dev,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
if (!ch->desc_base)
return;
ltq_dma_close(ch);
- dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
- extern char __initramfs_start[];
- extern unsigned long __initramfs_size;
unsigned long size;
- if (__initramfs_size > 0) {
- initrd_start = (unsigned long)(&__initramfs_start);
- initrd_end = initrd_start + __initramfs_size;
- }
-
if (initrd_start >= initrd_end) {
printk(KERN_INFO "initrd not found or empty");
goto disable;
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+ if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+ usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
- goto err;
+ goto alloc_err;
}
parser->device = device;
hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
+ kfree(parser->collection_stack);
vfree(parser);
device->status |= HID_STAT_PARSED;
return 0;
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
+ kfree(parser->collection_stack);
+alloc_err:
vfree(parser);
hid_close_report(device);
return ret;
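A note on the unwind ordering above: the new alloc_err label exists so the vzalloc() failure path never reaches kfree(parser->collection_stack) through a NULL parser, while the err path frees in reverse allocation order. A minimal sketch of the two-label pattern, with hypothetical names (free functions that, like vfree(), accept NULL):

	outer = alloc_outer();
	if (!outer)
		goto alloc_err;            /* nothing else to unwind yet */
	if (do_parse(outer) < 0)
		goto err;                  /* outer->inner may exist now */
	return 0;

err:
	free_inner(outer->inner);          /* outer is known valid here */
alloc_err:
	free_outer(outer);                 /* like vfree(), NULL is a no-op */
	return ret;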
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
#define USB_VENDOR_ID_APPLE 0x05ac
+#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
-#define I2C_VENDOR_ID_RAYD 0x2386
-#define I2C_PRODUCT_ID_RAYD_3118 0x3118
-
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
input_dev->dev.parent = &hid->dev;
hidinput->input = input_dev;
+ hidinput->application = application;
list_add_tail(&hidinput->list, &hid->inputs);
INIT_LIST_HEAD(&hidinput->reports);
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hid->inputs, list) {
- if (hidinput->report &&
- hidinput->report->application == report->application)
+ if (hidinput->application == report->application)
return hidinput;
}
input_unregister_device(hidinput->input);
else
input_free_device(hidinput->input);
+ kfree(hidinput->name);
kfree(hidinput);
}
struct hid_usage *usage,
enum latency_mode latency,
bool surface_switch,
- bool button_switch)
+ bool button_switch,
+ bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
switch (usage->hid) {
case HID_DG_INPUTMODE:
+ /*
+ * Some elan panels wrongly declare 2 input mode features,
+ * and silently ignore when we set the value in the second
+ * field. Skip the second feature and hope for the best.
+ */
+ if (*inputmode_found)
+ return false;
+
if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
report_len = hid_report_len(report);
buf = hid_alloc_report_buf(report, GFP_KERNEL);
}
field->value[index] = td->inputmode_value;
+ *inputmode_found = true;
return true;
case HID_DG_CONTACTMAX:
struct hid_usage *usage;
int i, j;
bool update_report;
+ bool inputmode_found = false;
rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list) {
usage,
latency,
surface_switch,
- button_switch))
+ button_switch,
+ &inputmode_found))
update_report = true;
}
}
*/
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /*
+ * Checks if the report descriptor of Thinkpad Helix 2 has a logical
+ * minimum for magnetic flux axis greater than the maximum.
+ */
+ if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+ *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+ rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+ rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+ rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+ /* Sets negative logical minimum for mag x, y and z */
+ rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+ rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+ rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+ rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+ }
+
+ return rdesc;
+}
+
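For reference, the patched bytes form a 32-bit little-endian HID Logical Minimum. A standalone sketch (not driver code) decoding the before/after values shows the fixup replacing +557376 with -557376, the negative of the declared maximum:

#include <stdio.h>
#include <stdint.h>

static int32_t le32_to_i32(const uint8_t b[4])
{
	return (int32_t)((uint32_t)b[0] | (uint32_t)b[1] << 8 |
			 (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t before[4] = { 0x40, 0x81, 0x08, 0x00 };
	const uint8_t after[4]  = { 0xc0, 0x7e, 0xf7, 0xff };

	/* prints: logical minimum: 557376 -> -557376 */
	printf("logical minimum: %d -> %d\n",
	       le32_to_i32(before), le32_to_i32(after));
	return 0;
}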
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
.probe = sensor_hub_probe,
.remove = sensor_hub_remove,
.raw_event = sensor_hub_raw_event,
+ .report_fixup = sensor_hub_report_fixup,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
.resume = sensor_hub_resume,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
- { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
pm_runtime_enable(dev);
enable_irq(client->irq);
- ret = i2c_hid_hwreset(client);
+
+ /* Instead of resetting the device, simply power it on. This
+ * solves "incomplete reports" on Raydium devices 2386:3118 and
+ * 2386:4B33
+ */
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
if (ret)
return ret;
- /* RAYDIUM device (2386:3118) need to re-send report descr cmd
+ /* Some devices need to re-send the report descriptor command
 * after resume; otherwise they issue too many
 * incomplete reports.
*/
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
#define CNL_H_DEVICE_ID 0xA37C
+#define SPT_H_DEVICE_ID 0xA135
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
dgid = (union ib_gid *) &addr->sib_addr;
pkey = ntohs(addr->sib_pkey);
+ mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
+ goto found;
}
}
}
}
-
- if (!cma_dev)
- return -ENODEV;
+ mutex_unlock(&lock);
+ return -ENODEV;
found:
cma_attach_to_dev(id_priv, cma_dev);
- addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
- memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+ mutex_unlock(&lock);
+ addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
return 0;
}
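The shape of the fix is worth spelling out: the device-list walk and the attach now both happen under the global lock mutex, and every exit path drops it exactly once. A hedged sketch with illustrative names (matches(), attach() are placeholders):

	mutex_lock(&lock);
	list_for_each_entry(cur, &dev_list, list) {
		if (matches(cur))          /* hypothetical predicate */
			goto found;
	}
	mutex_unlock(&lock);
	return -ENODEV;

found:
	attach(id_priv, cur);              /* still under the lock */
	mutex_unlock(&lock);
	return 0;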
WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
if (!uverbs_destroy_uobject(obj, reason))
ret = 0;
+ else
+ atomic_set(&obj->usecnt, 0);
}
return ret;
}
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+ if (f.file->f_op != &ucma_fops) {
+ ret = -EINVAL;
+ goto file_put;
+ }
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
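The f_op comparison above is the standard idiom for proving that a user-supplied fd was created by this driver before trusting its private_data. Condensed into a helper (a sketch, not from the patch):

static bool is_ucma_file(const struct file *filp)
{
	/* pointer identity: every ucma fd shares the single ucma_fops */
	return filp->f_op == &ucma_fops;
}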
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
if (ib_uverbs_create_uapi(device, uverbs_dev))
- goto err;
+ goto err_uapi;
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
err_class:
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
err_cdev:
cdev_del(&uverbs_dev->cdev);
+err_uapi:
clear_bit(devnum, dev_map);
-
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
"Failed to destroy Shadow QP");
return rc;
}
+ bnxt_qplib_free_qp_res(&rdev->qplib_res,
+ &rdev->qp1_sqp->qplib_qp);
mutex_lock(&rdev->qp_lock);
list_del(&rdev->qp1_sqp->list);
atomic_dec(&rdev->qp_count);
struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
- struct bnxt_qplib_q *sq = &qp->rq;
+ struct bnxt_qplib_q *sq = &qp->sq;
int rc = 0;
if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
schp = to_c4iw_cq(qhp->ibqp.send_cq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
- props->max_send_sge = dev->dev->caps.max_sq_sg;
- props->max_recv_sge = dev->dev->caps.max_rq_sg;
- props->max_sge_rd = MLX4_MAX_SGE_RD;
+ props->max_send_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_recv_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;
skb_queue_head_init(&skqueue);
+ netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
cqe = &admin_queue->cq.entries[head_masked];
/* Go over all the completions */
- while ((cqe->acq_common_descriptor.flags &
+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
- rmb();
+ dma_rmb();
ena_com_handle_single_admin_completion(admin_queue, cqe);
head_masked++;
mmio_read_reg |= mmio_read->seq_num &
ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
- /* make sure read_resp->req_id get updated before the hw can write
- * there
- */
- wmb();
-
- writel_relaxed(mmio_read_reg,
- ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- mmiowb();
for (i = 0; i < timeout; i++) {
- if (read_resp->req_id == mmio_read->seq_num)
+ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
break;
udelay(1);
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
- phase) {
+ while ((READ_ONCE(aenq_common->flags) &
+ ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Make sure the phase bit (ownership) is as expected before
+ * reading the rest of the descriptor.
+ */
+ dma_rmb();
+
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrom,
(u64)aenq_common->timestamp_low +
if (desc_phase != expected_phase)
return NULL;
+ /* Make sure we read the rest of the descriptor after the phase bit
+ * has been read
+ */
+ dma_rmb();
+
return cdesc;
}
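The ENA hunks above converge on one pattern for device-written completion rings: load the phase/ownership flag with READ_ONCE() so the compiler cannot reuse a stale value, and only read the descriptor body after dma_rmb(). A sketch with a hypothetical descriptor layout:

struct demo_cdesc {
	u8  flags;                         /* bit 0: phase (ownership) */
	u8  status;
	u16 req_id;
};

static struct demo_cdesc *demo_poll(struct demo_cdesc *cdesc, u8 phase)
{
	if ((READ_ONCE(cdesc->flags) & 1) != phase)
		return NULL;               /* device still owns the entry */

	dma_rmb();                         /* phase check before payload reads */
	return cdesc;
}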
if (cdesc_phase != expected_phase)
return -EAGAIN;
+ dma_rmb();
if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
pr_err("Invalid req id %d\n", cdesc->req_id);
return -EINVAL;
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
- bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- if (relaxed)
- writel_relaxed(tail, io_sq->db_addr);
- else
- writel(tail, io_sq->db_addr);
+ writel(tail, io_sq->db_addr);
return 0;
}
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
return -ENOMEM;
}
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
u64_stats_update_begin(&rx_ring->syncp);
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma;
- ena_buf->len = PAGE_SIZE;
+ ena_buf->len = ENA_PAGE_SIZE;
return 0;
}
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
rx_ring->qid, i, num);
}
- if (likely(i)) {
- /* Add memory barrier to make sure the desc were written before
- * issue a doorbell
- */
- wmb();
- ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
- mmiowb();
- }
+ /* ena_com_write_sq_doorbell issues a wmb() */
+ if (likely(i))
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
rx_ring->next_to_use = next_to_use;
do {
dma_unmap_page(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ ENA_PAGE_SIZE, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, PAGE_SIZE);
+ rx_info->page_offset, len, ENA_PAGE_SIZE);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
"Destroy failure, restarting device\n");
ena_dump_stats_to_dmesg(adapter);
/* rtnl lock already obtained in dev_ioctl() layer */
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, false);
ena_restore_device(adapter);
}
tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
tx_ring->ring_size);
- /* This WMB is aimed to:
- * 1 - perform smp barrier before reading next_to_completion
- * 2 - make sure the desc were written before trigger DB
- */
- wmb();
-
/* stop the queue when no more space available, the packet can have up
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
* stop the queue but meanwhile clean_tx_irq updates
* next_to_completion and terminates.
* The queue will remain stopped forever.
- * To solve this issue this function perform rmb, check
- * the wakeup condition and wake up the queue if needed.
+ * To solve this issue add a mb() to make sure that
+ * netif_tx_stop_queue() write is visible before checking if
+ * there is additional space in the queue.
*/
- smp_rmb();
+ smp_mb();
if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
> ENA_TX_WAKEUP_THRESH) {
}
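Condensing what the comment and hunk above implement, the producer side of the stop/wake handshake looks roughly like this (a restatement of the patch, not additional driver code):

	netif_tx_stop_queue(txq);
	smp_mb();       /* make the stop visible before re-checking space */
	if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
	    ENA_TX_WAKEUP_THRESH)
		netif_tx_wake_queue(txq);  /* completion path raced with us */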
if (netif_xmit_stopped(txq) || !skb->xmit_more) {
- /* trigger the dma engine */
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+ /* trigger the dma engine. ena_com_write_sq_doorbell()
+ * has a mb
+ */
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.doorbells++;
u64_stats_update_end(&tx_ring->syncp);
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ return;
+
netif_carrier_off(netdev);
del_timer_sync(&adapter->timer_service);
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
- ena_com_set_admin_running_state(ena_dev, false);
+ if (!graceful)
+ ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
}
}
+ set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev, "Device reset completed successfully\n");
return;
}
rtnl_lock();
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, false);
ena_restore_device(adapter);
rtnl_unlock();
}
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
-
- unregister_netdev(netdev);
del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
- /* Reset the device only if the device is running. */
- if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ unregister_netdev(netdev);
- ena_free_mgmnt_irq(adapter);
+ /* If the device is running, make sure it is reset so that no more
+ * events will be issued by the device.
+ */
+ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- ena_disable_msix(adapter);
+ rtnl_lock();
+ ena_destroy_device(adapter, true);
+ rtnl_unlock();
free_netdev(netdev);
- ena_com_mmio_reg_read_request_destroy(ena_dev);
-
- ena_com_abort_admin_commands(ena_dev);
-
- ena_com_wait_for_abort_completion(ena_dev);
-
- ena_com_admin_destroy(ena_dev);
-
ena_com_rss_destroy(ena_dev);
ena_com_delete_debug_area(ena_dev);
"ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, true);
rtnl_unlock();
return 0;
}
int ena_get_sset_count(struct net_device *netdev, int sset);
+/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
#endif /* !(ENA_H) */
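The truncation this guards against is easy to show in isolation: with 64kB pages the full page size cannot be represented in a 16-bit length field at all (standalone demo, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_size = 64 * 1024;            /* 64kB PAGE_SIZE */
	uint16_t len_field = (uint16_t)page_size;  /* what the HW would see */

	/* prints: PAGE_SIZE=65536 -> 16-bit length field=0 */
	printf("PAGE_SIZE=%u -> 16-bit length field=%u\n",
	       page_size, len_field);
	return 0;
}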
port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
}
}
- return status;
+ goto err;
}
pcie = be_get_pcie_desc(resp->func_param, desc_count,
struct ltq_etop_chan *ch = &priv->ch[i];
ch->idx = ch->dma.nr = i;
+ ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
- set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
- if (intf->attach)
- set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
if (dev_ctx->context) {
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ if (intf->attach)
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
goto out;
- intf->attach(dev, dev_ctx->context);
+ if (intf->attach(dev, dev_ctx->context))
+ goto out;
+
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
goto out;
dev_ctx->context = intf->add(dev);
+ if (!dev_ctx->context)
+ goto out;
+
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
}
}
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
- return (u16)((dev->pdev->bus->number << 8) |
+ return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+ (dev->pdev->bus->number << 8) |
PCI_SLOT(dev->pdev->devfn));
}
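Why widening the ID to u32 matters: the old value encoded only bus and slot, so identical bus/slot numbers in two PCI domains produced the same ID and mlx5_get_next_phys_dev() could pair unrelated devices. A standalone demonstration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int dom_a = 0, dom_b = 1, bus = 0x3b, slot = 0;
	uint16_t old_a = (bus << 8) | slot;        /* domain ignored */
	uint16_t old_b = (bus << 8) | slot;
	uint32_t new_a = (dom_a << 16) | (bus << 8) | slot;
	uint32_t new_b = (dom_b << 16) | (bus << 8) | slot;

	printf("old ids: %#x vs %#x (collide)\n", old_a, old_b);
	printf("new ids: %#x vs %#x (distinct)\n", new_a, new_b);
	return 0;
}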
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- u16 pci_id = mlx5_gen_pci_id(dev);
+ u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
- MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+ MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
if (err)
goto miss_rule_err;
+ kvfree(flow_group_in);
return 0;
miss_rule_err:
return version;
}
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+ u32 *match_value,
+ bool take_write)
+{
+ struct fs_fte *fte_tmp;
+
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ fte_tmp = NULL;
+ goto out;
+ }
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+ if (take_write)
+ up_write_ref_node(&g->node);
+ else
+ up_read_ref_node(&g->node);
+ return fte_tmp;
+}
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
if (IS_ERR(fte))
return ERR_PTR(-ENOMEM);
- list_for_each_entry(iter, match_head, list) {
- nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
- }
-
search_again_locked:
version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
struct fs_fte *fte_tmp;
g = iter->g;
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+ if (!fte_tmp)
continue;
-
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
- if (!take_write) {
- list_for_each_entry(iter, match_head, list)
- up_read_ref_node(&iter->g->node);
- } else {
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
- }
-
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte_tmp);
up_write_ref_node(&fte_tmp->node);
return rule;
}
- /* No group with matching fte found. Try to add a new fte to any
- * matching fg.
- */
-
- if (!take_write) {
- list_for_each_entry(iter, match_head, list)
- up_read_ref_node(&iter->g->node);
- list_for_each_entry(iter, match_head, list)
- nested_down_write_ref_node(&iter->g->node,
- FS_LOCK_PARENT);
- take_write = true;
- }
-
/* Check the ft version, for case that new flow group
* was added while the fgs weren't locked
*/
/* Check the fgs version, for case the new FTE with the
* same values was added while the fgs weren't locked
*/
- if (version != matched_fgs_get_version(match_head))
+ if (version != matched_fgs_get_version(match_head)) {
+ take_write = true;
goto search_again_locked;
+ }
list_for_each_entry(iter, match_head, list) {
g = iter->g;
if (!g->node.active)
continue;
+
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
err = insert_fte(g, fte);
if (err) {
+ up_write_ref_node(&g->node);
if (err == -ENOSPC)
continue;
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return ERR_PTR(err);
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
+ up_write_ref_node(&g->node);
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node);
}
rule = ERR_PTR(-ENOENT);
out:
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
if (err) {
if (take_write)
up_write_ref_node(&ft->node);
+ else
+ up_read_ref_node(&ft->node);
return ERR_PTR(err);
}
add_timer(&health->timer);
}
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
+
+ if (disable_health) {
+ spin_lock_irqsave(&health->wq_lock, flags);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
+ }
del_timer_sync(&health->timer);
}
priv->numa_node = dev_to_node(&dev->pdev->dev);
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
- if (!priv->dbg_root)
+ if (!priv->dbg_root) {
+ dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
return -ENOMEM;
+ }
err = mlx5_pci_enable_device(dev);
if (err) {
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
- debugfs_remove(priv->dbg_root);
+ debugfs_remove_recursive(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_cleanup_once(dev);
err_stop_poll:
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, boot);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out_err;
mlx5_free_irq_vectors(dev);
if (cleanup)
mlx5_cleanup_once(dev);
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, cleanup);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
 * with the HCA, so the health poll is no longer needed.
*/
mlx5_drain_health_wq(dev);
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {
return (u32)wq->fbc.sz_m1 + 1;
}
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
{
- return (u32)wq->fbc.frag_sz_m1 + 1;
+ return wq->fbc.frag_sz_m1 + 1;
}
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u32 sq_strides_offset;
+ u16 sq_strides_offset;
u32 rq_pg_remainder;
int err;
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(1, 0xff, 0),
};
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
struct nfp_repr *repr = netdev_priv(netdev);
+
*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
FLOW_DISSECTOR_KEY_VLAN,
target);
/* Populate the tci field. */
- if (flow_vlan->vlan_id) {
+ if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
flow_vlan->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
key_size += sizeof(struct nfp_flower_mac_mpls);
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *flow_vlan;
+
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ flow->mask);
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+ flow_vlan->vlan_priority)
+ return -EOPNOTSUPP;
+ }
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
{
__be16 rx_data;
__be16 tx_data;
- struct spi_transfer *transfer;
- struct spi_message *msg;
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
int ret;
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+ *result = 0;
+
+ transfer[0].tx_buf = &tx_data;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].rx_buf = &rx_data;
+ transfer[1].len = QCASPI_CMD_LEN;
+
+ spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
- msg = &qca->spi_msg1;
- transfer = &qca->spi_xfer1;
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- spi_sync(qca->spi_dev, msg);
- } else {
- msg = &qca->spi_msg2;
- transfer = &qca->spi_xfer2[0];
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
+ spi_sync(qca->spi_dev, &msg);
+ spi_message_init(&msg);
}
- transfer->tx_buf = NULL;
- transfer->rx_buf = &rx_data;
- transfer->len = QCASPI_CMD_LEN;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
{
__be16 tx_data[2];
- struct spi_transfer *transfer;
- struct spi_message *msg;
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
tx_data[1] = cpu_to_be16(value);
+ transfer[0].tx_buf = &tx_data[0];
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].tx_buf = &tx_data[1];
+ transfer[1].len = QCASPI_CMD_LEN;
+
+ spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
- msg = &qca->spi_msg1;
- transfer = &qca->spi_xfer1;
- transfer->tx_buf = &tx_data[0];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- spi_sync(qca->spi_dev, msg);
- } else {
- msg = &qca->spi_msg2;
- transfer = &qca->spi_xfer2[0];
- transfer->tx_buf = &tx_data[0];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
+ spi_sync(qca->spi_dev, &msg);
+ spi_message_init(&msg);
}
- transfer->tx_buf = &tx_data[1];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
{
__be16 cmd;
- struct spi_message *msg = &qca->spi_msg2;
- struct spi_transfer *transfer = &qca->spi_xfer2[0];
+ struct spi_message msg;
+ struct spi_transfer transfer[2];
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
- transfer->tx_buf = &cmd;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
- transfer->tx_buf = src;
- transfer->rx_buf = NULL;
- transfer->len = len;
+ transfer[0].tx_buf = &cmd;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].tx_buf = src;
+ transfer[1].len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+ if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
static u32
qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
{
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
- transfer->tx_buf = src;
- transfer->rx_buf = NULL;
- transfer->len = len;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
+ transfer.tx_buf = src;
+ transfer.len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer, &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != len)) {
+ if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
static u32
qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
{
- struct spi_message *msg = &qca->spi_msg2;
+ struct spi_message msg;
__be16 cmd;
- struct spi_transfer *transfer = &qca->spi_xfer2[0];
+ struct spi_transfer transfer[2];
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
- transfer->tx_buf = &cmd;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
- transfer->tx_buf = NULL;
- transfer->rx_buf = dst;
- transfer->len = len;
+ transfer[0].tx_buf = &cmd;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].rx_buf = dst;
+ transfer[1].len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+ if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
static u32
qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
{
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
- transfer->tx_buf = NULL;
- transfer->rx_buf = dst;
- transfer->len = len;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
- ret = spi_sync(qca->spi_dev, msg);
+ transfer.rx_buf = dst;
+ transfer.len = len;
- if (ret || (msg->actual_length != len)) {
+ spi_message_add_tail(&transfer, &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
+
+ if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
{
__be16 tx_data;
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data = cpu_to_be16(cmd);
- transfer->len = sizeof(tx_data);
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
+ transfer.len = sizeof(cmd);
+ transfer.tx_buf = &tx_data;
+ spi_message_add_tail(&transfer, &msg);
- ret = spi_sync(qca->spi_dev, msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
- memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
- memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
- spi_message_init(&qca->spi_msg1);
- spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
- spi_message_init(&qca->spi_msg2);
- spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
- spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
memset(&qca->txr, 0, sizeof(qca->txr));
qca->txr.count = TX_RING_MAX_LEN;
}
struct tx_ring txr;
struct qcaspi_stats stats;
- struct spi_message spi_msg1;
- struct spi_message spi_msg2;
- struct spi_transfer spi_xfer1;
- struct spi_transfer spi_xfer2[2];
-
u8 *rx_buffer;
u32 buffer_size;
u8 sync;
};
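With the shared spi_msg1/spi_msg2 state removed from struct qcaspi, every operation now builds its message on the caller's stack, so concurrent transfers cannot clobber each other's transfer descriptors. The recurring shape, condensed into one hedged sketch:

static int demo_cmd_then_data(struct spi_device *spi, __be16 *cmd,
			      void *rx, unsigned int rx_len)
{
	struct spi_transfer xfer[2] = { };     /* on-stack, zeroed */
	struct spi_message msg;

	spi_message_init(&msg);
	xfer[0].tx_buf = cmd;
	xfer[0].len = sizeof(*cmd);
	xfer[1].rx_buf = rx;
	xfer[1].len = rx_len;
	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);

	return spi_sync(spi, &msg);            /* blocks; xfer stays live */
}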
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_MAX
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
- rtl_set_tx_config_registers(tp);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
+ rtl_set_tx_config_registers(tp);
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
rtl8169_update_counters(tp);
rtl_lock_work(tp);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
rtl_lock_work(tp);
napi_disable(&tp->napi);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
rtl_unlock_work(tp);
rtl_pll_power_down(tp);
+# SPDX-License-Identifier: GPL-2.0
#
# Renesas device configuration
#
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Renesas device drivers.
#
+// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include "ravb.h"
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Quectel EP06/EG06/EM06 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC,
+ 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
return false;
}
+static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+
+ if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+ intf_desc.bNumEndpoints == 2)
+ return true;
+
+ return false;
+}
+
static int qmi_wwan_probe(struct usb_interface *intf,
const struct usb_device_id *prod)
{
return -ENODEV;
}
+ /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+ * we need to match on class/subclass/protocol. These values are
+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ * different. Ignore the current interface if the number of endpoints
+ * equals the number for the diag interface (two).
+ */
+ if (quectel_ep06_diag_detected(intf))
+ return -ENODEV;
+
return usbnet_probe(intf, id);
}
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
u64 packets;
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
- wait_event(module_load_q,
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateClosed &&
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateUnknown);
+ wait_event(module_wq,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown);
return netdev;
exit:
dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+ wake_up_all(&module_wq);
+
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- break;
-
case XenbusStateUnknown:
- wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
break;
case XenbusStateClosed:
- wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
- wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
xenbus_switch_state(dev, XenbusStateClosing);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
priv.buffer_len = oat_data.buffer_len;
priv.response_len = 0;
- priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
+ priv.buffer = vzalloc(oat_data.buffer_len);
if (!priv.buffer) {
rc = -ENOMEM;
goto out;
rc = -EFAULT;
out_free:
- kfree(priv.buffer);
+ vfree(priv.buffer);
out:
return rc;
}
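The kzalloc() to vzalloc() switch matters because oat_data.buffer_len comes from userspace: large kzalloc() requests need physically contiguous pages and can fail on fragmented systems, while vzalloc() only needs virtually contiguous ones. The shape of the pattern, as a sketch:

static int demo_fill_user_buf(void __user *dst, size_t user_len)
{
	void *buf = vzalloc(user_len);     /* virtual contiguity suffices */
	int rc = 0;

	if (!buf)
		return -ENOMEM;
	if (copy_to_user(dst, buf, user_len))
		rc = -EFAULT;
	vfree(buf);                        /* pairs with vzalloc, not kfree */
	return rc;
}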
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
}
return dev;
qeth_update_from_chp_desc(card);
card->dev = qeth_alloc_netdev(card);
- if (!card->dev)
+ if (!card->dev) {
+ rc = -ENOMEM;
goto err_card;
+ }
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
QEDI_NVM_TGT_SEC,
};
+struct qedi_nvm_iscsi_image {
+ struct nvm_iscsi_cfg iscsi_cfg;
+ u32 crc;
+};
+
struct qedi_uio_ctrl {
/* meta data */
u32 uio_hsi_version;
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
- struct nvm_iscsi_cfg *iscsi_cfg;
+ struct qedi_nvm_iscsi_image *iscsi_image;
dma_addr_t nvm_buf_dma;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- if (qedi->iscsi_cfg)
+ if (qedi->iscsi_image)
dma_free_coherent(&qedi->pdev->dev,
- sizeof(struct nvm_iscsi_cfg),
- qedi->iscsi_cfg, qedi->nvm_buf_dma);
+ sizeof(struct qedi_nvm_iscsi_image),
+ qedi->iscsi_image, qedi->nvm_buf_dma);
}
static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
- sizeof(struct nvm_iscsi_cfg),
- &qedi->nvm_buf_dma, GFP_KERNEL);
- if (!qedi->iscsi_cfg) {
+ struct qedi_nvm_iscsi_image nvm_image;
+
+ qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
+ sizeof(nvm_image),
+ &qedi->nvm_buf_dma,
+ GFP_KERNEL);
+ if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
return -ENOMEM;
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+ "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
qedi->nvm_buf_dma);
return 0;
struct nvm_iscsi_block *block;
pf = qedi->dev_info.common.abs_pf_id;
- block = &qedi->iscsi_cfg->block[0];
+ block = &qedi->iscsi_image->iscsi_cfg.block[0];
for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
int ret = 1;
- u16 len;
-
- len = sizeof(struct nvm_iscsi_cfg);
+ struct qedi_nvm_iscsi_image nvm_image;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Get NVM iSCSI CFG image\n");
ret = qedi_ops->common->nvm_get_image(qedi->cdev,
QED_NVM_IMAGE_ISCSI_CFG,
- (char *)qedi->iscsi_cfg, len);
+ (char *)qedi->iscsi_image,
+ sizeof(nvm_image));
if (ret)
QEDI_ERR(&qedi->dbg_ctx,
"Could not get NVM image. ret = %d\n", ret);
crypto_free_ahash(tfm);
}
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
- conn->conn_ops = NULL;
-
if (conn->sock)
sock_release(conn->sock);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
- iscsit_put_transport(conn->conn_transport);
-
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
- kfree(conn);
+ iscsit_free_conn(conn);
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
goto out_req_buf;
}
- conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
- if (!conn->conn_ops) {
- pr_err("Unable to allocate memory for"
- " struct iscsi_conn_ops.\n");
- goto out_rsp_buf;
- }
-
- init_waitqueue_head(&conn->queues_wq);
- INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_cmd_list);
- INIT_LIST_HEAD(&conn->immed_queue_list);
- INIT_LIST_HEAD(&conn->response_queue_list);
- init_completion(&conn->conn_post_wait_comp);
- init_completion(&conn->conn_wait_comp);
- init_completion(&conn->conn_wait_rcfr_comp);
- init_completion(&conn->conn_waiting_on_uc_comp);
- init_completion(&conn->conn_logout_comp);
- init_completion(&conn->rx_half_close_comp);
- init_completion(&conn->tx_half_close_comp);
- init_completion(&conn->rx_login_comp);
- spin_lock_init(&conn->cmd_lock);
- spin_lock_init(&conn->conn_usage_lock);
- spin_lock_init(&conn->immed_queue_lock);
- spin_lock_init(&conn->nopin_timer_lock);
- spin_lock_init(&conn->response_queue_lock);
- spin_lock_init(&conn->state_lock);
-
- if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
- pr_err("Unable to allocate conn->conn_cpumask\n");
- goto out_conn_ops;
- }
conn->conn_login = login;
return login;
-out_conn_ops:
- kfree(conn->conn_ops);
-out_rsp_buf:
- kfree(login->rsp_buf);
out_req_buf:
kfree(login->req_buf);
out_login:
return -ENOMEM;
}
- ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
- if (unlikely(ret)) {
- kfree(sess);
- return ret;
- }
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+ goto free_sess;
+
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
return 0;
}
+static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+ struct iscsi_conn *conn;
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for new connection\n");
+ return NULL;
+ }
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+
+ init_waitqueue_head(&conn->queues_wq);
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ init_completion(&conn->rx_login_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+
+ timer_setup(&conn->nopin_response_timer,
+ iscsit_handle_nopin_response_timeout, 0);
+ timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
+ if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+ goto free_conn;
+
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+ goto put_transport;
+ }
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ goto free_conn_ops;
+ }
+
+ return conn;
+
+free_conn_ops:
+ kfree(conn->conn_ops);
+put_transport:
+ iscsit_put_transport(conn->conn_transport);
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+
+void iscsit_free_conn(struct iscsi_conn *conn)
+{
+ free_cpumask_var(conn->conn_cpumask);
+ kfree(conn->conn_ops);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+}
+
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
struct iscsi_np *np, bool zero_tsih, bool new_sess)
{
crypto_free_ahash(tfm);
}
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
-
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
+ iscsit_free_conn(conn);
}
static int __iscsi_target_login_thread(struct iscsi_np *np)
}
spin_unlock_bh(&np->np_thread_lock);
- conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ conn = iscsit_alloc_conn(np);
if (!conn) {
- pr_err("Could not allocate memory for"
- " new connection\n");
/* Get another socket */
return 1;
}
- pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
- conn->conn_state = TARG_CONN_STATE_FREE;
-
- timer_setup(&conn->nopin_response_timer,
- iscsit_handle_nopin_response_timeout, 0);
- timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
-
- if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
- kfree(conn);
- return 1;
- }
rc = np->np_transport->iscsit_accept_np(np, conn);
if (rc == -ENOSYS) {
complete(&np->np_restart_comp);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_free_conn(conn);
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_free_conn(conn);
/* Get another socket */
return 1;
}
spin_unlock_bh(&np->np_thread_lock);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
- goto out;
+ iscsit_free_conn(conn);
+ return 1;
}
/*
* Perform the remaining iSCSI connection initialization items..
tpg_np = NULL;
}
-out:
return 1;
exit:
extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern void iscsit_free_conn(struct iscsi_conn *);
extern int iscsit_start_kthreads(struct iscsi_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
const char *name;
bool registered;
struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
};
enum hid_type {
struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
- u32 frag_sz_m1;
- u32 strides_offset;
+ u16 frag_sz_m1;
+ u16 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
}
static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
- u32 strides_offset,
+ u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
};
static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
{
struct nf_ct_timeout *timeout;
WARN_ON_ONCE(!in_task());
- if (!sock_flag(sk, SOCK_ZEROCOPY))
- return NULL;
-
skb = sock_omalloc(sk, 0, GFP_KERNEL);
if (!skb)
return NULL;
nextp = &fp->next;
fp->prev = NULL;
memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
head->data_len += fp->len;
head->len += fp->len;
if (head->ip_summed != fp->ip_summed)
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
+ else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+ tpi->proto == htons(ETH_P_ERSPAN2))
+ itn = net_generic(net, erspan_net_id);
else
itn = net_generic(net, ipgre_net_id);
ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
+ return PACKET_REJECT;
+
drop:
kfree_skb(skb);
return PACKET_RCVD;
if NF_NAT_IPV4
+config NF_NAT_MASQUERADE_IPV4
+ bool
+
+if NF_TABLES
config NFT_CHAIN_NAT_IPV4
depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables nat chain support"
packet transformations such as the source, destination address and
source and destination ports.
-config NF_NAT_MASQUERADE_IPV4
- bool
-
config NFT_MASQ_IPV4
tristate "IPv4 masquerading support for nf_tables"
depends on NF_TABLES_IPV4
help
This is the expression that provides IPv4 redirect support for
nf_tables.
+endif # NF_TABLES
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
flags = msg->msg_flags;
- if (flags & MSG_ZEROCOPY && size) {
+ if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -EINVAL;
goto out_err;
if (!queue->synflood_warned &&
net->ipv4.sysctl_tcp_syncookies != 2 &&
xchg(&queue->synflood_warned, 1) == 0)
- pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
- proto, ntohs(tcp_hdr(skb)->dest), msg);
+ net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+ proto, ntohs(tcp_hdr(skb)->dest), msg);
return want_cookie;
}
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
+ fp->sk = NULL;
}
sub_frag_mem_limit(fq->q.net, head->truesize);
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb->dev = iucv->hs_dev;
- if (!skb->dev)
- return -ENODEV;
- if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
- return -ENETDOWN;
+ if (!skb->dev) {
+ err = -ENODEV;
+ goto err_free;
+ }
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+ err = -ENETDOWN;
+ goto err_free;
+ }
if (skb->len > skb->dev->mtu) {
- if (sock->sk_type == SOCK_SEQPACKET)
- return -EMSGSIZE;
- else
- skb_trim(skb, skb->dev->mtu);
+ if (sock->sk_type == SOCK_SEQPACKET) {
+ err = -EMSGSIZE;
+ goto err_free;
+ }
+ skb_trim(skb, skb->dev->mtu);
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
+ if (!nskb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
return net_xmit_eval(err);
+
+err_free:
+ kfree_skb(skb);
+ return err;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
err = afiucv_hs_send(&txmsg, sk, skb, 0);
if (err) {
atomic_dec(&iucv->msg_sent);
- goto fail;
+ goto out;
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
+ int err = NET_RX_SUCCESS;
char nullstring[8];
- int err = 0;
if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
- ;
+ kfree_skb(skb);
}
return err;
 * Returns 0 if there are still iucv paths defined
 * 1 if there are no iucv paths defined
*/
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
{
int i;
depends on NETFILTER_ADVANCED
---help---
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
- table.
+ table to work around buggy DHCP clients in virtualized environments.
- You can use this target to compute and fill in the checksum in
- a packet that lacks a checksum. This is particularly useful,
- if you need to work around old applications such as dhcp clients,
- that do not work well with checksum offloads, but don't want to disable
- checksum offload in your device.
+ Some old DHCP clients drop packets because they are not aware
+ that the checksum would normally be offloaded to hardware and
+ thus should be considered valid.
+ This target can be used to fill in the checksum using iptables
+ when such packets are sent via a virtual network device.
To compile it as a module, choose M here. If unsure, say N.
};
#endif
+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
+{
+ u8 nfproto = (unsigned long)_nfproto;
+
+ if (nf_ct_l3num(ct) != nfproto)
+ return 0;
+
+ if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+ ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
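+		/* a zero td_maxwin makes tcp_in_window() re-learn window
+		 * state from the next packet (mid-stream pickup), so
+		 * established flows survive the hook re-registration.
+		 */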
+ ct->proto.tcp.seen[0].td_maxwin = 0;
+ ct->proto.tcp.seen[1].td_maxwin = 0;
+ }
+
+ return 0;
+}
+
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ bool fixup_needed = false;
int err = 0;
mutex_lock(&nf_ct_proto_mutex);
ARRAY_SIZE(ipv4_conntrack_ops));
if (err)
cnet->users4 = 0;
+ else
+ fixup_needed = true;
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
ARRAY_SIZE(ipv6_conntrack_ops));
if (err)
cnet->users6 = 0;
+ else
+ fixup_needed = true;
break;
#endif
default:
}
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
+
+ if (fixup_needed)
+ nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
+ (void *)(unsigned long)nfproto, 0, 0);
+
return err;
}
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
+
+ timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
return 0;
}
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+
+	/* timeouts[0] is unused, make it the same as REQUEST (DCCP's
+	 * 'new' state) so ->timeouts[0] contains the 'new' timeout,
+	 * like udp or icmp.
+	 */
+ dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
return dccp_kmemdup_sysctl_table(net, pn, dn);
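For context, a minimal sketch of why slot zero must be populated: consumers read the 'new' timeout blindly from index 0 of the per-protocol array. The helper below is illustrative (the name apply_new_timeout is not from the patch); it mirrors the nft_ct_timeout evaluation later in this series:

/* sketch: refresh an unconfirmed conntrack with the 'new' timeout,
 * which by convention sits in slot 0 of the per-protocol array.
 */
static void apply_new_timeout(struct nf_conn *ct, struct sk_buff *skb,
			      const struct nf_conn_timeout *ext)
{
	const unsigned int *values = nf_ct_timeout_data(ext);

	if (values)
		nf_ct_refresh(ct, skb, values[0]);
}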
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
return ret;
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
[CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table generic_sysctl_table[] = {
.pkt_to_tuple = generic_pkt_to_tuple,
.packet = generic_packet,
.new = generic_new,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
.obj_to_nlattr = generic_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int),
.nla_policy = generic_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = generic_init_net,
.get_net_proto = generic_get_net_proto,
};
nf_ct_gre_keymap_destroy(master);
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
[CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static int gre_init_net(struct net *net, u_int16_t proto)
{
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = gre_timeout_nlattr_to_obj,
.obj_to_nlattr = gre_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * GRE_CT_MAX,
.nla_policy = gre_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.net_id = &proto_gre_net_id,
.init_net = gre_init_net,
};
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
[CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmp_sysctl_table[] = {
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmp_timeout_nlattr_to_obj,
.obj_to_nlattr = icmp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int),
.nla_policy = icmp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmp_init_net,
.get_net_proto = icmp_get_net_proto,
};
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
[CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmpv6_sysctl_table[] = {
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
.obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int),
.nla_policy = icmpv6_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmpv6_init_net,
.get_net_proto = icmpv6_get_net_proto,
};
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
+
+ timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
return 0;
}
[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
sn->timeouts[i] = sctp_timeouts[i];
+
+ /* timeouts[0] is unused, init it so ->timeouts[0] contains
+ * 'new' timeout, like udp or icmp.
+ */
+ sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
return sctp_kmemdup_sysctl_table(pn, sn);
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#endif
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
+
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
+
+ timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table tcp_sysctl_table[] = {
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
+ /* timeouts[0] is unused, make it same as SYN_SENT so
+ * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+ */
+ tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
tn->tcp_loose = nf_ct_tcp_loose;
tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
return NF_ACCEPT;
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
[CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table udp_sysctl_table[] = {
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
#endif
-#include <net/netfilter/nf_conntrack_timeout.h>
}
set->ndeact++;
+ nft_set_elem_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return err;
}
-static struct ctnl_timeout *
-ctnl_timeout_find_get(struct net *net, const char *name)
+static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
+ const char *name)
{
struct ctnl_timeout *timeout, *matching = NULL;
break;
}
err:
- return matching;
+ return matching ? &matching->timeout : NULL;
}
static void ctnl_timeout_put(struct nf_ct_timeout *t)
int err;
if (verdict == NF_ACCEPT ||
+ verdict == NF_REPEAT ||
verdict == NF_STOP) {
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
}
struct nft_ct_timeout_obj {
- struct nf_conn *tmpl;
+ struct nf_ct_timeout *timeout;
u8 l4proto;
};
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
- struct sk_buff *skb = pkt->skb;
+ struct nf_conn_timeout *timeout;
+ const unsigned int *values;
+
+ if (priv->l4proto != pkt->tprot)
+ return;
- if (ct ||
- priv->l4proto != pkt->tprot)
+ if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
return;
- nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+ timeout = nf_ct_timeout_find(ct);
+ if (!timeout) {
+ timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
+ if (!timeout) {
+ regs->verdict.code = NF_DROP;
+ return;
+ }
+ }
+
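+	/* publish the new policy; nf_ct_timeout_data() fetches it via
+	 * rcu_dereference().
+	 */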
+ rcu_assign_pointer(timeout->timeout, priv->timeout);
+
+ /* adjust the timeout as per 'new' state. ct is unconfirmed,
+ * so the current timestamp must not be added.
+ */
+ values = nf_ct_timeout_data(timeout);
+ if (values)
+ nf_ct_refresh(ct, pkt->skb, values[0]);
}
static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
- const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_l4proto *l4proto;
- struct nf_conn_timeout *timeout_ext;
struct nf_ct_timeout *timeout;
int l3num = ctx->family;
- struct nf_conn *tmpl;
__u8 l4num;
int ret;
timeout->l3num = l3num;
timeout->l4proto = l4proto;
- tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
- if (!tmpl) {
- ret = -ENOMEM;
- goto err_free_timeout;
- }
-
- timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
- if (!timeout_ext) {
- ret = -ENOMEM;
- goto err_free_tmpl;
- }
ret = nf_ct_netns_get(ctx->net, ctx->family);
if (ret < 0)
- goto err_free_tmpl;
-
- priv->tmpl = tmpl;
+ goto err_free_timeout;
+ priv->timeout = timeout;
return 0;
-err_free_tmpl:
- nf_ct_tmpl_free(tmpl);
err_free_timeout:
kfree(timeout);
err_proto_put:
struct nft_object *obj)
{
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
- struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
- struct nf_ct_timeout *timeout;
+ struct nf_ct_timeout *timeout = priv->timeout;
- timeout = rcu_dereference_raw(t->timeout);
nf_ct_untimeout(ctx->net, timeout);
nf_ct_l4proto_put(timeout->l4proto);
nf_ct_netns_put(ctx->net, ctx->family);
- nf_ct_tmpl_free(priv->tmpl);
+ kfree(priv->timeout);
}
static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
- const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
- const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+ const struct nf_ct_timeout *timeout = priv->timeout;
struct nlattr *nest_params;
int ret;
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CHECKSUM.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
MODULE_DESCRIPTION("Xtables: checksum modification");
static unsigned int
checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
skb_checksum_help(skb);
return XT_CONTINUE;
static int checksum_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_CHECKSUM_info *einfo = par->targinfo;
+ const struct ip6t_ip6 *i6 = par->entryinfo;
+ const struct ipt_ip *i4 = par->entryinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
if (!einfo->operation)
return -EINVAL;
+ switch (par->family) {
+ case NFPROTO_IPV4:
+ if (i4->proto == IPPROTO_UDP &&
+ (i4->invflags & XT_INV_PROTO) == 0)
+ return 0;
+ break;
+ case NFPROTO_IPV6:
+ if ((i6->flags & IP6T_F_PROTO) &&
+ i6->proto == IPPROTO_UDP &&
+ (i6->invflags & XT_INV_PROTO) == 0)
+ return 0;
+ break;
+ }
+
+ pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
return 0;
}
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
+ int ret;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
return -EDOM;
}
- return 0;
+
+ ret = nf_ct_netns_get(par->net, par->family);
+ if (ret < 0)
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
+}
+
+static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+{
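+	/* pairs with nf_ct_netns_get() in checkentry; without it the
+	 * per-family conntrack hook reference leaks when the rule is
+	 * removed.
+	 */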
+ nf_ct_netns_put(par->net, par->family);
}
static struct xt_match xt_cluster_match __read_mostly = {
.match = xt_cluster_mt,
.checkentry = xt_cluster_mt_checkentry,
.matchsize = sizeof(struct xt_cluster_match_info),
+ .destroy = xt_cluster_mt_destroy,
.me = THIS_MODULE,
};
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
__acquires(htable->lock)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket;
spin_lock_bh(&htable->lock);
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
*pos = ++(*bucket);
static void dl_seq_stop(struct seq_file *s, void *v)
__releases(htable->lock)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
if (!IS_ERR(bucket))
static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
static int dl_seq_show_v2(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
static int dl_seq_show(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
struct rds_sock *rs;
__rds_create_bind_key(key, addr, port, scope_id);
- rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
+ rcu_read_lock();
+ rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
+ rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
ntohs(port));
goto out;
}
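+	/* free the sock through RCU so the lockless rhashtable_lookup()
+	 * in rds_find_bound() cannot race with socket teardown.
+	 */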
+ sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
goto out;
&metadata->u.tun_info,
opts_len, extack);
if (ret < 0)
- goto err_out;
+ goto release_tun_meta;
}
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
- goto err_out;
+ goto release_tun_meta;
}
ret = ACT_P_CREATED;
} else if (!ovr) {
- tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "TC IDR already exists");
- return -EEXIST;
+ ret = -EEXIST;
+ goto release_tun_meta;
}
t = to_tunnel_key(*a);
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
- return -ENOMEM;
+ ret = -ENOMEM;
+ exists = true;
+ goto release_tun_meta;
}
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
return ret;
+release_tun_meta:
+ dst_release(&metadata->dst);
+
err_out:
if (exists)
tcf_idr_release(*a, bind);
nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
opt->type) ||
nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
- opt->length * 4, opt + 1))
+ opt->length * 4, opt + 1)) {
+ nla_nest_cancel(skb, start);
return -EMSGSIZE;
+ }
len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
const struct ip_tunnel_info *info)
{
struct nlattr *start;
- int err;
+ int err = -EINVAL;
if (!info->options_len)
return 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
- return err;
+ goto err_out;
} else {
- return -EINVAL;
+err_out:
+ nla_nest_cancel(skb, start);
+ return err;
}
nla_nest_end(skb, start);
return -ENOMEM;
buf->sk = msg->dst_sk;
+ if (__tipc_dump_start(&cb, msg->net)) {
+ kfree_skb(buf);
+ return -ENOMEM;
+ }
do {
int rem;
err = 0;
err_out:
+ tipc_dump_done(&cb);
kfree_skb(buf);
if (err == -EMSGSIZE) {
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
+ sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
- struct rhashtable_iter *iter = (void *)cb->args[0];
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
int tipc_dump_start(struct netlink_callback *cb)
{
- struct rhashtable_iter *iter = (void *)cb->args[0];
- struct net *net = sock_net(cb->skb->sk);
+ return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+ /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_net *tn = tipc_net(net);
if (!iter) {
if (!iter)
return -ENOMEM;
- cb->args[0] = (long)iter;
+ cb->args[4] = (long)iter;
}
rhashtable_walk_enter(&tn->sk_rht, iter);
return 0;
}
-EXPORT_SYMBOL(tipc_dump_start);
int tipc_dump_done(struct netlink_callback *cb)
{
- struct rhashtable_iter *hti = (void *)cb->args[0];
+ struct rhashtable_iter *hti = (void *)cb->args[4];
rhashtable_walk_exit(hti);
kfree(hti);
struct netlink_callback *cb,
struct tipc_sock *tsk));
int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
int tipc_dump_done(struct netlink_callback *cb);
#endif
&ctx->sg_encrypted_num_elem,
&ctx->sg_encrypted_size, 0);
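+	/* sk_alloc_sg() can run out of slots after filling the whole
+	 * array; assume it is full so the error path frees every page.
+	 */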
+ if (rc == -ENOSPC)
+ ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+
return rc;
}
&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
tls_ctx->pending_open_record_frags);
+ if (rc == -ENOSPC)
+ ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+
return rc;
}