Diffstat (limited to 'drivers')
371 files changed, 11179 insertions, 2471 deletions
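A recurring theme in this diff is replacing open-coded bus_find_device()/class_find_device()/driver_find_device() match callbacks with the generic device_match_*() helpers and their *_by_name()/*_by_of_node()/*_by_fwnode()/*_by_devt() wrappers (see the drivers/amba/tegra-ahb.c and drivers/base hunks below). A minimal sketch of the resulting lookup pattern, not part of the patch itself; "my_bus_type", "np" and "my_find_dev" are placeholder names, not symbols from this series:

/*
 * Illustrative sketch only: the helper-based lookup pattern the series
 * converts drivers to.
 */
#include <linux/device.h>
#include <linux/of.h>

static struct device *my_find_dev(struct bus_type *my_bus_type,
                                  struct device_node *np)
{
        /* Replaces bus_find_device() plus a hand-written of_node match callback */
        struct device *dev = bus_find_device_by_of_node(my_bus_type, np);

        /* The lookup takes a reference; the caller must put_device() when done */
        return dev;
}

The class_find_device_by_devt() and class_find_device_by_of_node() conversions in drivers/base/core.c and drivers/fpga, and driver_find_device_by_of_node() in tegra-ahb, follow the same pattern.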
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index aa64eece77a6..57d3b2e2d007 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -134,22 +134,13 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset) } #ifdef CONFIG_TEGRA_IOMMU_SMMU -static int tegra_ahb_match_by_smmu(struct device *dev, const void *data) -{ - struct tegra_ahb *ahb = dev_get_drvdata(dev); - const struct device_node *dn = data; - - return (ahb->dev->of_node == dn) ? 1 : 0; -} - int tegra_ahb_enable_smmu(struct device_node *dn) { struct device *dev; u32 val; struct tegra_ahb *ahb; - dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn, - tegra_ahb_match_by_smmu); + dev = driver_find_device_by_of_node(&tegra_ahb_driver.driver, dn); if (!dev) return -EPROBE_DEFER; ahb = dev_get_drvdata(dev); diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 2e2efa577437..8c37294f1d1e 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI make the card work). config ATM_NICSTAR_USE_IDT77105 - bool "Use IDT77015 PHY driver (25Mbps)" + bool "Use IDT77105 PHY driver (25Mbps)" depends on ATM_NICSTAR help Support for the PHYsical layer chip in ForeRunner LE25 cards. In diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 9c0bb771751d..a2fcde582e2a 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -74,7 +74,7 @@ struct ht16k33_priv { struct ht16k33_fbdev fbdev; }; -static struct fb_fix_screeninfo ht16k33_fb_fix = { +static const struct fb_fix_screeninfo ht16k33_fb_fix = { .id = DRIVER_NAME, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, @@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo ht16k33_fb_var = { +static const struct fb_var_screeninfo ht16k33_fb_var = { .xres = HT16K33_MATRIX_LED_MAX_ROWS, .yres = HT16K33_MATRIX_LED_MAX_COLS, .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, diff --git a/drivers/base/bus.c b/drivers/base/bus.c index df3cac739813..a1d1e8256324 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -342,30 +342,6 @@ struct device *bus_find_device(struct bus_type *bus, } EXPORT_SYMBOL_GPL(bus_find_device); -static int match_name(struct device *dev, const void *data) -{ - const char *name = data; - - return sysfs_streq(name, dev_name(dev)); -} - -/** - * bus_find_device_by_name - device iterator for locating a particular device of a specific name - * @bus: bus type - * @start: Device to begin with - * @name: name of the device to match - * - * This is similar to the bus_find_device() function above, but it handles - * searching by a name automatically, no need to write another strcmp matching - * function. 
- */ -struct device *bus_find_device_by_name(struct bus_type *bus, - struct device *start, const char *name) -{ - return bus_find_device(bus, start, (void *)name, match_name); -} -EXPORT_SYMBOL_GPL(bus_find_device_by_name); - /** * subsys_find_device_by_id - find a device with a specific enumeration number * @subsys: subsystem diff --git a/drivers/base/core.c b/drivers/base/core.c index 1669d41fcddc..3d350b7ab060 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2932,13 +2932,6 @@ struct device *device_create_with_groups(struct class *class, } EXPORT_SYMBOL_GPL(device_create_with_groups); -static int __match_devt(struct device *dev, const void *data) -{ - const dev_t *devt = data; - - return dev->devt == *devt; -} - /** * device_destroy - removes a device that was created with device_create() * @class: pointer to the struct class that this device was registered with @@ -2951,7 +2944,7 @@ void device_destroy(struct class *class, dev_t devt) { struct device *dev; - dev = class_find_device(class, NULL, &devt, __match_devt); + dev = class_find_device_by_devt(class, devt); if (dev) { put_device(dev); device_unregister(dev); @@ -3422,8 +3415,38 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) } EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); +int device_match_name(struct device *dev, const void *name) +{ + return sysfs_streq(dev_name(dev), name); +} +EXPORT_SYMBOL_GPL(device_match_name); + int device_match_of_node(struct device *dev, const void *np) { return dev->of_node == np; } EXPORT_SYMBOL_GPL(device_match_of_node); + +int device_match_fwnode(struct device *dev, const void *fwnode) +{ + return dev_fwnode(dev) == fwnode; +} +EXPORT_SYMBOL_GPL(device_match_fwnode); + +int device_match_devt(struct device *dev, const void *pdevt) +{ + return dev->devt == *(dev_t *)pdevt; +} +EXPORT_SYMBOL_GPL(device_match_devt); + +int device_match_acpi_dev(struct device *dev, const void *adev) +{ + return ACPI_COMPANION(dev) == adev; +} +EXPORT_SYMBOL(device_match_acpi_dev); + +int device_match_any(struct device *dev, const void *unused) +{ + return 1; +} +EXPORT_SYMBOL_GPL(device_match_any); diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c index 09f28479b243..14e2178e09f8 100644 --- a/drivers/base/devcon.c +++ b/drivers/base/devcon.c @@ -12,9 +12,6 @@ static DEFINE_MUTEX(devcon_lock); static LIST_HEAD(devcon_list); -typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, - void *data); - static void * fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match) @@ -61,6 +58,34 @@ fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id, } /** + * fwnode_connection_find_match - Find connection from a device node + * @fwnode: Device node with the connection + * @con_id: Identifier for the connection + * @data: Data for the match function + * @match: Function to check and convert the connection description + * + * Find a connection with unique identifier @con_id between @fwnode and another + * device node. @match will be used to convert the connection description to + * data the caller is expecting to be returned. 
+ */ +void *fwnode_connection_find_match(struct fwnode_handle *fwnode, + const char *con_id, void *data, + devcon_match_fn_t match) +{ + void *ret; + + if (!fwnode || !match) + return NULL; + + ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); + if (ret) + return ret; + + return fwnode_devcon_match(fwnode, con_id, data, match); +} +EXPORT_SYMBOL_GPL(fwnode_connection_find_match); + +/** * device_connection_find_match - Find physical connection to a device * @dev: Device with the connection * @con_id: Identifier for the connection @@ -83,15 +108,9 @@ void *device_connection_find_match(struct device *dev, const char *con_id, if (!match) return NULL; - if (fwnode) { - ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); - if (ret) - return ret; - - ret = fwnode_devcon_match(fwnode, con_id, data, match); - if (ret) - return ret; - } + ret = fwnode_connection_find_match(fwnode, con_id, data, match); + if (ret) + return ret; mutex_lock(&devcon_lock); @@ -133,19 +152,13 @@ static struct bus_type *generic_match_buses[] = { NULL, }; -static int device_fwnode_match(struct device *dev, const void *fwnode) -{ - return dev_fwnode(dev) == fwnode; -} - static void *device_connection_fwnode_match(struct device_connection *con) { struct bus_type *bus; struct device *dev; for (bus = generic_match_buses[0]; bus; bus++) { - dev = bus_find_device(bus, NULL, (void *)con->fwnode, - device_fwnode_match); + dev = bus_find_device_by_fwnode(bus, con->fwnode); if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id))) return dev; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 600913aea73b..ad78c810666a 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1197,6 +1197,20 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); +/** + * platform_find_device_by_driver - Find a platform device with a given + * driver. + * @start: The device to start the search from. + * @drv: The device driver to look for. + */ +struct device *platform_find_device_by_driver(struct device *start, + const struct device_driver *drv) +{ + return bus_find_device(&platform_bus_type, start, drv, + (void *)platform_match); +} +EXPORT_SYMBOL_GPL(platform_find_device_by_driver); + int __init platform_bus_init(void) { int error; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9bd4ddd12b25..5b248763a672 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg) thi->name[0], resource->name); + allow_kernel_signal(DRBD_SIGKILL); + allow_kernel_signal(SIGXCPU); restart: retval = thi->function(thi); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3327192bb71f..c8fb886aebd4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3038,6 +3038,17 @@ again: } return true; case RBD_OBJ_READ_PARENT: + /* + * The parent image is read only up to the overlap -- zero-fill + * from the overlap to the end of the request. 
+ */ + if (!*result) { + u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req); + + if (obj_overlap < obj_req->ex.oe_len) + rbd_obj_zero_range(obj_req, obj_overlap, + obj_req->ex.oe_len - obj_overlap); + } return true; default: BUG(); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 8b33128dccee..0875470a7806 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev) return 0; } +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + bt_dev_dbg(hdev, "QCA pre shutdown cmd"); + + skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0, + NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); + static void qca_tlv_check_data(struct rome_config *config, const struct firmware *fw) { @@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config, BT_DBG("Length\t\t : %d bytes", length); config->dnld_mode = ROME_SKIP_EVT_NONE; + config->dnld_type = ROME_SKIP_EVT_NONE; switch (config->type) { case TLV_TYPE_PATCH: @@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev) evt = skb_put(skb, sizeof(*evt)); evt->ncmd = 1; - evt->opcode = QCA_HCI_CC_OPCODE; + evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE); skb_put_u8(skb, QCA_HCI_CC_SUCCESS); @@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev, */ if (config->dnld_type == ROME_SKIP_EVT_VSE_CC || config->dnld_type == ROME_SKIP_EVT_VSE) - return qca_inject_cmd_complete_event(hdev); + ret = qca_inject_cmd_complete_event(hdev); out: release_firmware(fw); @@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } + /* Give the controller some time to get ready to receive the NVM */ + msleep(10); + /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (firmware_name) diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 6a291a7a5d96..69c5315a65fd 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -13,6 +13,7 @@ #define EDL_PATCH_TLV_REQ_CMD (0x1E) #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) #define MAX_SIZE_PER_TLV_SEGMENT (243) +#define QCA_PRE_SHUTDOWN_CMD (0xFC08) #define EDL_CMD_REQ_RES_EVT (0x00) #define EDL_PATCH_VER_RES_EVT (0x19) @@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, const char *firmware_name); int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev); static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) { return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998; @@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) { return false; } + +static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} #endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3876fee6ad13..5cf0734eb31b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) fw_size = fw->size; /* The size of patch header is 30 bytes, should be skip */ - if (fw_size < 30) + if (fw_size < 30) { + err = -EINVAL; goto err_release_fw; + } fw_size -= 30; fw_ptr += 30; diff 
--git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 82a0a3691a63..9a970fd1975a 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu) unsigned long flags; struct qca_data *qca = hu->priv; - BT_DBG("hu %p want to sleep", hu); + BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); spin_lock_irqsave(&qca->hci_ibs_lock, flags); @@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu) break; case HCI_IBS_RX_ASLEEP: - /* Fall through */ + break; default: /* Any other state is illegal */ @@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb) if (hdr->evt == HCI_EV_VENDOR) complete(&qca->drop_ev_comp); - kfree(skb); + kfree_skb(skb); return 0; } @@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); + /* Perform pre shutdown command */ + qca_send_pre_shutdown_cmd(hdev); + qca_power_shutdown(hu); return 0; } diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 19d7b6ff2f17..20c957185af2 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell { size_t pdata_size; }; +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ + struct acpi_device *adev = ACPI_COMPANION(hostdev); + struct acpi_device *child; + + device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev); + + list_for_each_entry(child, &adev->children, node) + acpi_device_clear_enumerated(child); +} + /* * hisi_lpc_acpi_probe - probe children for ACPI FW * @hostdev: LPC host device pointer @@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) return 0; fail: - device_for_each_child(hostdev, NULL, - hisi_lpc_acpi_remove_subdev); + hisi_lpc_acpi_remove(hostdev); return ret; } @@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev) { return -ENODEV; } + +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ +} #endif // CONFIG_ACPI /* @@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev) range->fwnode = dev->fwnode; range->flags = LOGIC_PIO_INDIRECT; range->size = PIO_INDIRECT_SIZE; + range->hostdata = lpcdev; + range->ops = &hisi_lpc_ops; + lpcdev->io_host = range; ret = logic_pio_register_range(range); if (ret) { dev_err(dev, "register IO range failed (%d)!\n", ret); return ret; } - lpcdev->io_host = range; /* register the LPC host PIO resources */ if (acpi_device) ret = hisi_lpc_acpi_probe(dev); else ret = of_platform_populate(dev->of_node, NULL, NULL, dev); - if (ret) + if (ret) { + logic_pio_unregister_range(range); return ret; + } - lpcdev->io_host->hostdata = lpcdev; - lpcdev->io_host->ops = &hisi_lpc_ops; + dev_set_drvdata(dev, lpcdev); io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; dev_info(dev, "registered range [%pa - %pa]\n", @@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev) return ret; } +static int hisi_lpc_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct acpi_device *acpi_device = ACPI_COMPANION(dev); + struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev); + struct logic_pio_hwaddr *range = lpcdev->io_host; + + if (acpi_device) + hisi_lpc_acpi_remove(dev); + else + of_platform_depopulate(dev); + + logic_pio_unregister_range(range); + + return 0; +} + static const struct of_device_id hisi_lpc_of_match[] = { { .compatible = "hisilicon,hip06-lpc", }, { .compatible = 
"hisilicon,hip07-lpc", }, @@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = { .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), }, .probe = hisi_lpc_probe, + .remove = hisi_lpc_remove, }; builtin_platform_driver(hisi_lpc_driver); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index e6deabd8305d..2db474ab4c6b 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode) *best_mode = SYSC_IDLE_SMART_WKUP; else if (idlemodes & BIT(SYSC_IDLE_SMART)) *best_mode = SYSC_IDLE_SMART; - else if (idlemodes & SYSC_IDLE_FORCE) + else if (idlemodes & BIT(SYSC_IDLE_FORCE)) *best_mode = SYSC_IDLE_FORCE; else return -EINVAL; @@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 0xffff00f0, 0), - SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), @@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata) if (error) return 0; - if (val) - ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; - else - ddata->cfg.sysc_val = ddata->cap->sysc_mask; + ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; return 0; } @@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_init_dts_quirks(ddata); if (error) - goto unprepare; + return error; error = sysc_map_and_check_registers(ddata); if (error) - goto unprepare; + return error; error = sysc_init_sysc_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_idlemodes(ddata); if (error) - goto unprepare; + return error; error = sysc_init_syss_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_pdata(ddata); if (error) - goto unprepare; + return error; sysc_init_early_quirks(ddata); @@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_init_resets(ddata); if (error) - return error; + goto unprepare; error = sysc_init_module(ddata); if (error) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c0990703ce54..1c46babeb093 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name) return NULL; } +#ifdef CONFIG_OF +static int of_parse_clkspec(const struct device_node *np, int index, + const char *name, struct of_phandle_args *out_args); +static struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec); +#else +static inline int of_parse_clkspec(const struct device_node *np, int index, + const char *name, + struct of_phandle_args *out_args) +{ + return -ENOENT; +} +static inline struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) +{ + return ERR_PTR(-ENOENT); +} +#endif + /** * clk_core_get - Find the clk_core parent of a clk * @core: clk to find parent of @@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name) * }; * * Returns: -ENOENT when the provider can't be found or the clk doesn't - * exist in the provider. -EINVAL when the name can't be found. 
NULL when the - * provider knows about the clk but it isn't provided on this system. + * exist in the provider or the name can't be found in the DT node or + * in a clkdev lookup. NULL when the provider knows about the clk but it + * isn't provided on this system. * A valid clk_core pointer when the clk can be found in the provider. */ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) @@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) struct device *dev = core->dev; const char *dev_id = dev ? dev_name(dev) : NULL; struct device_node *np = core->of_node; + struct of_phandle_args clkspec; - if (np && (name || index >= 0)) - hw = of_clk_get_hw(np, index, name); - - /* - * If the DT search above couldn't find the provider or the provider - * didn't know about this clk, fallback to looking up via clkdev based - * clk_lookups - */ - if (PTR_ERR(hw) == -ENOENT && name) + if (np && (name || index >= 0) && + !of_parse_clkspec(np, index, name, &clkspec)) { + hw = of_clk_get_hw_from_clkspec(&clkspec); + of_node_put(clkspec.np); + } else if (name) { + /* + * If the DT search above couldn't find the provider fallback to + * looking up via clkdev based clk_lookups. + */ hw = clk_find_hw(dev_id, name); + } if (IS_ERR(hw)) return ERR_CAST(hw); @@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index) parent = ERR_PTR(-EPROBE_DEFER); } else { parent = clk_core_get(core, index); - if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) + if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name) parent = clk_core_lookup(entry->name); } @@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core, break; /* Fallback to comparing globally unique names */ - if (!strcmp(parent->name, core->parents[i].name)) + if (core->parents[i].name && + !strcmp(parent->name, core->parents[i].name)) break; } diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c index 91db7894125d..65c82d922b05 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.c +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c @@ -14,7 +14,7 @@ #include "clk-exynos5-subcmu.h" static struct samsung_clk_provider *ctx; -static const struct exynos5_subcmu_info *cmu; +static const struct exynos5_subcmu_info **cmu; static int nr_cmus; static void exynos5_subcmu_clk_save(void __iomem *base, @@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx, * when OF-core populates all device-tree nodes. 
*/ void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, - const struct exynos5_subcmu_info *_cmu) + const struct exynos5_subcmu_info **_cmu) { ctx = _ctx; cmu = _cmu; nr_cmus = _nr_cmus; for (; _nr_cmus--; _cmu++) { - exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, - _cmu->nr_gate_clks); - exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, - _cmu->nr_suspend_regs); + exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks, + (*_cmu)->nr_gate_clks); + exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs, + (*_cmu)->nr_suspend_regs); } } @@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev) if (of_property_read_string(np, "label", &name) < 0) continue; for (i = 0; i < nr_cmus; i++) - if (strcmp(cmu[i].pd_name, name) == 0) + if (strcmp(cmu[i]->pd_name, name) == 0) exynos5_clk_register_subcmu(&pdev->dev, - &cmu[i], np); + cmu[i], np); } return 0; } diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h index 755ee8aaa3de..9ae5356f25aa 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.h +++ b/drivers/clk/samsung/clk-exynos5-subcmu.h @@ -21,6 +21,6 @@ struct exynos5_subcmu_info { }; void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, - const struct exynos5_subcmu_info *cmu); + const struct exynos5_subcmu_info **cmu); #endif diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index f2b896881768..931c70a4da19 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = { .pd_name = "DISP1", }; +static const struct exynos5_subcmu_info *exynos5250_subcmus[] = { + &exynos5250_disp_subcmu, +}; + static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ @@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np) samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs)); - exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus), + exynos5250_subcmus); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 01bca5a498b2..7670cc596c74 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), - GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll", - SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { @@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), + /* Maudio Block */ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", + GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", + GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_mux_clock exynos5x_mux_clks[] 
__initconst = { @@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = { /* GSCL Block */ DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), - /* MSCL Block */ - DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2), - /* PSGEN */ DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), @@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), - /* Maudio Block */ - GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", - GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", - GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), - /* FSYS Block */ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), @@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", GATE_IP_GSCL1, 17, 0, 0), - /* MSCL Block */ - GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0), - GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0), - GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0), - GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk", - GATE_IP_MSCL, 8, 0, 0), - GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk", - GATE_IP_MSCL, 9, 0, 0), - GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk", - GATE_IP_MSCL, 10, 0, 0), - /* ISP */ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), @@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = { { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ }; -static const struct exynos5_subcmu_info exynos5x_subcmus[] = { - { - .div_clks = exynos5x_disp_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), - .gate_clks = exynos5x_disp_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), - .suspend_regs = exynos5x_disp_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), - .pd_name = "DISP", - }, { - .div_clks = exynos5x_gsc_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), - .gate_clks = exynos5x_gsc_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), - .suspend_regs = exynos5x_gsc_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), - .pd_name = "GSC", - }, { - .div_clks = exynos5x_mfc_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), - .gate_clks = exynos5x_mfc_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), - .suspend_regs = exynos5x_mfc_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), - .pd_name = "MFC", - }, +static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = { + /* MSCL Block */ + GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0), + GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0), + GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0), + GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk", + GATE_IP_MSCL, 8, 0, 0), + GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk", + GATE_IP_MSCL, 9, 0, 0), + GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk", + GATE_IP_MSCL, 10, 0, 0), +}; + +static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = { + DIV(0, 
"dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2), +}; + +static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = { + { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */ + { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */ + { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */ +}; + +static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = { + GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll", + SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", + GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", + GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), +}; + +static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = { + { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */ +}; + +static const struct exynos5_subcmu_info exynos5x_disp_subcmu = { + .div_clks = exynos5x_disp_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), + .gate_clks = exynos5x_disp_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), + .suspend_regs = exynos5x_disp_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), + .pd_name = "DISP", +}; + +static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = { + .div_clks = exynos5x_gsc_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), + .gate_clks = exynos5x_gsc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), + .suspend_regs = exynos5x_gsc_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), + .pd_name = "GSC", +}; + +static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = { + .div_clks = exynos5x_mfc_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), + .gate_clks = exynos5x_mfc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), + .suspend_regs = exynos5x_mfc_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), + .pd_name = "MFC", +}; + +static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = { + .div_clks = exynos5x_mscl_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks), + .gate_clks = exynos5x_mscl_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks), + .suspend_regs = exynos5x_mscl_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs), + .pd_name = "MSC", +}; + +static const struct exynos5_subcmu_info exynos5800_mau_subcmu = { + .gate_clks = exynos5800_mau_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks), + .suspend_regs = exynos5800_mau_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs), + .pd_name = "MAU", +}; + +static const struct exynos5_subcmu_info *exynos5x_subcmus[] = { + &exynos5x_disp_subcmu, + &exynos5x_gsc_subcmu, + &exynos5x_mfc_subcmu, + &exynos5x_mscl_subcmu, +}; + +static const struct exynos5_subcmu_info *exynos5800_subcmus[] = { + &exynos5x_disp_subcmu, + &exynos5x_gsc_subcmu, + &exynos5x_mfc_subcmu, + &exynos5x_mscl_subcmu, + &exynos5800_mau_subcmu, }; static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { @@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np, samsung_clk_extended_sleep_init(reg_base, exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); - if (soc == EXYNOS5800) + + if (soc == EXYNOS5800) { samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, ARRAY_SIZE(exynos5800_clk_regs)); - exynos5_subcmus_init(ctx, 
ARRAY_SIZE(exynos5x_subcmus), - exynos5x_subcmus); + + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus), + exynos5800_subcmus); + } else { + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), + exynos5x_subcmus); + } samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c index 5c50e723ecae..1a191eeeebba 100644 --- a/drivers/clk/socfpga/clk-periph-s10.c +++ b/drivers/clk/socfpga/clk-periph-s10.c @@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk, if (socfpgaclk->fixed_div) { div = socfpgaclk->fixed_div; } else { - if (!socfpgaclk->bypass_reg) + if (socfpgaclk->hw.reg) div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); } diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index f79eede71c62..edefa669153f 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 1; @@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 0; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 23e0a356f167..ad72b3f42ffa 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + /* Fall through */ case FSL_DMA_IP_83XX: chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->set_src_loop_size = fsl_chan_set_src_loop_size; diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index a13f224303c6..0221dee8dd4c 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, return -EIO; } - if (!IS_ERR(conf->confd)) { + if (conf->confd) { if (!gpiod_get_raw_value_cansleep(conf->confd)) { dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); return -EIO; @@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi) return PTR_ERR(conf->status); } - conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); + conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN); if (IS_ERR(conf->confd)) { - dev_warn(&spi->dev, "Not using confd gpio: %ld\n", - PTR_ERR(conf->confd)); + dev_err(&spi->dev, "Failed to get confd gpio: %ld\n", + PTR_ERR(conf->confd)); + return PTR_ERR(conf->confd); + } else if (!conf->confd) { + dev_warn(&spi->dev, "Not using confd gpio"); } /* Register manager with unique name */ diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 80bd8f1b2aa6..4bab9028940a 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -19,11 +19,6 @@ static struct class *fpga_bridge_class; /* Lock for adding/removing bridges to linked lists*/ static spinlock_t bridge_list_lock; -static int fpga_bridge_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * fpga_bridge_enable - Enable transactions on the bridge * @@ -104,8 +99,7 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, { struct device *dev; - 
dev = class_find_device(fpga_bridge_class, NULL, np, - fpga_bridge_of_node_match); + dev = class_find_device_by_of_node(fpga_bridge_class, np); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index c3866816456a..e05104f5e40c 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -482,11 +482,6 @@ struct fpga_manager *fpga_mgr_get(struct device *dev) } EXPORT_SYMBOL_GPL(fpga_mgr_get); -static int fpga_mgr_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr. * @@ -498,8 +493,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { struct device *dev; - dev = class_find_device(fpga_mgr_class, NULL, node, - fpga_mgr_of_node_match); + dev = class_find_device_by_of_node(fpga_mgr_class, node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c index 343153d47e5b..004dc03ccf09 100644 --- a/drivers/fsi/fsi-scom.c +++ b/drivers/fsi/fsi-scom.c @@ -38,8 +38,7 @@ #define SCOM_STATUS_PIB_RESP_MASK 0x00007000 #define SCOM_STATUS_PIB_RESP_SHIFT 12 -#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ - SCOM_STATUS_PROTECTION | \ +#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \ SCOM_STATUS_PARITY | \ SCOM_STATUS_PIB_ABORT | \ SCOM_STATUS_PIB_RESP_MASK) @@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status) /* Return -EBUSY on PIB abort to force a retry */ if (status & SCOM_STATUS_PIB_ABORT) return -EBUSY; - if (status & SCOM_STATUS_ERR_SUMMARY) { - fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy, - sizeof(uint32_t)); - return -EIO; - } return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 567fb98c0892..9762dd6d99fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, /* Special handling for SPI GPIOs if used */ if (IS_ERR(desc)) desc = of_find_spi_gpio(dev, con_id, &of_flags); - if (IS_ERR(desc)) { + if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) { /* This quirk looks up flags and all */ desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); if (!IS_ERR(desc)) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f497003f119c..cca749010cd0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN | + GPIOLINE_FLAG_IS_OUT); if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE | + GPIOLINE_FLAG_IS_OUT); if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) return -EFAULT; @@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (status) goto err_remove_from_list; - status = gpiochip_irqchip_init_valid_mask(chip); - if (status) - goto err_remove_from_list; - status = gpiochip_alloc_valid_mask(chip); if (status) - goto err_remove_irqchip_mask; - - status = gpiochip_add_irqchip(chip, lock_key, request_key); - if (status) - goto err_free_gpiochip_mask; + goto err_remove_from_list; status = of_gpiochip_add(chip); 
if (status) - goto err_remove_chip; + goto err_free_gpiochip_mask; status = gpiochip_init_valid_mask(chip); if (status) @@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, machine_gpiochip_add(chip); + status = gpiochip_irqchip_init_valid_mask(chip); + if (status) + goto err_remove_acpi_chip; + + status = gpiochip_add_irqchip(chip, lock_key, request_key); + if (status) + goto err_remove_irqchip_mask; + /* * By first adding the chardev, and then adding the device, * we get a device node entry in sysfs under @@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (gpiolib_initialized) { status = gpiochip_setup_dev(gdev); if (status) - goto err_remove_acpi_chip; + goto err_remove_irqchip; } return 0; +err_remove_irqchip: + gpiochip_irqchip_remove(chip); +err_remove_irqchip_mask: + gpiochip_irqchip_free_valid_mask(chip); err_remove_acpi_chip: acpi_gpiochip_remove(chip); err_remove_of_chip: gpiochip_free_hogs(chip); of_gpiochip_remove(chip); -err_remove_chip: - gpiochip_irqchip_remove(chip); err_free_gpiochip_mask: gpiochip_free_valid_mask(chip); -err_remove_irqchip_mask: - gpiochip_irqchip_free_valid_mask(chip); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 9b384a94d2f3..3e35a8f2c5e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4e4094f842e7..8b26c970a3cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem); + if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0; @@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, - struct amdgpu_cs_chunk - *chunk) + struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; unsigned num_deps; @@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_syncobj); + if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f539a2a92774..7398b4850649 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity) { struct 
amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); - unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); - struct dma_fence *other = centity->fences[idx]; + struct dma_fence *other; + unsigned idx; + long r; - if (other) { - signed long r; - r = dma_fence_wait(other, true); - if (r < 0) { - if (r != -ERESTARTSYS) - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + spin_lock(&ctx->ring_lock); + idx = centity->sequence & (amdgpu_sched_jobs - 1); + other = dma_fence_get(centity->fences[idx]); + spin_unlock(&ctx->ring_lock); - return r; - } - } + if (!other) + return 0; - return 0; + r = dma_fence_wait(other, true); + if (r < 0 && r != -ERESTARTSYS) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + + dma_fence_put(other); + return r; } void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 04b8ac4432c7..c066e1d3f981 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - break; - if ((adev->gfx.rlc_fw_version != 106 && - adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || - (adev->gfx.rlc_feature_version < 1) || - !adev->gfx.rlc.is_rlc_v2_1) + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + &&((adev->gfx.rlc_fw_version != 106 && + adev->gfx.rlc_fw_version < 531) || + (adev->gfx.rlc_fw_version == 53815) || + (adev->gfx.rlc_feature_version < 1) || + !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 662612f89c70..9922bce3fd89 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 23265414d448..04fbf05d7176 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } - - if (adev->pm.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4a29f72334d0..45be7a2132bb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3131,13 +3131,25 @@ static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, const struct drm_connector_state *state) { - uint32_t bpc = connector->display_info.bpc; + uint8_t bpc = (uint8_t)connector->display_info.bpc; + + /* Assume 8 bpc by default if no bpc is specified. */ + bpc = bpc ? 
bpc : 8; if (!state) state = connector->state; if (state) { - bpc = state->max_bpc; + /* + * Cap display bpc based on the user requested value. + * + * The value for state->max_bpc may not correctly updated + * depending on when the connector gets added to the state + * or if this was called outside of atomic check, so it + * can't be used directly. + */ + bpc = min(bpc, state->max_requested_bpc); + /* Round down to the nearest even number. */ bpc = bpc - (bpc & 1); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f27c6fbb192e..90c4e87ac5ad 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, if (ret) return ret; - *query = metrics_table.CurrSocketPower << 8; + /* For the 40.46 release, they changed the value name */ + if (hwmgr->smu_version == 0x282e00) + *query = metrics_table.AverageSocketPower << 8; + else + *query = metrics_table.CurrSocketPower << 8; return ret; } @@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_max_level = data->dpm_table.soc_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_max_level = data->dpm_table.soc_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_min_level, soft_max_level; int ret = 0; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + /* gfxclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; + + /* uclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + 
data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; + + /* socclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); + soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_levels[soft_min_level].value; + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_max_level].value; + + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Max Levels!", return ret); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index a0f52c86d8c7..a78b2e295895 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -907,8 +907,6 @@ struct smu_funcs ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) #define smu_set_azalia_d3_pme(smu) \ ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) -#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ - ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) #define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ ((smu)->funcs->get_max_sustainable_clocks_by_dc ? 
(smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) #define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 5fde5cf65b42..53097961bf2b 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; const struct smc_firmware_header_v1_0 *hdr; int ret, index; - uint32_t size; + uint32_t size = 0; + uint16_t atom_table_size; uint8_t frev, crev; void *table; uint16_t version_major, version_minor; @@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, powerplayinfo); - ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, + ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev, (uint8_t **)&table); if (ret) return ret; + size = atom_table_size; } if (!smu->smu_table.power_play_table) diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index dd6fd1c8bf24..6a14497257e4 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) { + uint32_t smu_version; int ret = 0; SmuMetrics_t metrics; @@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) if (ret) return ret; - *value = metrics.CurrSocketPower << 8; + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + /* For the 40.46 release, they changed the value name */ + if (smu_version == 0x282e00) + *value = metrics.AverageSocketPower << 8; + else + *value = metrics.CurrSocketPower << 8; return 0; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 5a118984de33..9d4d5075cc64 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -8,6 +8,7 @@ #include <linux/iommu.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #ifdef CONFIG_DEBUG_FS @@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np) pipe->of_output_port = of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); - pipe->of_node = np; + pipe->of_node = of_node_get(np); return 0; } @@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) return mdev->irq; } + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + ret = 0; + for_each_available_child_of_node(np, child) { if (of_node_cmp(child->name, "pipeline") == 0) { ret = komeda_parse_pipe_dt(mdev, child); @@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev) mdev->n_pipelines = 0; + of_reserved_mem_device_release(dev); + if (funcs && funcs->cleanup) funcs->cleanup(mdev); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c index cd4d9f53ddef..c9a1edb9a000 100644 --- 
a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c @@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table, return NULL; } +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier) +{ + u32 bpp; + + switch (info->format) { + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + default: + bpp = info->cpp[0] * 8; + break; + } + + return bpp; +} + /* Two assumptions * 1. RGB always has YTR * 2. Tiled RGB always has SC diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h index 3631910d33b5..32273cf18f7c 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h @@ -97,6 +97,9 @@ const struct komeda_format_caps * komeda_get_format_caps(struct komeda_format_caps_table *table, u32 fourcc, u64 modifier); +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, + u64 modifier); + u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, u32 layer_type, u32 *n_fmts); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 3b0a70ed6aa0..1b01a625f40e 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, struct drm_framebuffer *fb = &kfb->base; const struct drm_format_info *info = fb->format; struct drm_gem_object *obj; - u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; + u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp; u64 min_size; obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); @@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, alignment_header); + bpp = komeda_get_afbc_format_bpp(info, fb->modifier); kfb->afbc_size = kfb->offset_payload + n_blocks * - ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, + ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8, AFBC_SUPERBLK_ALIGNMENT); min_size = kfb->afbc_size + fb->offsets[0]; if (min_size > obj->size) { diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 419a8b0e5de8..69d9e26c60c8 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "komeda_dev.h" @@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st); struct komeda_plane_state *kplane_st; struct drm_plane_state *plane_st; - struct drm_framebuffer *fb; struct drm_plane *plane; struct list_head zorder_list; int order = 0, err; @@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, list_for_each_entry(kplane_st, &zorder_list, zlist_node) { plane_st = &kplane_st->base; - fb = plane_st->fb; plane = plane_st->plane; plane_st->normalized_zpos = order++; @@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; - struct 
drm_crtc_state *old_crtc_st, *new_crtc_st; + struct drm_crtc_state *new_crtc_st; int i, err; err = drm_atomic_helper_check_modeset(dev, state); @@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev, * so need to add all affected_planes (even unchanged) to * drm_atomic_state. */ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) { + for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) { err = drm_atomic_add_affected_planes(state, crtc); if (err) return err; @@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) komeda_kms_irq_handler, IRQF_SHARED, drm->driver->name, drm); if (err) - goto cleanup_mode_config; + goto free_component_binding; err = mdev->funcs->enable_irq(mdev); if (err) - goto cleanup_mode_config; + goto free_component_binding; drm->irq_enabled = true; + drm_kms_helper_poll_init(drm); + err = drm_dev_register(drm, 0); if (err) - goto cleanup_mode_config; + goto free_interrupts; return kms; -cleanup_mode_config: +free_interrupts: + drm_kms_helper_poll_fini(drm); drm->irq_enabled = false; + mdev->funcs->disable_irq(mdev); +free_component_binding: + component_unbind_all(mdev->dev, drm); +cleanup_mode_config: drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); + drm->dev_private = NULL; + drm_dev_put(drm); free_kms: kfree(kms); return ERR_PTR(err); @@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms) struct drm_device *drm = &kms->base; struct komeda_dev *mdev = drm->dev_private; + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); drm->irq_enabled = false; mdev->funcs->disable_irq(mdev); - drm_dev_unregister(drm); component_unbind_all(mdev->dev, drm); - komeda_kms_cleanup_private_objs(kms); drm_mode_config_cleanup(drm); + komeda_kms_cleanup_private_objs(kms); drm->dev_private = NULL; drm_dev_put(drm); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h index a90bcbb3cb23..14b683164544 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, struct seq_file *sf); /* component APIs */ +extern __printf(10, 11) struct komeda_component * komeda_component_add(struct komeda_pipeline *pipe, size_t comp_sz, u32 id, u32 hw_id, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index 617e1f7b8472..2851cac94d86 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, if (!kcrtc->master->wb_layer) return 0; - kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL); + kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL); if (!kwb_conn) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index ad19df0686c9..bd2498bbd74a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -93,11 +93,6 @@ static struct bus_type mipi_dsi_bus_type = { .pm = &mipi_dsi_device_pm_ops, }; -static int of_device_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a * device tree node @@ -110,7 +105,7 @@ struct mipi_dsi_device 
*of_find_mipi_dsi_device_by_node(struct device_node *np) { struct device *dev; - dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match); + dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np); return dev ? to_mipi_dsi_device(dev) : NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 58baf49d9926..badab94be2d6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -242,9 +242,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev) if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER)) continue; - while ((d = bus_find_device(&platform_bus_type, p, - &info->driver->driver, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, &info->driver->driver))) { put_device(p); if (!(info->flags & DRM_FIMC_DEVICE) || @@ -412,9 +410,8 @@ static void exynos_drm_unregister_devices(void) if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE)) continue; - while ((dev = bus_find_device(&platform_bus_type, NULL, - &info->driver->driver, - (void *)platform_bus_type.match))) { + while ((dev = platform_find_device_by_driver(NULL, + &info->driver->driver))) { put_device(dev); platform_device_unregister(to_platform_device(dev)); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 7925a176f900..1cb1fa74cfbc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) else if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); - else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) - dotclock = pipe_config->port_clock * 2 / 3; + else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) + dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp; else dotclock = pipe_config->port_clock; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 60652ebbdf61..18e4cba76720 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - drm_connector_attach_max_bpc_property(connector, 6, 12); + + /* + * Reuse the prop from the SST connector because we're + * not allowed to create new props after device registration. 
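The ddi_dotclock_get() change in intel_ddi.c above replaces the old "12 bpc means 2/3 of the port clock" special case with a general 24/pipe_bpp scale, so 10 bpc HDMI sinks are covered as well. A minimal stand-alone check of that arithmetic (plain C; the clock value is invented for illustration and is not taken from the driver):

#include <stdio.h>

/* Clocks in kHz, as in the i915 crtc state (assumption for this sketch). */
static int hdmi_dotclock(int port_clock, int pipe_bpp)
{
	/*
	 * A deep-colour HDMI link carries pipe_bpp bits per pixel over a
	 * link clocked for 24 bpp, so the pixel rate scales by 24/pipe_bpp.
	 */
	if (pipe_bpp > 24)
		return port_clock * 24 / pipe_bpp;
	return port_clock;
}

int main(void)
{
	int port_clock = 371250;	/* made-up example link clock, kHz */

	printf(" 8 bpc: %d kHz\n", hdmi_dotclock(port_clock, 24));
	printf("10 bpc: %d kHz\n", hdmi_dotclock(port_clock, 30));
	/* 12 bpc reproduces the old port_clock * 2 / 3 result. */
	printf("12 bpc: %d kHz\n", hdmi_dotclock(port_clock, 36));
	return 0;
}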
+ */ + connector->max_bpc_property = + intel_dp->attached_connector->base.max_bpc_property; + if (connector->max_bpc_property) + drm_connector_attach_max_bpc_property(connector, 6, 12); return connector; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index ffec807b8960..f413904a3e96 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); DRM_INFO("PPS2 = 0x%08x\n", pps_val); - if (encoder->type == INTEL_OUTPUT_EDP) { + if (cpu_transcoder == TRANSCODER_EDP) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f62e3397d936..bac1ee94f63f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(pdev); + /* + * We don't have a max segment size, so set it to the max so sg's + * debugging layer doesn't complain + */ + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + /* overlay on gen2 is broken and can't address above 1G */ if (IS_GEN(dev_priv, 2)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 94d3992b599d..724627afdedc 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info; static void vgt_deballoon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + if (!drm_mm_node_allocated(node)) + return; + DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", node->start, node->start + node->size, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1d58f7ec5d84..f11979879e7b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -829,7 +829,7 @@ struct intel_crtc_state { /* * Frequence the dpll for the port should run at. Differs from the - * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also + * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also * already multiplied by pixel_multiplier. 
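The vgt_deballoon_space() hunk in i915_vgpu.c above adds an early return when the drm_mm node was never reserved, which makes the teardown path safe to call from partially failed setup. The same guard pattern, reduced to stand-alone C (the struct and values are made up for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct node {
	bool allocated;
	long start, size;
};

static void deballoon_space(struct node *n)
{
	/* Bail out if the range was never reserved (or already released),
	 * so callers do not need to track which nodes were set up. */
	if (!n->allocated)
		return;

	printf("releasing [0x%lx - 0x%lx]\n", n->start, n->start + n->size);
	n->allocated = false;
}

int main(void)
{
	struct node n = { .allocated = true, .start = 0x1000, .size = 0x2000 };

	deballoon_space(&n);
	deballoon_space(&n);	/* second call is a harmless no-op */
	return 0;
}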
*/ int port_clock; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index baf63fb6850a..c07abf9e201c 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -477,8 +477,7 @@ static int mcde_probe(struct platform_device *pdev) struct device_driver *drv = &mcde_component_drivers[i]->driver; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, drv))) { put_device(p); component_match_add(dev, &match, mcde_compare_dev, d); p = d; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 95fdbd0fbcac..945bc20f1d33 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -17,6 +17,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp.h" @@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct mtk_drm_private *private = drm->dev_private; struct platform_device *pdev; struct device_node *np; + struct device *dma_dev; int ret; if (!iommu_present(&platform_bus_type)) @@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; } - private->dma_dev = &pdev->dev; + dma_dev = &pdev->dev; + private->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + if (!dma_dev->dma_parms) { + private->dma_parms_allocated = true; + dma_dev->dma_parms = + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), + GFP_KERNEL); + } + if (!dma_dev->dma_parms) { + ret = -ENOMEM; + goto err_component_unbind; + } + + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + if (ret) { + dev_err(dma_dev, "Failed to set DMA segment size\n"); + goto err_unset_dma_parms; + } /* * We don't use the drm_irq_install() helpers provided by the DRM @@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_component_unbind; + goto err_unset_dma_parms; drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm); return 0; +err_unset_dma_parms: + if (private->dma_parms_allocated) + dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); err_config_cleanup: @@ -302,9 +329,14 @@ err_config_cleanup: static void mtk_drm_kms_deinit(struct drm_device *drm) { + struct mtk_drm_private *private = drm->dev_private; + drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); + if (private->dma_parms_allocated) + private->dma_dev->dma_parms = NULL; + component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); } @@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = { .compat_ioctl = drm_compat_ioctl, }; +/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. 
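The mtk_drm_drv.c hunk above allocates dma_parms for the DMA device and then calls dma_set_max_seg_size() with DMA_BIT_MASK(32) cast to unsigned int, which effectively removes the segment-size limit so imported PRIME buffers map to one contiguous IOVA range. A quick stand-alone look at what that value works out to (the macro below mirrors the kernel's definition; treat it as an assumption of the sketch):

#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* What mtk_drm_kms_init() passes as the maximum DMA segment size. */
	unsigned long long mask = DMA_BIT_MASK(32);

	printf("DMA_BIT_MASK(32)          = %#llx\n", mask);
	printf("as max segment size (u32) = %#x bytes\n", (unsigned int)mask);
	return 0;
}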
+ */ +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, @@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, @@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev) comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; + of_node_put(node); goto err_node; } ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); - if (ret) + if (ret) { + of_node_put(node); goto err_node; + } private->ddp_comp[comp_id] = comp; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 598ff3e70446..e03fea12ff59 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -51,6 +51,8 @@ struct mtk_drm_private { } commit; struct drm_atomic_state *suspend_state; + + bool dma_parms_allocated; }; extern struct platform_driver mtk_ddp_driver; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index b4e7404fe660..a11637b0f6cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) u8 *ptr = msg->buf; while (remaining) { - u8 cnt = (remaining > 16) ? 
16 : remaining; - u8 cmd; + u8 cnt, retries, cmd; if (msg->flags & I2C_M_RD) cmd = 1; @@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if (mcnt || remaining > 16) cmd |= 4; /* MOT */ - ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); - if (ret < 0) { - nvkm_i2c_aux_release(aux); - return ret; + for (retries = 0, cnt = 0; + retries < 32 && !cnt; + retries++) { + cnt = min_t(u8, remaining, 16); + ret = aux->func->xfer(aux, true, cmd, + msg->addr, ptr, &cnt); + if (ret < 0) + goto out; + } + if (!cnt) { + AUX_TRACE(aux, "no data after 32 retries"); + ret = -EIO; + goto out; } ptr += cnt; @@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) msg++; } + ret = num; +out: nvkm_i2c_aux_release(aux); - return num; + return ret; } static u32 diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index de0f882f0f7b..14b41de44ebc 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -4,6 +4,7 @@ * Author: Archit Taneja <archit@ti.com> */ +#include <linux/bitops.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out) { struct device_node *remote_node; - remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); + remote_node = of_graph_get_remote_node(out->dev->of_node, + ffs(out->of_ports) - 1, 0); if (!remote_node) { dev_dbg(out->dev, "failed to find video sink\n"); return 0; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 288c59dae56a..1bad0a2cc5c6 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev) if (omapdss_is_initialized() == false) return -EPROBE_DEFER; - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "Failed to set the DMA mask\n"); return ret; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index f33e349c4ec5..952201c6d821 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400); static struct drm_driver qxl_driver; static struct pci_driver qxl_pci_driver; +static bool is_vga(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_DISPLAY_VGA << 8; +} + static int qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto disable_pci; + if (is_vga(pdev)) { + ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO); + if (ret) { + DRM_ERROR("can't get legacy vga ioports\n"); + goto disable_pci; + } + } + ret = qxl_device_init(qdev, &qxl_driver, pdev); if (ret) - goto disable_pci; + goto put_vga; ret = qxl_modeset_init(qdev); if (ret) @@ -105,6 +118,9 @@ modeset_cleanup: qxl_modeset_fini(qdev); unload: qxl_device_fini(qdev); +put_vga: + if (is_vga(pdev)) + vga_put(pdev, VGA_RSRC_LEGACY_IO); disable_pci: pci_disable_device(pdev); free_dev: @@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev) qxl_modeset_fini(qdev); qxl_device_fini(qdev); + if (is_vga(pdev)) + vga_put(pdev, VGA_RSRC_LEGACY_IO); dev->dev_private = NULL; kfree(qdev); diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c 
b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 1c62578590f4..082d02c84024 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds) /* Locate the companion LVDS encoder for dual-link operation, if any. */ companion = of_parse_phandle(dev->of_node, "renesas,companion", 0); - if (!companion) { - dev_err(dev, "Companion LVDS encoder not found\n"); - return -ENXIO; - } + if (!companion) + return 0; /* * Sanity check: the companion encoder must have the same compatible diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 53d2c5bd61dc..38dc26376961 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -330,8 +330,7 @@ static struct component_match *rockchip_drm_match_add(struct device *dev) struct device *p = NULL, *d; do { - d = bus_find_device(&platform_bus_type, p, &drv->driver, - (void *)platform_bus_type.match); + d = platform_find_device_by_driver(p, &drv->driver); put_device(p); p = d; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 64c43ee6bd92..df0cc8f46d7b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon, /* R and B components are only 5 bits deep */ val |= SUN4I_TCON0_FRM_CTL_MODE_R; val |= SUN4I_TCON0_FRM_CTL_MODE_B; + /* Fall through */ case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: /* Fall through: enable dithering */ diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index a1fc8b520985..b889ad3e86e1 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host, ret = sun6i_dsi_dcs_read(dsi, msg); break; } + /* Else, fall through */ default: ret = -EINVAL; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index bf11930e40e1..1551c8253bec 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -237,8 +237,7 @@ static void vc4_match_add_drivers(struct device *dev, struct device_driver *drv = &drivers[i]->driver; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, drv))) { put_device(p); component_match_add(dev, match, compare_dev, d); p = d; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index b2da31310d24..09b526518f5a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, .interruptible = false, .no_wait_gpu = false }; + size_t max_segment; /* wtf swapping */ if (bo->pages) @@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, if (!bo->pages) goto out; - ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); + max_segment = virtio_max_dma_size(qdev->vdev); + max_segment &= PAGE_MASK; + if (max_segment > SCATTERLIST_MAX_SEGMENT) + max_segment = SCATTERLIST_MAX_SEGMENT; + ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); if 
(ret) goto out; return 0; diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 2310c96ccf4a..db1b55df0d13 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); - cp2112_gpio_direction_input(gc, d->hwirq); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, return PTR_ERR(dev->desc[pin]); } + ret = cp2112_gpio_direction_input(&dev->gc, pin); + if (ret < 0) { + dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n"); + goto err_desc; + } + ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 21268c9fa71a..0179f7ed77e5 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = { { L27MHZ_DEVICE(HID_ANY_ID) }, - { /* Logitech G203/Prodigy Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) }, - { /* Logitech G302 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) }, - { /* Logitech G303 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) }, - { /* Logitech G400 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) }, { /* Logitech G403 Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, - { /* Logitech G403 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) }, - { /* Logitech G403 Hero Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) }, - { /* Logitech G502 Proteus Core Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) }, - { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) }, - { /* Logitech G502 Hero Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) }, - { /* Logitech G700 Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) }, - { /* Logitech G700s Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) }, { /* Logitech G703 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, { /* Logitech G703 Hero Gaming Mouse over USB */ diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 1065692f90e2..5792a104000a 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,6 +24,7 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define EHL_Ax_DEVICE_ID 0x4BB3 #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index aa80b4d3b740..279567baca3d 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git 
a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 7a8ddc999a8e..1713235d28cb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom) y >>= 1; distance >>= 1; } + if (features->type == INTUOSHT2) + distance = features->distance_max - distance; input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_DISTANCE, distance); @@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) input_report_key(input, BTN_BASE2, (data[11] & 0x02)); if (data[12] & 0x80) - input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); + input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1); else input_report_abs(input, ABS_WHEEL, 0); @@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) } if (wacom->tool[0]) { input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); - if (wacom->features.type == INTUOSP2_BT) { + if (wacom->features.type == INTUOSP2_BT || + wacom->features.type == INTUOSP2S_BT) { input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max); } else { diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 5f9505a087f6..23f358cb7f49 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -26,7 +26,7 @@ static unsigned long virt_to_hvpfn(void *addr) { - unsigned long paddr; + phys_addr_t paddr; if (is_vmalloc_addr(addr)) paddr = page_to_phys(vmalloc_to_page(addr)) + diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 362e70e9d145..fb16a622e8ab 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -146,8 +146,6 @@ struct hv_context { */ u64 guestid; - void *tsc_page; - struct hv_per_cpu_context __percpu *cpu_context; /* diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index dad7d96c5943..3c5bee429105 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -37,11 +37,6 @@ static int coresight_alloc_conns(struct device *dev, return 0; } -int coresight_device_fwnode_match(struct device *dev, const void *fwnode) -{ - return dev_fwnode(dev) == fwnode; -} - static struct device * coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) { @@ -51,8 +46,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) * If we have a non-configurable replicator, it will be found on the * platform bus. */ - dev = bus_find_device(&platform_bus_type, NULL, - fwnode, coresight_device_fwnode_match); + dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); if (dev) return dev; @@ -60,8 +54,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode) * We have a configurable component - circle through the AMBA bus * looking for the device that matches the endpoint node. 
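The coresight-platform.c hunk above is typical of this series: the per-driver coresight_device_fwnode_match() helper goes away and the lookup uses bus_find_device_by_fwnode(), which wraps a shared device_match_fwnode() callback. The shape of that refactor, reduced to stand-alone C (the struct, table and addresses are invented for the sketch):

#include <stddef.h>
#include <stdio.h>

struct device {
	const char *name;
	const void *fwnode;
};

typedef int (*device_match_t)(const struct device *dev, const void *data);

static const struct device devices[] = {
	{ "etm0",    (const void *)0x1000 },
	{ "funnel0", (const void *)0x2000 },
};

/* Generic iterator: one loop, pluggable match callback. */
static const struct device *find_device(const void *data, device_match_t match)
{
	for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		if (match(&devices[i], data))
			return &devices[i];
	return NULL;
}

/* Shared matcher, analogous to device_match_fwnode(). */
static int match_fwnode(const struct device *dev, const void *fwnode)
{
	return dev->fwnode == fwnode;
}

/* Thin wrapper, analogous to bus_find_device_by_fwnode(). */
static const struct device *find_device_by_fwnode(const void *fwnode)
{
	return find_device(fwnode, match_fwnode);
}

int main(void)
{
	const struct device *dev = find_device_by_fwnode((const void *)0x2000);

	printf("%s\n", dev ? dev->name : "not found");
	return 0;
}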
*/ - return bus_find_device(&amba_bustype, NULL, - fwnode, coresight_device_fwnode_match); + return bus_find_device_by_fwnode(&amba_bustype, fwnode); } #ifdef CONFIG_OF diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 7d401790dd7e..61d7f9ff054d 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -202,6 +202,4 @@ static inline void *coresight_get_uci_data(const struct amba_id *id) void coresight_release_platform_data(struct coresight_platform_data *pdata); -int coresight_device_fwnode_match(struct device *dev, const void *fwnode); - #endif diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 55db77f6410b..6453c67a4d01 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -1046,9 +1046,7 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) struct coresight_connection *conn = &csdev->pdata->conns[i]; struct device *dev = NULL; - dev = bus_find_device(&coresight_bustype, NULL, - (void *)conn->child_fwnode, - coresight_device_fwnode_match); + dev = bus_find_device_by_fwnode(&coresight_bustype, conn->child_fwnode); if (dev) { conn->child_dev = to_coresight_device(dev); /* and put reference from 'bus_find_device()' */ diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 55922896d862..d5c1821b31c6 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -789,12 +789,6 @@ static int intel_th_populate(struct intel_th *th) return 0; } -static int match_devt(struct device *dev, const void *data) -{ - dev_t devt = (dev_t)(unsigned long)(void *)data; - return dev->devt == devt; -} - static int intel_th_output_open(struct inode *inode, struct file *file) { const struct file_operations *fops; @@ -802,9 +796,7 @@ static int intel_th_output_open(struct inode *inode, struct file *file) struct device *dev; int err; - dev = bus_find_device(&intel_th_bus, NULL, - (void *)(unsigned long)inode->i_rdev, - match_devt); + dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev); if (!dev || !dev->driver) return -ENODEV; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c0378c3de9a4..91dfeba62485 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)0, }, { + /* Lewisburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226), + .driver_data = (kernel_ulong_t)0, + }, + { /* Gemini Lake */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index e55b902560de..603b83ac5085 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -89,13 +89,6 @@ static struct class stm_class = { .dev_groups = stm_groups, }; -static int stm_dev_match(struct device *dev, const void *data) -{ - const char *name = data; - - return sysfs_streq(name, dev_name(dev)); -} - /** * stm_find_device() - find stm device by name * @buf: 
character buffer containing the name @@ -116,7 +109,7 @@ struct stm_device *stm_find_device(const char *buf) if (!stm_core_up) return NULL; - dev = class_find_device(&stm_class, NULL, buf, stm_dev_match); + dev = class_find_device_by_name(&stm_class, buf); if (!dev) return NULL; @@ -1276,7 +1269,6 @@ int stm_source_register_device(struct device *parent, err: put_device(&src->dev); - kfree(src); return err; } diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index c7fe3b44a860..5e4800d72e00 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -457,18 +457,12 @@ static struct pci_driver amd_mp2_pci_driver = { }; module_pci_driver(amd_mp2_pci_driver); -static int amd_mp2_device_match(struct device *dev, const void *data) -{ - return 1; -} - struct amd_mp2_dev *amd_mp2_find_device(void) { struct device *dev; struct pci_dev *pci_dev; - dev = driver_find_device(&amd_mp2_pci_driver.driver, NULL, NULL, - amd_mp2_device_match); + dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL); if (!dev) return NULL; diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index d7fd76baec92..19ef2b0c682a 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { - u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + u32 val; + + /* We do not support the SMBUS Quick command */ + val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); if (adap->algo->reg_slave) val |= I2C_FUNC_SLAVE; diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index e7f9305b2dd9..f5f001738df5 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) dev->disable_int(dev); dev->disable(dev); + synchronize_irq(dev->irq); dev->slave = NULL; pm_runtime_put(dev->dev); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index f2956936c3f2..2e08b4722dc4 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle, int i; status = acpi_get_object_info(obj_handle, &info); - if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID)) + if (ACPI_FAILURE(status)) return AE_OK; + if (!(info->valid & ACPI_VALID_HID)) + goto smo88xx_not_found; + hid = info->hardware_id.string; if (!hid) - return AE_OK; + goto smo88xx_not_found; i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid); if (i < 0) - return AE_OK; + goto smo88xx_not_found; + + kfree(info); *((bool *)return_value) = true; return AE_CTRL_TERMINATE; + +smo88xx_not_found: + kfree(info); + return AE_OK; } static bool is_dell_system_with_lis3lv02d(void) diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 252edb433fdf..29eae1bf4f86 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = { .max_num_msgs = 255, }; +static const struct i2c_adapter_quirks mt8183_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + static const struct mtk_i2c_compatible mt2712_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, @@ -298,6 +302,7 @@ static const struct 
mtk_i2c_compatible mt8173_compat = { }; static const struct mtk_i2c_compatible mt8183_compat = { + .quirks = &mt8183_i2c_quirks, .regs = mt_i2c_regs_v2, .pmic_i2c = 0, .dcm = 0, @@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id) static u32 mtk_i2c_functionality(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN) + return I2C_FUNC_I2C | + (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + else + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm mtk_i2c_algorithm = { diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index c46c4bddc7ca..cba325eb852f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -91,7 +91,7 @@ #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 -/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 @@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, /* Find which register is used for port selection */ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { - switch (PIIX4_dev->device) { - case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: + if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS || + (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && + PIIX4_dev->revision >= 0x1F)) { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; - break; - case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: - default: + } else { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; - break; } } else { if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 4dbbc9a35f65..bc80aafb521f 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -354,17 +354,11 @@ static int i2c_acpi_find_match_adapter(struct device *dev, const void *data) return ACPI_HANDLE(dev) == (acpi_handle)data; } -static int i2c_acpi_find_match_device(struct device *dev, const void *data) -{ - return ACPI_COMPANION(dev) == data; -} struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { - struct device *dev; + struct device *dev = bus_find_device_by_acpi_dev(&i2c_bus_type, handle); - dev = bus_find_device(&i2c_bus_type, NULL, handle, - i2c_acpi_find_match_adapter); return dev ? i2c_verify_adapter(dev) : NULL; } EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); @@ -373,8 +367,7 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { struct device *dev; - dev = bus_find_device(&i2c_bus_type, NULL, adev, - i2c_acpi_find_match_device); + dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev); return dev ? 
i2c_verify_client(dev) : NULL; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f26ed495d384..9c440fa6a3dd 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device); */ void i2c_unregister_device(struct i2c_client *client) { - if (!client) + if (IS_ERR_OR_NULL(client)) return; if (client->dev.of_node) { diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index d1c48dec7118..6f632d543fcc 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -113,11 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap) of_node_put(bus); } -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - static int of_dev_or_parent_node_match(struct device *dev, const void *data) { if (dev->of_node == data) @@ -135,7 +130,7 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) struct device *dev; struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); + dev = bus_find_device_by_of_node(&i2c_bus_type, node); if (!dev) return NULL; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 19f1730a4f24..a68d0ccf67a4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4724,10 +4724,14 @@ static int __init cma_init(void) if (ret) goto err; - cma_configfs_init(); + ret = cma_configfs_init(); + if (ret) + goto err_ib; return 0; +err_ib: + ib_unregister_client(&cma_client); err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index b79890739a2c..af8c85d18e62 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, struct auto_mode_param *param = &counter->mode.param; bool match = true; - if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) + if (!rdma_is_visible_in_pid_ns(&qp->res)) return false; - /* Ensure that counter belong to right PID */ - if (!rdma_is_kernel_res(&counter->res) && - !rdma_is_kernel_res(&qp->res) && - (task_pid_vnr(counter->res.task) != current->pid)) + /* Ensure that counter belongs to the right PID */ + if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task)) return false; if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) @@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num) return qp; err: - rdma_restrack_put(&qp->res); + rdma_restrack_put(res); return NULL; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 87d40d1ecdde..020c26976558 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device) for (i = 0; i < RDMA_RESTRACK_MAX; i++) { if (!names[i]) continue; - curr = rdma_restrack_count(device, i, - task_active_pid_ns(current)); + curr = rdma_restrack_count(device, i); ret = fill_res_info_entry(msg, names[i], curr); if (ret) goto err; diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index bddff426ee0f..a07665f7ef8c 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev) * rdma_restrack_count() - the current 
usage of specific object * @dev: IB device * @type: actual type of object to operate - * @ns: PID namespace */ -int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, - struct pid_namespace *ns) +int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type) { struct rdma_restrack_root *rt = &dev->res[type]; struct rdma_restrack_entry *e; @@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, xa_lock(&rt->xa); xas_for_each(&xas, e, U32_MAX) { - if (ns == &init_pid_ns || - (!rdma_is_kernel_res(e) && - ns == task_active_pid_ns(e->task))) - cnt++; + if (!rdma_is_visible_in_pid_ns(e)) + continue; + cnt++; } xa_unlock(&rt->xa); return cnt; @@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res) */ if (rdma_is_kernel_res(res)) return task_active_pid_ns(current) == &init_pid_ns; - return task_active_pid_ns(current) == task_active_pid_ns(res->task); + + /* PID 0 means that resource is not found in current namespace */ + return task_pid_vnr(res->task); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 08da840ed7ee..56553668256f 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release); int ib_umem_page_count(struct ib_umem *umem) { - int i; - int n; + int i, n = 0; struct scatterlist *sg; - if (umem->is_odp) - return ib_umem_num_pages(umem); - - n = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) n += sg_dma_len(sg) >> PAGE_SHIFT; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 48b04d2f175f..60c8f76aab33 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, spin_unlock_irqrestore(&cmdq->lock, flags); return -EBUSY; } + + size = req->cmd_size; + /* change the cmd_size to the number of 16byte cmdq unit. 
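The qplib_rcfw hunks above move the "how many 16-byte CMDQ elements does this command occupy" conversion out of the RCFW_CMD_PREP() macro and into bnxt_qplib_set_cmd_slots(), called once per command. The round-up itself is ordinary integer arithmetic; a stand-alone check (byte sizes below are arbitrary examples):

#include <stddef.h>
#include <stdio.h>

#define CMDQE_UNITS 16	/* bytes per CMDQ element, per the comment above */

static unsigned int cmd_slots(unsigned int cmd_size_bytes)
{
	/* Round up to a whole number of 16-byte elements. */
	return (cmd_size_bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	unsigned int sizes[] = { 16, 24, 32, 120 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3u bytes -> %u element(s)\n", sizes[i],
		       cmd_slots(sizes[i]));
	return 0;
}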
+ * req->cmd_size is modified here + */ + bnxt_qplib_set_cmd_slots(req); + memset(resp, 0, sizeof(*resp)); crsqe->resp = (struct creq_qp_event *)resp; crsqe->resp->cookie = req->cookie; @@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; preq = (u8 *)req; - size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; do { /* Locate the next cmdq slot */ sw_prod = HWQ_CMP(cmdq->prod, cmdq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 2138533bb642..dfeadc192e17 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -55,9 +55,7 @@ do { \ memset(&(req), 0, sizeof((req))); \ (req).opcode = CMDQ_BASE_OPCODE_##CMD; \ - (req).cmd_size = (sizeof((req)) + \ - BNXT_QPLIB_CMDQE_UNITS - 1) / \ - BNXT_QPLIB_CMDQE_UNITS; \ + (req).cmd_size = sizeof((req)); \ (req).flags = cpu_to_le16(cmd_flags); \ } while (0) @@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth) BNXT_QPLIB_CMDQE_UNITS); } +/* Set the cmd_size to a factor of CMDQE unit */ +static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req) +{ + req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; +} + #define MAX_CMDQ_IDX(depth) ((depth) - 1) static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 93613e5def9b..986c12153e62 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, if (!data) return -ENOMEM; copy = min(len, datalen - 1); - if (copy_from_user(data, buf, copy)) - return -EFAULT; + if (copy_from_user(data, buf, copy)) { + ret = -EFAULT; + goto free_data; + } ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; ptr = data; token = ptr; for (ptr = data; *ptr; ptr = end + 1, token = ptr) { @@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, ret = len; debugfs_file_put(file->f_path.dentry); +free_data: kfree(data); return ret; } @@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, return -ENOMEM; ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; bit = find_first_bit(fault->opcodes, bitsize); while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); @@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, data[size - 1] = '\n'; data[size] = '\0'; ret = simple_read_from_buffer(buf, len, pos, data, size); +free_data: kfree(data); return ret; } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 996fc298207e..6141f4edc6bf 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp) hfi1_kern_clear_hw_flow(priv->rcd, qp); } -static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, - struct hfi1_packet *packet, u8 rcv_type, - u8 opcode) +static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type) { struct rvt_qp *qp = packet->qp; - struct hfi1_qp_priv *qpriv = qp->priv; - u32 ipsn; - struct ib_other_headers *ohdr = packet->ohdr; - struct rvt_ack_entry *e; - struct 
tid_rdma_request *req; - struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); - u32 i; if (rcv_type >= RHF_RCV_TYPE_IB) goto done; @@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, if (rcv_type == RHF_RCV_TYPE_EAGER) { hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); hfi1_schedule_send(qp); - goto done_unlock; - } - - /* - * For TID READ response, error out QP after freeing the tid - * resources. - */ - if (opcode == TID_OP(READ_RESP)) { - ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); - if (cmp_psn(ipsn, qp->s_last_psn) > 0 && - cmp_psn(ipsn, qp->s_psn) < 0) { - hfi1_kern_read_tid_flow_free(qp); - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - goto done; - } - goto done_unlock; - } - - /* - * Error out the qp for TID RDMA WRITE - */ - hfi1_kern_clear_hw_flow(qpriv->rcd, qp); - for (i = 0; i < rvt_max_atomic(rdi); i++) { - e = &qp->s_ack_queue[i]; - if (e->opcode == TID_OP(WRITE_REQ)) { - req = ack_to_tid_req(e); - hfi1_kern_exp_rcv_clear_all(req); - } } - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_LEN_ERR); - goto done; -done_unlock: + /* Since no payload is delivered, just drop the packet */ spin_unlock(&qp->s_lock); done: return true; @@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, u32 fpsn; lockdep_assert_held(&qp->r_lock); + spin_lock(&qp->s_lock); /* If the psn is out of valid range, drop the packet */ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || cmp_psn(ibpsn, qp->s_psn) > 0) - return ret; + goto s_unlock; - spin_lock(&qp->s_lock); /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued @@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, wqe = do_rc_completion(qp, wqe, ibp); if (qp->s_acked == qp->s_tail) - break; + goto s_unlock; } + if (qp->s_acked == qp->s_tail) + goto s_unlock; + /* Handle the eflags for the request */ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) goto s_unlock; @@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, if (lnh == HFI1_LRH_GRH) goto r_unlock; - if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) + if (tid_rdma_tid_err(packet, rcv_type)) goto r_unlock; } @@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, */ spin_lock(&qp->s_lock); qpriv = qp->priv; + if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID || + qpriv->r_tid_tail == qpriv->r_tid_head) + goto unlock; e = &qp->s_ack_queue[qpriv->r_tid_tail]; + if (e->opcode != TID_OP(WRITE_REQ)) + goto unlock; req = ack_to_tid_req(e); + if (req->comp_seg == req->cur_seg) + goto unlock; flow = &req->flows[req->clear_tail]; trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); @@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) struct rvt_swqe *wqe; struct tid_rdma_request *req; struct tid_rdma_flow *flow; - u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; + u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn; unsigned long flags; u16 fidx; @@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) ack_kpsn--; } + if (unlikely(qp->s_acked == qp->s_tail)) + goto ack_op_err; + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) @@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); /* Drop stale ACK/NAK */ - if (cmp_psn(psn, 
full_flow_psn(flow, flow->flow_state.spsn)) < 0) + if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 || + cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0) goto ack_op_err; while (cmp_psn(ack_kpsn, @@ -4712,7 +4685,12 @@ done: switch ((aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ + if (!req->flows) + break; flow = &req->flows[req->acked_tail]; + flpsn = full_flow_psn(flow, flow->flow_state.lpsn); + if (cmp_psn(psn, flpsn) > 0) + break; trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index c07e387a07a3..141205e76314 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -4501,19 +4501,13 @@ static const struct acpi_device_id hns_roce_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); -static int hns_roce_node_match(struct device *dev, const void *fwnode) -{ - return dev->fwnode == fwnode; -} - static struct platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) { struct device *dev; /* get the 'device' corresponding to the matching 'fwnode' */ - dev = bus_find_device(&platform_bus_type, NULL, - fwnode, hns_roce_node_match); + dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); /* get the platform device */ return dev ? to_platform_device(dev) : NULL; } diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 68c951491a08..57079110af9b 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1677,8 +1677,6 @@ tx_err: tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); } - kfree(tun_qp->tx_ring); - tun_qp->tx_ring = NULL; i = MLX4_NUM_TUNNEL_BUFS; err: while (i > 0) { @@ -1687,6 +1685,8 @@ err: rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } + kfree(tun_qp->tx_ring); + tun_qp->tx_ring = NULL; kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e12a4404096b..0569bcab02d4 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { - if (MLX5_CAP_GEN(mdev, pg)) + if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; props->odp_caps = dev->odp_caps; } @@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) dev->port[i].roce.last_port_state = IB_PORT_DOWN; } + mlx5_ib_internal_fill_odp_caps(dev); + err = mlx5_ib_init_multiport_master(dev); if (err) return err; @@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev) static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) { - mlx5_ib_internal_fill_odp_caps(dev); - return mlx5_ib_odp_init_one(dev); } diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index fe1a76d8531c..a40e0abf2338 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int entry; if (umem->is_odp) { - unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; + struct ib_umem_odp *odp = to_ib_umem_odp(umem); + unsigned int page_shift = 
odp->page_shift; - *ncont = ib_umem_page_count(umem); + *ncont = ib_umem_odp_num_pages(odp); *count = *ncont << (page_shift - PAGE_SHIFT); *shift = page_shift; if (order) diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f6a53455bf8b..9ae587b74b12 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev, bool dyn_bfreg); int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); + +static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, + bool do_modify_atomic) +{ + if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) + return false; + + if (do_modify_atomic && + MLX5_CAP_GEN(dev->mdev, atomic) && + MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) + return false; + + return true; +} #endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index b74fad08412f..3401f5f6792e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && - (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || - !MLX5_CAP_GEN(dev->mdev, atomic)); + use_umr = mlx5_ib_can_use_umr(dev, true); if (order <= mr_cache_max_order(dev) && use_umr) { mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, @@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, goto err; } - if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { + if (!mlx5_ib_can_use_umr(dev, true) || + (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) { /* * UMR can't be used - MKey needs to be replaced. 
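The mlx5 changes above funnel the "is UMR usable on this HCA" checks through the new mlx5_ib_can_use_umr() helper, which reg_user_mr, rereg_user_mr and (further down) the ODP capability setup now consult. The decision logic itself, restated as stand-alone C with the firmware capability bits stubbed out as plain booleans (illustration only, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct caps {
	bool umr_modify_entity_size_disabled;
	bool atomic;
	bool umr_modify_atomic_disabled;
};

static bool can_use_umr(const struct caps *c, bool do_modify_atomic)
{
	if (c->umr_modify_entity_size_disabled)
		return false;

	if (do_modify_atomic && c->atomic && c->umr_modify_atomic_disabled)
		return false;

	return true;
}

int main(void)
{
	struct caps plain  = { false, true, false };
	struct caps no_aum = { false, true, true };	/* atomic UMR disabled */

	printf("normal HCA, atomic MR:     %d\n", can_use_umr(&plain, true));
	printf("atomic UMR off, atomic MR: %d\n", can_use_umr(&no_aum, true));
	printf("atomic UMR off, plain MR:  %d\n", can_use_umr(&no_aum, false));
	return 0;
}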
*/ diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 1d257d1b3b0d..0a59912a4cef 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) memset(caps, 0, sizeof(*caps)); - if (!MLX5_CAP_GEN(dev->mdev, pg)) + if (!MLX5_CAP_GEN(dev->mdev, pg) || + !mlx5_ib_can_use_umr(dev, true)) return; caps->general_caps = IB_ODP_SUPPORT; @@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && MLX5_CAP_GEN(dev->mdev, null_mkey) && - MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) + MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) && + !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled)) caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; return; @@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev) { int ret = 0; - if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) - ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); + if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) + return ret; + + ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); @@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev) } } - if (!MLX5_CAP_GEN(dev->mdev, pg)) - return ret; - ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); return ret; @@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev) void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) { - if (!MLX5_CAP_GEN(dev->mdev, pg)) + if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT)) return; mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 379328b2598f..72869ff4a334 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes) MLX5_IB_UMR_OCTOWORD; } -static __be64 frwr_mkey_mask(void) +static __be64 frwr_mkey_mask(bool atomic) { u64 result; @@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void) MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | - MLX5_MKEY_MASK_A | MLX5_MKEY_MASK_SMALL_FENCE | MLX5_MKEY_MASK_FREE; + if (atomic) + result |= MLX5_MKEY_MASK_A; + return cpu_to_be64(result); } @@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void) } static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, - struct mlx5_ib_mr *mr, u8 flags) + struct mlx5_ib_mr *mr, u8 flags, bool atomic) { int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; @@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, umr->flags = flags; umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); - umr->mkey_mask = frwr_mkey_mask(); + umr->mkey_mask = frwr_mkey_mask(atomic); } static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) @@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, { struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); + struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; + bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; u8 flags = 0; + if (!mlx5_ib_can_use_umr(dev, atomic)) { + mlx5_ib_warn(to_mdev(qp->ibqp.device), + "Fast update of %s for MR is disabled\n", + (MLX5_CAP_GEN(dev->mdev, + 
umr_modify_entity_size_disabled)) ? + "entity size" : + "atomic access"); + return -EINVAL; + } + if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { mlx5_ib_warn(to_mdev(qp->ibqp.device), "Invalid IB_SEND_INLINE send flag\n"); @@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, if (umr_inline) flags |= MLX5_UMR_INLINE; - set_reg_umr_seg(*seg, mr, flags); + set_reg_umr_seg(*seg, mr, flags, atomic); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; handle_post_send_edge(&qp->sq, seg, *size, cur_edge); diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 77b1aabf6ff3..dba4535494ab 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -138,9 +138,9 @@ struct siw_umem { }; struct siw_pble { - u64 addr; /* Address of assigned user buffer */ - u64 size; /* Size of this entry */ - u64 pbl_off; /* Total offset from start of PBL */ + dma_addr_t addr; /* Address of assigned buffer */ + unsigned int size; /* Size of this entry */ + unsigned long pbl_off; /* Total offset from start of PBL */ }; struct siw_pbl { @@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len) "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) #define siw_dbg_cep(cep, fmt, ...) \ - ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ + ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \ cep, __func__, ##__VA_ARGS__) void siw_cq_flush(struct siw_cq *cq); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 9ce8a1b925d2..8c1931a57f4a 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason, getname_local(cep->sock, &event.local_addr); getname_peer(cep->sock, &event.remote_addr); } - siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", - cep->qp ? qp_id(cep->qp) : -1, id, reason, status); + siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n", + cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status); return id->event_handler(id, &event); } @@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_get(new_cep); new_s->sk->sk_user_data = new_cep; - siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s); - if (siw_tcp_nagle == false) { int val = 1; @@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w) cep = work->cep; siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", - cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); + cep->qp ? qp_id(cep->qp) : UINT_MAX, + work->type, cep->state); siw_cep_set_inuse(cep); @@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w) } if (release_cep) { siw_dbg_cep(cep, - "release: timer=%s, QP[%u], id 0x%p\n", + "release: timer=%s, QP[%u]\n", cep->mpa_timer ? "y" : "n", - cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); + cep->qp ? qp_id(cep->qp) : UINT_MAX); siw_cancel_mpatimer(cep); @@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type) else delay = MPAREP_TIMEOUT; } - siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", - cep->qp ? qp_id(cep->qp) : -1, type, work, delay); + siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n", + cep->qp ? 
qp_id(cep->qp) : -1, type, delay); queue_delayed_work(siw_cm_wq, &work->work, delay); @@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) } if (v4) siw_dbg_qp(qp, - "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", - id, pd_len, + "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", + pd_len, &((struct sockaddr_in *)(laddr))->sin_addr, ntohs(((struct sockaddr_in *)(laddr))->sin_port), &((struct sockaddr_in *)(raddr))->sin_addr, ntohs(((struct sockaddr_in *)(raddr))->sin_port)); else siw_dbg_qp(qp, - "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", - id, pd_len, + "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", + pd_len, &((struct sockaddr_in6 *)(laddr))->sin6_addr, ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), &((struct sockaddr_in6 *)(raddr))->sin6_addr, @@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (rv >= 0) { rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); if (!rv) { - siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, - qp_id(qp)); + siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp)); siw_cep_set_free(cep); return 0; } } error: - siw_dbg_qp(qp, "failed: %d\n", rv); + siw_dbg(id->device, "failed: %d\n", rv); if (cep) { siw_socket_disassoc(s); @@ -1540,7 +1538,8 @@ error: } else if (s) { sock_release(s); } - siw_qp_put(qp); + if (qp) + siw_qp_put(qp); return rv; } @@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { - siw_dbg_cep(cep, "id 0x%p: out of state\n", id); + siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); @@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) up_write(&qp->state_lock); goto error; } - siw_dbg_cep(cep, "id 0x%p\n", id); + siw_dbg_cep(cep, "[QP %d]\n", params->qpn); if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { siw_dbg_cep(cep, "peer allows GSO on TX\n"); @@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) params->ird > sdev->attrs.max_ird) { siw_dbg_cep( cep, - "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", - id, qp_id(qp), params->ord, sdev->attrs.max_ord, + "[QP %u]: ord %d (max %d), ird %d (max %d)\n", + qp_id(qp), params->ord, sdev->attrs.max_ord, params->ird, sdev->attrs.max_ird); rv = -EINVAL; up_write(&qp->state_lock); @@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (params->private_data_len > max_priv_data) { siw_dbg_cep( cep, - "id 0x%p, [QP %u]: private data length: %d (max %d)\n", - id, qp_id(qp), params->private_data_len, max_priv_data); + "[QP %u]: private data length: %d (max %d)\n", + qp_id(qp), params->private_data_len, max_priv_data); rv = -EINVAL; up_write(&qp->state_lock); goto error; @@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) qp_attrs.flags = SIW_MPA_CRC; qp_attrs.state = SIW_QP_STATE_RTS; - siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); + siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp)); /* Associate QP with CEP */ siw_cep_get(cep); @@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (rv) goto error; - siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", - id, qp_id(qp), params->private_data_len); + siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n", + qp_id(qp), params->private_data_len); rv = siw_send_mpareqrep(cep, 
params->private_data, params->private_data_len); @@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len) siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { - siw_dbg_cep(cep, "id 0x%p: out of state\n", id); + siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); /* put last reference */ return -ECONNRESET; } - siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, + siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state, pd_len); if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { @@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, sizeof(s_val)); if (rv) { - siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); + siw_dbg(id->device, "setsockopt error: %d\n", rv); goto error; } rv = s->ops->bind(s, laddr, addr_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (rv) { - siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); + siw_dbg(id->device, "socket bind error: %d\n", rv); goto error; } cep = siw_cep_alloc(sdev); @@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, rv = siw_cm_alloc_work(cep, backlog); if (rv) { siw_dbg(id->device, - "id 0x%p: alloc_work error %d, backlog %d\n", id, + "alloc_work error %d, backlog %d\n", rv, backlog); goto error; } rv = s->ops->listen(s, backlog); if (rv) { - siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); + siw_dbg(id->device, "listen error %d\n", rv); goto error; } cep->cm_id = id; @@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id) list_del(p); - siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, - cep->state); + siw_dbg_cep(cep, "drop cep, state %d\n", cep->state); siw_cep_set_inuse(cep); @@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct net_device *dev = to_siw_dev(id->device)->netdev; int rv = 0, listeners = 0; - siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); + siw_dbg(id->device, "backlog %d\n", backlog); /* * For each attached address of the interface, create a @@ -1964,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct sockaddr_in s_laddr, *s_raddr; const struct in_ifaddr *ifa; + if (!in_dev) { + rv = -ENODEV; + goto out; + } memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); s_raddr = (struct sockaddr_in *)&id->remote_addr; siw_dbg(id->device, - "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", - id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), + "laddr %pI4:%d, raddr %pI4:%d\n", + &s_laddr.sin_addr, ntohs(s_laddr.sin_port), &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); rtnl_lock(); @@ -1993,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), *s_raddr = &to_sockaddr_in6(id->remote_addr); + if (!in6_dev) { + rv = -ENODEV; + goto out; + } siw_dbg(id->device, - "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", - id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), + "laddr %pI6:%d, raddr %pI6:%d\n", + &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); - read_lock_bh(&in6_dev->lock); + rtnl_lock(); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { - struct sockaddr_in6 bind_addr; - + if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) + continue; if (ipv6_addr_any(&s_laddr->sin6_addr) || 
ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { - bind_addr.sin6_family = AF_INET6; - bind_addr.sin6_port = s_laddr->sin6_port; - bind_addr.sin6_flowinfo = 0; - bind_addr.sin6_addr = ifp->addr; - bind_addr.sin6_scope_id = dev->ifindex; + struct sockaddr_in6 bind_addr = { + .sin6_family = AF_INET6, + .sin6_port = s_laddr->sin6_port, + .sin6_flowinfo = 0, + .sin6_addr = ifp->addr, + .sin6_scope_id = dev->ifindex }; rv = siw_listen_address(id, backlog, (struct sockaddr *)&bind_addr, @@ -2017,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) listeners++; } } - read_unlock_bh(&in6_dev->lock); - + rtnl_unlock(); in6_dev_put(in6_dev); } else { - return -EAFNOSUPPORT; + rv = -EAFNOSUPPORT; } +out: if (listeners) rv = 0; else if (!rv) rv = -EINVAL; - siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); + siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK"); return rv; } int siw_destroy_listen(struct iw_cm_id *id) { - siw_dbg(id->device, "id 0x%p\n", id); - if (!id->provider_data) { - siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); + siw_dbg(id->device, "no cep(s)\n"); return 0; } siw_drop_listeners(id); diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index e381ae9b7d62..d8db3bee9da7 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; - siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", + siw_dbg_cq(cq, + "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, - cqe->flags, (void *)cqe->id); + cqe->flags, (void *)(uintptr_t)cqe->id); } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index 67171c82b0c4..87a56039f0ef 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, */ if (addr < mem->va || addr + len > mem->va + mem->len) { siw_dbg_pd(pd, "MEM interval len %d\n", len); - siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", - (unsigned long long)addr, - (unsigned long long)(addr + len)); - siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", - (unsigned long long)mem->va, - (unsigned long long)(mem->va + mem->len), + siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n", + (void *)(uintptr_t)addr, + (void *)(uintptr_t)(addr + len)); + siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n", + (void *)(uintptr_t)mem->va, + (void *)(uintptr_t)(mem->va + mem->len), mem->stag); return -E_BASE_BOUNDS; @@ -330,7 +330,7 @@ out: * Optionally, provides remaining len within current element, and * current PBL index for later resume at same element. */ -u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) +dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) { int i = idx ? 
*idx : 0; diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h index f43daf280891..db138c8423da 100644 --- a/drivers/infiniband/sw/siw/siw_mem.h +++ b/drivers/infiniband/sw/siw/siw_mem.h @@ -9,7 +9,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); void siw_umem_release(struct siw_umem *umem, bool dirty); struct siw_pbl *siw_pbl_alloc(u32 num_buf); -u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); +dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); int siw_invalidate_stag(struct ib_pd *pd, u32 stag); diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 0990307c5d2c..430314c8abd9 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -949,7 +949,7 @@ skip_irq: rv = -EINVAL; goto out; } - wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; + wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1]; wqe->sqe.sge[0].lkey = 0; wqe->sqe.num_sge = 1; } diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index f87657a11657..c0a887240325 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, p = siw_get_upage(umem, dest_addr); if (unlikely(!p)) { - pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", + pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n", __func__, qp_id(rx_qp(srx)), - (void *)dest_addr, (void *)umem->fp_addr); + (void *)(uintptr_t)dest_addr, + (void *)(uintptr_t)umem->fp_addr); /* siw internal error */ srx->skb_copied += copied; srx->skb_new -= copied; @@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, pg_off = dest_addr & ~PAGE_MASK; bytes = min(len, (int)PAGE_SIZE - pg_off); - siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); + siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes); dest = kmap_atomic(p); rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, @@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len) { int rv; - siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); + siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len); rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); if (unlikely(rv)) { - pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", + pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n", qp_id(rx_qp(srx)), __func__, len, kva, rv); return rv; @@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, while (len) { int bytes; - u64 buf_addr = + dma_addr_t buf_addr = siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); if (!buf_addr) break; @@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp) mem_p = *mem; if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(sge->laddr + frx->sge_off), - sge_bytes); + (void *)(uintptr_t)(sge->laddr + frx->sge_off), + sge_bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + frx->sge_off, sge_bytes); @@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp) if (mem->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(srx->ddp_to + srx->fpdu_part_rcvd), - bytes); + (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd), + bytes); else if (!mem->is_pbl) rv = siw_rx_umem(srx, mem->umem, srx->ddp_to + 
srx->fpdu_part_rcvd, bytes); @@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp) bytes = min(srx->fpdu_part_rem, srx->skb_new); if (mem_p->mem_obj == NULL) - rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), - bytes); + rv = siw_rx_kva(srx, + (void *)(uintptr_t)(sge->laddr + wqe->processed), + bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, bytes); diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 43020d2040fc..438a2917a47c 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) { struct siw_pbl *pbl = mem->pbl; u64 offset = addr - mem->va; - u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); + dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) return virt_to_page(paddr); @@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) /* * Copy short payload at provided destination payload address */ -static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) +static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) { struct siw_wqe *wqe = &c_tx->wqe_active; struct siw_sge *sge = &wqe->sqe.sge[0]; @@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) return 0; if (tx_flags(wqe) & SIW_WQE_INLINE) { - memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); + memcpy(paddr, &wqe->sqe.sge[1], bytes); } else { struct siw_mem *mem = wqe->mem[0]; if (!mem->mem_obj) { /* Kernel client using kva */ - memcpy((void *)paddr, (void *)sge->laddr, bytes); + memcpy(paddr, + (const void *)(uintptr_t)sge->laddr, bytes); } else if (c_tx->in_syscall) { - if (copy_from_user((void *)paddr, - (const void __user *)sge->laddr, + if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr), bytes)) return -EFAULT; } else { @@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) buffer = kmap_atomic(p); if (likely(PAGE_SIZE - off >= bytes)) { - memcpy((void *)paddr, buffer + off, bytes); + memcpy(paddr, buffer + off, bytes); kunmap_atomic(buffer); } else { unsigned long part = bytes - (PAGE_SIZE - off); - memcpy((void *)paddr, buffer + off, part); + memcpy(paddr, buffer + off, part); kunmap_atomic(buffer); if (!mem->is_pbl) @@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) return -EFAULT; buffer = kmap_atomic(p); - memcpy((void *)(paddr + part), buffer, + memcpy(paddr + part, buffer, bytes - part); kunmap_atomic(buffer); } @@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_send); crc = (char *)&c_tx->pkt.send_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_SEND_REMOTE_INV: @@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_send_inv); crc = (char *)&c_tx->pkt.send_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_WRITE: @@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); crc = (char *)&c_tx->pkt.write_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_READ_RESPONSE: @@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); crc = 
(char *)&c_tx->pkt.write_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; default: @@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page, #define MAX_TRAILER (MPA_CRC_SIZE + 4) -static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) +static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask) { - if (hdr_len) { - ++pages; - --num_maps; - } - while (num_maps-- > 0) { - kunmap(*pages); - pages++; + while (kmap_mask) { + if (kmap_mask & BIT(0)) + kunmap(*pp); + pp++; + kmap_mask >>= 1; } } @@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, pbl_idx = c_tx->pbl_idx; + unsigned long kmap_mask = 0L; if (c_tx->state == SIW_SEND_HDR) { if (c_tx->use_sendpage) { @@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { mem = wqe->mem[sge_idx]; - if (!mem->mem_obj) - is_kva = 1; + is_kva = mem->mem_obj == NULL ? 1 : 0; } else { is_kva = 1; } @@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) * tx from kernel virtual address: either inline data * or memory region with assigned kernel buffer */ - iov[seg].iov_base = (void *)(sge->laddr + sge_off); + iov[seg].iov_base = + (void *)(uintptr_t)(sge->laddr + sge_off); iov[seg].iov_len = sge_len; if (do_crc) @@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) p = siw_get_upage(mem->umem, sge->laddr + sge_off); if (unlikely(!p)) { - if (hdr_len) - seg--; - if (!c_tx->use_sendpage && seg) { - siw_unmap_pages(page_array, - hdr_len, seg); - } + siw_unmap_pages(page_array, kmap_mask); wqe->processed -= c_tx->bytes_unsent; rv = -EFAULT; goto done_crc; @@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (!c_tx->use_sendpage) { iov[seg].iov_base = kmap(p) + fp_off; iov[seg].iov_len = plen; + + /* Remember for later kunmap() */ + kmap_mask |= BIT(seg); + if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, @@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) page_address(p) + fp_off, plen); } else { - u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); + u64 va = sge->laddr + sge_off; - page_array[seg] = virt_to_page(pa); + page_array[seg] = virt_to_page(va & PAGE_MASK); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, - (void *)(sge->laddr + sge_off), + (void *)(uintptr_t)va, plen); } @@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (++seg > (int)MAX_ARRAY) { siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); - if (!is_kva && !c_tx->use_sendpage) { - siw_unmap_pages(page_array, hdr_len, - seg - 1); - } + siw_unmap_pages(page_array, kmap_mask); wqe->processed -= c_tx->bytes_unsent; rv = -EMSGSIZE; goto done_crc; @@ -597,8 +592,7 @@ sge_done: } else { rv = kernel_sendmsg(s, &msg, iov, seg + 1, hdr_len + data_len + trl_len); - if (!is_kva) - siw_unmap_pages(page_array, hdr_len, seg); + siw_unmap_pages(page_array, kmap_mask); } if (rv < (int)hdr_len) { /* Not even complete hdr pushed or negative rv */ @@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe) rv = -EINVAL; goto tx_error; } - wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; + wqe->sqe.sge[0].laddr = + (u64)(uintptr_t)&wqe->sqe.sge[1]; } } wqe->wr_status = SIW_WR_INPROGRESS; @@ 
-924,7 +919,7 @@ tx_error: static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) { - struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; + struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); int rv = 0; @@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) mem->stag = sqe->rkey; mem->perms = sqe->access; - siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", - mem->va, base_mr->iova); + siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey); mem->va = base_mr->iova; mem->stag_valid = 1; out: diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index e7f3a2379d9d..da52c90e06d4 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, */ qp->srq = to_siw_srq(attrs->srq); qp->attrs.rq_size = 0; - siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", - qp->qp_num, qp->srq); + siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num); } else if (num_rqe) { if (qp->kernel_verbs) qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); @@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) base_ucontext); struct siw_qp_attrs qp_attrs; - siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); + siw_dbg_qp(qp, "state %d\n", qp->attrs.state); /* * Mark QP as in process of destruction to prevent from @@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, void *kbuf = &sqe->sge[1]; int num_sge = core_wr->num_sge, bytes = 0; - sqe->sge[0].laddr = (u64)kbuf; + sqe->sge[0].laddr = (uintptr_t)kbuf; sqe->sge[0].lkey = 0; while (num_sge--) { @@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, break; case IB_WR_REG_MR: - sqe->base_mr = (uint64_t)reg_wr(wr)->mr; + sqe->base_mr = (uintptr_t)reg_wr(wr)->mr; sqe->rkey = reg_wr(wr)->key; sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; sqe->opcode = SIW_OP_REG_MR; @@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, rv = -EINVAL; break; } - siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", - sqe->opcode, sqe->flags, (void *)sqe->id); + siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n", + sqe->opcode, sqe->flags, + (void *)(uintptr_t)sqe->id); if (unlikely(rv < 0)) break; @@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); int rv; - siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", - (unsigned long long)start, (unsigned long long)rnic_va, + siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n", + (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, (unsigned long long)len); if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { @@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, struct siw_mem *mem = mr->mem; struct siw_pbl *pbl = mem->pbl; struct siw_pble *pble; - u64 pbl_size; + unsigned long pbl_size; int i, rv; if (!pbl) { @@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, pbl_size += sg_dma_len(slp); } siw_dbg_mem(mem, - "sge[%d], size %llu, addr 0x%016llx, total %llu\n", - i, pble->size, pble->addr, pbl_size); + "sge[%d], size %u, addr 0x%p, total %lu\n", + i, pble->size, (void 
*)(uintptr_t)pble->addr, + pbl_size); } rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); if (rv > 0) { mem->len = base_mr->length; mem->va = base_mr->iova; siw_dbg_mem(mem, - "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", - mem->len, mem->va, num_sle, pbl->num_buf); + "%llu bytes, start 0x%pK, %u SLE to %u entries\n", + mem->len, (void *)(uintptr_t)mem->va, num_sle, + pbl->num_buf); } return rv; } @@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq, } spin_lock_init(&srq->lock); - siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); + siw_dbg_pd(base_srq->pd, "[SRQ]: success\n"); return 0; @@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, if (unlikely(!srq->kernel_verbs)) { siw_dbg_pd(base_srq->pd, - "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", - srq); + "[SRQ]: no kernel post_recv for mapped srq\n"); rv = -EINVAL; goto out; } @@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, } if (unlikely(wr->num_sge > srq->max_sge)) { siw_dbg_pd(base_srq->pd, - "[SRQ 0x%p]: too many sge's: %d\n", srq, - wr->num_sge); + "[SRQ]: too many sge's: %d\n", wr->num_sge); rv = -EINVAL; break; } @@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&srq->lock, flags); out: if (unlikely(rv < 0)) { - siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); + siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv); *bad_wr = wr; } return rv; diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 88ae7c2ac3c8..e486a8a74c40 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev, static void hv_kbd_on_channel_callback(void *context) { + struct vmpacket_descriptor *desc; struct hv_device *hv_dev = context; - void *buffer; - int bufferlen = 0x100; /* Start with sensible size */ u32 bytes_recvd; u64 req_id; - int error; - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - while (1) { - error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, - &bytes_recvd, &req_id); - switch (error) { - case 0: - if (bytes_recvd == 0) { - kfree(buffer); - return; - } - - hv_kbd_handle_received_packet(hv_dev, buffer, - bytes_recvd, req_id); - break; + foreach_vmbus_pkt(desc, hv_dev->channel) { + bytes_recvd = desc->len8 * 8; + req_id = desc->trans_id; - case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (!buffer) - return; - break; - } + hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd, + req_id); } } diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index c5c93e48b4db..e7f49fd1a7ba 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2034,16 +2034,11 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) static struct platform_driver arm_smmu_driver; -static int arm_smmu_match_node(struct device *dev, const void *data) -{ - return dev->fwnode == data; -} - static struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, - fwnode, arm_smmu_match_node); + struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver, + fwnode); put_device(dev); return dev ? 
dev_get_drvdata(dev) : NULL; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 64977c131ee6..aa06498f291d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1426,16 +1426,11 @@ static bool arm_smmu_capable(enum iommu_cap cap) } } -static int arm_smmu_match_node(struct device *dev, const void *data) -{ - return dev->fwnode == data; -} - static struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, - fwnode, arm_smmu_match_node); + struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver, + fwnode); put_device(dev); return dev ? dev_get_drvdata(dev) : NULL; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d991d40f797f..f68a62c3c32b 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -965,11 +965,14 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, { bool coherent = dev_is_dma_coherent(dev); size_t alloc_size = PAGE_ALIGN(size); + int node = dev_to_node(dev); struct page *page = NULL; void *cpu_addr; page = dma_alloc_contiguous(dev, alloc_size, gfp); if (!page) + page = alloc_pages_node(node, gfp, get_order(alloc_size)); + if (!page) return NULL; if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 4793e77808e2..d54c8e4d8954 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -213,13 +213,6 @@ static int led_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume); -static int match_name(struct device *dev, const void *data) -{ - if (!dev_name(dev)) - return 0; - return !strcmp(dev_name(dev), (char *)data); -} - static int led_classdev_next_name(const char *init_name, char *name, size_t len) { @@ -230,7 +223,7 @@ static int led_classdev_next_name(const char *init_name, char *name, strlcpy(name, init_name, len); while ((ret < len) && - (dev = class_find_device(leds_class, NULL, name, match_name))) { + (dev = class_find_device_by_name(leds_class, name))) { put_device(dev); ret = snprintf(name, len, "%s_%u", init_name, ++i); } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index b6b5acc92ca2..2a48ea3f1b30 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) unsigned long freed; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (!dm_bufio_trylock(c)) + if (sc->gfp_mask & __GFP_FS) + dm_bufio_lock(c); + else if (!dm_bufio_trylock(c)) return SHRINK_STOP; freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c index 845f376a72d9..8288887b7f94 100644 --- a/drivers/md/dm-dust.c +++ b/drivers/md/dm-dust.c @@ -25,6 +25,7 @@ struct dust_device { unsigned long long badblock_count; spinlock_t dust_lock; unsigned int blksz; + int sect_per_block_shift; unsigned int sect_per_block; sector_t start; bool fail_read_on_bb:1; @@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block) unsigned long flags; spin_lock_irqsave(&dd->dust_lock, flags); - bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); + bblock = dust_rb_search(&dd->badblocklist, block); if (bblock == NULL) { if (!dd->quiet_mode) { @@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block) } 
spin_lock_irqsave(&dd->dust_lock, flags); - bblock->bb = block * dd->sect_per_block; + bblock->bb = block; if (!dust_rb_insert(&dd->badblocklist, bblock)) { if (!dd->quiet_mode) { DMERR("%s: block %llu already in badblocklist", @@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block) unsigned long flags; spin_lock_irqsave(&dd->dust_lock, flags); - bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); + bblock = dust_rb_search(&dd->badblocklist, block); if (bblock != NULL) DMINFO("%s: block %llu found in badblocklist", __func__, block); else @@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock, int ret = DM_MAPIO_REMAPPED; if (fail_read_on_bb) { + thisblock >>= dd->sect_per_block_shift; spin_lock_irqsave(&dd->dust_lock, flags); ret = __dust_map_read(dd, thisblock); spin_unlock_irqrestore(&dd->dust_lock, flags); @@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock, unsigned long flags; if (fail_read_on_bb) { + thisblock >>= dd->sect_per_block_shift; spin_lock_irqsave(&dd->dust_lock, flags); __dust_map_write(dd, thisblock); spin_unlock_irqrestore(&dd->dust_lock, flags); @@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv) dd->blksz = blksz; dd->start = tmp; + dd->sect_per_block_shift = __ffs(sect_per_block); + /* * Whether to fail a read on a "bad" block. * Defaults to false; enabled later by message. diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index b1b0de402dfc..9118ab85cb3a 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1943,7 +1943,22 @@ offload_to_thread: queue_work(ic->wait_wq, &dio->work); return; } + if (journal_read_pos != NOT_FOUND) + dio->range.n_sectors = ic->sectors_per_block; wait_and_add_new_range(ic, &dio->range); + /* + * wait_and_add_new_range drops the spinlock, so the journal + * may have been changed arbitrarily. We need to recheck. + * To simplify the code, we restrict I/O size to just one block. + */ + if (journal_read_pos != NOT_FOUND) { + sector_t next_sector; + unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); + if (unlikely(new_pos != journal_read_pos)) { + remove_range_unlocked(ic, &dio->range); + goto retry; + } + } } spin_unlock_irq(&ic->endio_wait.lock); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index df2011de7be2..1bbe4a34ef4c 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job) * no point in continuing. 
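 * In that case the sub-job now also inherits the master job's write_err
 * before returning -EIO (see the hunk just below), so the failure is still
 * reported when the job completes instead of being dropped.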
*/ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && - job->master_job->write_err) + job->master_job->write_err) { + job->write_err = job->master_job->write_err; return -EIO; + } io_job_start(job->kc->throttle); @@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, else job->read_err = 1; push(&kc->complete_jobs, job); + wake(kc); break; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 8a60a4a070ac..1f933dd197cd 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) */ r = rs_prepare_reshape(rs); if (r) - return r; + goto bad; /* Reshaping ain't recovery, so disable recovery */ rs_setup_recovery(rs, MaxSector); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 7b6c3ee9e755..8820931ec7d2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t) } EXPORT_SYMBOL(dm_table_event); -sector_t dm_table_get_size(struct dm_table *t) +inline sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } @@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) unsigned int l, n = 0, k = 0; sector_t *node; + if (unlikely(sector >= dm_table_get_size(t))) + return &t->targets[t->num_targets]; + for (l = 0; l < t->depth; l++) { n = get_child(n, k); node = get_node(t, l, n); diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 8545dcee9fd0..595a73110e17 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -34,7 +35,7 @@ * (1) Super block (1 block) * (2) Chunk mapping table (nr_map_blocks) * (3) Bitmap blocks (nr_bitmap_blocks) - * All metadata blocks are stored in conventional zones, starting from the + * All metadata blocks are stored in conventional zones, starting from * the first conventional zone found on disk. */ struct dmz_super { @@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd) * Lock/unlock metadata access. This is a "read" lock on a semaphore * that prevents metadata flush from running while metadata are being * modified. The actual metadata write mutual exclusion is achieved with - * the map lock and zone styate management (active and reclaim state are + * the map lock and zone state management (active and reclaim state are * mutually exclusive). 
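 * (The flush path takes the same semaphore for writing, so a metadata flush
 * only starts once in-flight modifications have dropped their read locks.)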
*/ void dmz_lock_metadata(struct dmz_metadata *zmd) @@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; struct bio *bio; + if (dmz_bdev_is_dying(zmd->dev)) + return ERR_PTR(-EIO); + /* Get a new block and a BIO to read it */ mblk = dmz_alloc_mblock(zmd, mblk_no); if (!mblk) - return NULL; + return ERR_PTR(-ENOMEM); bio = bio_alloc(GFP_NOIO, 1); if (!bio) { dmz_free_mblock(zmd, mblk); - return NULL; + return ERR_PTR(-ENOMEM); } spin_lock(&zmd->mblk_lock); @@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, if (!mblk) { /* Cache miss: read the block from disk */ mblk = dmz_get_mblock_slow(zmd, mblk_no); - if (!mblk) - return ERR_PTR(-ENOMEM); + if (IS_ERR(mblk)) + return mblk; } /* Wait for on-going read I/O and check for error */ @@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) /* * Issue a metadata block write BIO. */ -static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, - unsigned int set) +static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, + unsigned int set) { sector_t block = zmd->sb[set].block + mblk->no; struct bio *bio; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) { set_bit(DMZ_META_ERROR, &mblk->state); - return; + return -ENOMEM; } set_bit(DMZ_META_WRITING, &mblk->state); @@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); + + return 0; } /* @@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, struct bio *bio; int ret; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) return -ENOMEM; @@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, { struct dmz_mblock *mblk; struct blk_plug plug; - int ret = 0; + int ret = 0, nr_mblks_submitted = 0; /* Issue writes */ blk_start_plug(&plug); - list_for_each_entry(mblk, write_list, link) - dmz_write_mblock(zmd, mblk, set); + list_for_each_entry(mblk, write_list, link) { + ret = dmz_write_mblock(zmd, mblk, set); + if (ret) + break; + nr_mblks_submitted++; + } blk_finish_plug(&plug); /* Wait for completion */ list_for_each_entry(mblk, write_list, link) { + if (!nr_mblks_submitted) + break; wait_on_bit_io(&mblk->state, DMZ_META_WRITING, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { clear_bit(DMZ_META_ERROR, &mblk->state); ret = -EIO; } + nr_mblks_submitted--; } /* Flush drive cache (this will also sync data) */ @@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ dmz_lock_flush(zmd); + if (dmz_bdev_is_dying(zmd->dev)) { + ret = -EIO; + goto out; + } + /* Get dirty blocks */ spin_lock(&zmd->mblk_lock); list_splice_init(&zmd->mblk_dirty_list, &write_list); @@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_rnd_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_rnd_list, link) { if (dmz_is_buf(zone)) @@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) return dzone; } - return NULL; + return ERR_PTR(-EBUSY); } /* @@ -1564,7 +1588,7 @@ static struct dm_zone 
*dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_seq_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_seq_list, link) { if (!zone->bzone) @@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } - return NULL; + return ERR_PTR(-EBUSY); } /* @@ -1628,9 +1652,13 @@ again: if (op != REQ_OP_WRITE) goto out; - /* Alloate a random zone */ + /* Allocate a random zone */ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!dzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + dzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } @@ -1725,9 +1753,13 @@ again: if (bzone) goto out; - /* Alloate a random zone */ + /* Allocate a random zone */ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!bzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + bzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index edf4b95eb075..d240d7ca8a8a 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -37,7 +38,7 @@ enum { /* * Number of seconds of target BIO inactivity to consider the target idle. */ -#define DMZ_IDLE_PERIOD (10UL * HZ) +#define DMZ_IDLE_PERIOD (10UL * HZ) /* * Percentage of unmapped (free) random zones below which reclaim starts @@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc, set_bit(DM_KCOPYD_WRITE_SEQ, &flags); while (block < end_block) { + if (dev->flags & DMZ_BDEV_DYING) + return -EIO; + /* Get a valid region from the source zone */ ret = dmz_first_valid_block(zmd, src_zone, &block); if (ret <= 0) @@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone) /* * Find a candidate zone for reclaim and process it. 
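 * The rework below (dmz_do_reclaim) propagates errors back to the work
 * function, which then skips the immediate reschedule when the backing
 * device is reported as dying.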
*/ -static void dmz_reclaim(struct dmz_reclaim *zrc) +static int dmz_do_reclaim(struct dmz_reclaim *zrc) { struct dmz_metadata *zmd = zrc->metadata; struct dm_zone *dzone; @@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); - if (!dzone) - return; + if (IS_ERR(dzone)) + return PTR_ERR(dzone); start = jiffies; @@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) out: if (ret) { dmz_unlock_zone_reclaim(dzone); - return; + return ret; } - (void) dmz_flush_metadata(zrc->metadata); + ret = dmz_flush_metadata(zrc->metadata); + if (ret) { + dmz_dev_debug(zrc->dev, + "Metadata flush for zone %u failed, err %d\n", + dmz_id(zmd, rzone), ret); + return ret; + } dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); + return 0; } /* @@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc) return false; /* - * If the percentage of unmappped random zones is low, + * If the percentage of unmapped random zones is low, * reclaim even if the target is busy. */ return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; @@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work) struct dmz_metadata *zmd = zrc->metadata; unsigned int nr_rnd, nr_unmap_rnd; unsigned int p_unmap_rnd; + int ret; + + if (dmz_bdev_is_dying(zrc->dev)) + return; if (!dmz_should_reclaim(zrc)) { mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); @@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work) (dmz_target_idle(zrc) ? "Idle" : "Busy"), p_unmap_rnd, nr_unmap_rnd, nr_rnd); - dmz_reclaim(zrc); + ret = dmz_do_reclaim(zrc); + if (ret) { + dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret); + if (ret == -EIO) + /* + * LLD might be performing some error handling sequence + * at the underlying device. To not interfere, do not + * attempt to schedule the next reclaim run immediately. + */ + return; + } dmz_schedule_reclaim(zrc); } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 51d029bbb740..31478fef6032 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, refcount_inc(&bioctx->ref); generic_make_request(clone); + if (clone->bi_status == BLK_STS_IOERR) + return -EIO; if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) zone->wp_block += nr_blocks; @@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, /* Get the buffer zone. One will be allocated if needed */ bzone = dmz_get_chunk_buffer(zmd, zone); - if (!bzone) - return -ENOSPC; + if (IS_ERR(bzone)) + return PTR_ERR(bzone); if (dmz_is_readonly(bzone)) return -EROFS; @@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, dmz_lock_metadata(zmd); + if (dmz->dev->flags & DMZ_BDEV_DYING) { + ret = -EIO; + goto out; + } + /* * Get the data zone mapping the chunk. There may be no * mapping for read and discard. 
If a mapping is obtained, @@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work) /* Flush dirty metadata blocks */ ret = dmz_flush_metadata(dmz->metadata); + if (ret) + dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret); /* Process queued flush requests */ while (1) { @@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work) * Get a chunk work and start it to process a new BIO. * If the BIO chunk has no work yet, create one. */ -static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) +static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) { unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); struct dm_chunk_work *cw; + int ret = 0; mutex_lock(&dmz->chunk_lock); /* Get the BIO chunk work. If one is not active yet, create one */ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); if (!cw) { - int ret; /* Create a new chunk work */ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); - if (!cw) + if (unlikely(!cw)) { + ret = -ENOMEM; goto out; + } INIT_WORK(&cw->work, dmz_chunk_work); refcount_set(&cw->refcount, 0); @@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); if (unlikely(ret)) { kfree(cw); - cw = NULL; goto out; } } @@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) bio_list_add(&cw->bio_list, bio); dmz_get_chunk_work(cw); + dmz_reclaim_bio_acc(dmz->reclaim); if (queue_work(dmz->chunk_wq, &cw->work)) dmz_get_chunk_work(cw); out: mutex_unlock(&dmz->chunk_lock); + return ret; +} + +/* + * Check the backing device availability. If it's on the way out, + * start failing I/O. Reclaim and metadata components also call this + * function to cleanly abort operation in the event of such failure. 
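+ * Once DMZ_BDEV_DYING has been set the test reduces to a simple flag check,
+ * so callers can invoke it at the start of every BIO and metadata operation.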
+ */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev) +{ + struct gendisk *disk; + + if (!(dmz_dev->flags & DMZ_BDEV_DYING)) { + disk = dmz_dev->bdev->bd_disk; + if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { + dmz_dev_warn(dmz_dev, "Backing device queue dying"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } else if (disk->fops->check_events) { + if (disk->fops->check_events(disk, 0) & + DISK_EVENT_MEDIA_CHANGE) { + dmz_dev_warn(dmz_dev, "Backing device offline"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } + } + } + + return dmz_dev->flags & DMZ_BDEV_DYING; } /* @@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) sector_t sector = bio->bi_iter.bi_sector; unsigned int nr_sectors = bio_sectors(bio); sector_t chunk_sector; + int ret; + + if (dmz_bdev_is_dying(dmz->dev)) + return DM_MAPIO_KILL; dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", bio_op(bio), (unsigned long long)sector, nr_sectors, @@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); /* Now ready to handle this BIO */ - dmz_reclaim_bio_acc(dmz->reclaim); - dmz_queue_chunk_work(dmz, bio); + ret = dmz_queue_chunk_work(dmz, bio); + if (ret) { + dmz_dev_debug(dmz->dev, + "BIO op %d, can't process chunk %llu, err %i\n", + bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio), + ret); + return DM_MAPIO_REQUEUE; + } return DM_MAPIO_SUBMITTED; } @@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct dmz_target *dmz = ti->private; + if (dmz_bdev_is_dying(dmz->dev)) + return -ENODEV; + *bdev = dmz->dev->bdev; return 0; diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h index ed8de49c9a08..d8e70b0ade35 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -56,6 +57,8 @@ struct dmz_dev { unsigned int nr_zones; + unsigned int flags; + sector_t zone_nr_sectors; unsigned int zone_nr_sectors_shift; @@ -67,6 +70,9 @@ struct dmz_dev { (dev)->zone_nr_sectors_shift) #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) +/* Device flags. */ +#define DMZ_BDEV_DYING (1 << 0) + /* * Zone descriptor. */ @@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc); void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); void dmz_schedule_reclaim(struct dmz_reclaim *zrc); +/* + * Functions defined in dm-zoned-target.c + */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev); + #endif /* DM_ZONED_H */ diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 58b319757b1e..8aae0624a297 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) new_parent = shadow_current(s); + pn = dm_block_data(new_parent); + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? 
+ sizeof(__le64) : s->info->value_type.size; + + /* create & init the left block */ r = new_block(s->info, &left); if (r < 0) return r; + ln = dm_block_data(left); + nr_left = le32_to_cpu(pn->header.nr_entries) / 2; + + ln->header.flags = pn->header.flags; + ln->header.nr_entries = cpu_to_le32(nr_left); + ln->header.max_entries = pn->header.max_entries; + ln->header.value_size = pn->header.value_size; + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); + memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); + + /* create & init the right block */ r = new_block(s->info, &right); if (r < 0) { unlock_block(s->info, left); return r; } - pn = dm_block_data(new_parent); - ln = dm_block_data(left); rn = dm_block_data(right); - - nr_left = le32_to_cpu(pn->header.nr_entries) / 2; nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; - ln->header.flags = pn->header.flags; - ln->header.nr_entries = cpu_to_le32(nr_left); - ln->header.max_entries = pn->header.max_entries; - ln->header.value_size = pn->header.value_size; - rn->header.flags = pn->header.flags; rn->header.nr_entries = cpu_to_le32(nr_right); rn->header.max_entries = pn->header.max_entries; rn->header.value_size = pn->header.value_size; - - memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); - - size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? - sizeof(__le64) : s->info->value_type.size; - memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), nr_right * size); diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index aec449243966..25328582cc48 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm) } if (smm->recursion_count == 1) - apply_bops(smm); + r = apply_bops(smm); smm->recursion_count--; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f129f9678940..c8cbde59bbf6 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1105,7 +1105,6 @@ config MFD_SI476X_CORE config MFD_SM501 tristate "Silicon Motion SM501" depends on HAS_DMA - select DMA_DECLARE_COHERENT ---help--- This is the core driver for the Silicon Motion SM501 multimedia companion chip. This device is a multifunction device which may @@ -1714,7 +1713,6 @@ config MFD_TC6393XB select GPIOLIB select MFD_CORE select MFD_TMIO - select DMA_DECLARE_COHERENT help Support for Toshiba Mobile IO Controller TC6393XB diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c index 2ee14d8a6d31..d2a13a547a3c 100644 --- a/drivers/mfd/altera-sysmgr.c +++ b/drivers/mfd/altera-sysmgr.c @@ -88,16 +88,6 @@ static struct regmap_config altr_sysmgr_regmap_cfg = { }; /** - * sysmgr_match_phandle - * Matching function used by driver_find_device(). - * Return: True if match is found, otherwise false. - */ -static int sysmgr_match_phandle(struct device *dev, const void *data) -{ - return dev->of_node == (const struct device_node *)data; -} - -/** * altr_sysmgr_regmap_lookup_by_phandle * Find the sysmgr previous configured in probe() and return regmap property. * Return: regmap if found or error if not found. 
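The arm-smmu and altera-sysmgr hunks in this series drop their open-coded of_node/fwnode comparison callbacks in favour of the new driver_find_device_by_of_node() and driver_find_device_by_fwnode() helpers. A minimal sketch of the resulting lookup pattern, using a hypothetical platform driver foo_driver and helper foo_get_by_of_node() (both names are illustrative, not part of the patch):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver;	/* hypothetical driver */

/* Return the driver-private data of the foo instance bound to @np, or NULL. */
static void *foo_get_by_of_node(struct device_node *np)
{
	struct device *dev;

	/* Matches on dev->of_node == np and takes a device reference. */
	dev = driver_find_device_by_of_node(&foo_driver.driver, np);
	if (!dev)
		return NULL;	/* callers typically retry with -EPROBE_DEFER */

	put_device(dev);	/* drop the lookup reference, as arm-smmu does */
	return dev_get_drvdata(dev);
}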
@@ -117,8 +107,8 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np, if (!sysmgr_np) return ERR_PTR(-ENODEV); - dev = driver_find_device(&altr_sysmgr_driver.driver, NULL, - (void *)sysmgr_np, sysmgr_match_phandle); + dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver, + (void *)sysmgr_np); of_node_put(sysmgr_np); if (!dev) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index 601cefb5c9d8..050478cabc95 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client) return 0; } -static int rk8xx_suspend(struct device *dev) +static int __maybe_unused rk8xx_suspend(struct device *dev) { struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); int ret = 0; @@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev) return ret; } -static int rk8xx_resume(struct device *dev) +static int __maybe_unused rk8xx_resume(struct device *dev) { struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); int ret = 0; @@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev) return ret; } -SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); +static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); static struct i2c_driver rk808_i2c_driver = { .driver = { diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 1606658b9b7e..24245ccdba72 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -22,7 +22,7 @@ struct lkdtm_list { * recurse past the end of THREAD_SIZE by default. */ #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) -#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) +#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2) #else #define REC_STACK_SIZE (THREAD_SIZE / 8) #endif @@ -91,7 +91,7 @@ void lkdtm_LOOP(void) void lkdtm_EXHAUST_STACK(void) { - pr_info("Calling function with %d frame size to depth %d ...\n", + pr_info("Calling function with %lu frame size to depth %d ...\n", REC_STACK_SIZE, recur_count); recursive_loop(recur_count); pr_info("FAIL: survived without exhausting stack?!\n"); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6c0173772162..77f7dff7098d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -81,6 +81,8 @@ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ + #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index f894d1f8a53e..7310b476323c 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -858,13 +858,6 @@ static ssize_t dev_state_show(struct device *device, } static DEVICE_ATTR_RO(dev_state); -static int match_devt(struct device *dev, const void *data) -{ - const dev_t *devt = data; - - return dev->devt == *devt; -} - /** * dev_set_devstate: set to new device state and notify sysfs file. 
* @@ -880,7 +873,7 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state) dev->dev_state = state; - clsdev = class_find_device(mei_class, NULL, &dev->cdev.dev, match_devt); + clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev); if (clsdev) { sysfs_notify(&clsdev->kobj, NULL, "dev_state"); put_device(clsdev); diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 57cb68f5cc64..541538eff8b1 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 8840299420e0..5e6be1527571 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b, } if (page) { - vmballoon_mark_page_offline(page, ctl->page_size); /* Success. Add the page to the list and continue. */ list_add(&page->lru, &ctl->pages); continue; @@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list, list_for_each_entry_safe(page, tmp, page_list, lru) { list_del(&page->lru); - vmballoon_mark_page_online(page, page_size); __free_pages(page, vmballoon_page_order(page_size)); } @@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b, enum vmballoon_page_size_type page_size) { unsigned long flags; + struct page *page; if (page_size == VMW_BALLOON_4K_PAGE) { balloon_page_list_enqueue(&b->b_dev_info, pages); @@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b, * for the balloon compaction mechanism. 
*/ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); + + list_for_each_entry(page, pages, lru) { + vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE); + } + list_splice_init(pages, &b->huge_pages); __count_vm_events(BALLOON_INFLATE, *n_pages * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); @@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b, /* 2MB pages */ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { + vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE); + list_move(&page->lru, pages); if (++i == n_req_pages) break; diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index bad89b6e0802..345addd9306d 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) entry = container_of(resource, struct dbell_entry, resource); if (entry->run_delayed) { - schedule_work(&entry->work); + if (!schedule_work(&entry->work)) + vmci_resource_put(resource); } else { entry->notify_cb(entry->client_data); vmci_resource_put(resource); @@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx) atomic_read(&dbell->active) == 1) { if (dbell->run_delayed) { vmci_resource_get(&dbell->resource); - schedule_work(&dbell->work); + if (!schedule_work(&dbell->work)) + vmci_resource_put(&dbell->resource); } else { dbell->notify_cb(dbell->client_data); } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d681e8aaca83..fe914ff5f5d6 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } + /* + * Some SD cards claims an out of spec VDD voltage range. Let's treat + * these bits as being in-valid and especially also bit7. 
+ */ + ocr &= ~0x7FFF; + rocr = mmc_select_voltage(host, ocr); /* diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 163d1cf4367e..44139fceac24 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; host->mmc_host_ops.hs400_enhanced_strobe = sdhci_cdns_hs400_enhanced_strobe; + sdhci_enable_v4_mode(host); sdhci_get_of_property(pdev); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d4e7e8b7be77..e7d1920729fb 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, 50); pm_runtime_use_autosuspend(&pdev->dev); + /* HS200 is broken at this moment */ + host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200; + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 83a4767ca680..d07b9793380f 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); u32 div, val, mask; - div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); - sdhci_enable_clk(host, clk); + div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); + sdhci_enable_clk(host, div); /* enable auto gate sdhc_enable_auto_gate */ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); @@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host) return 1 << 31; } +static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host) +{ + return 0; +} + static struct sdhci_ops sdhci_sprd_ops = { .read_l = sdhci_sprd_readl, .write_l = sdhci_sprd_writel, @@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = { .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, .hw_reset = sdhci_sprd_hw_reset, .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, + .get_ro = sdhci_sprd_get_ro, }; static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host, } static const struct sdhci_pltfm_data sdhci_sprd_pdata = { - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MISSING_CAPS, .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | - SDHCI_QUIRK2_USE_32BIT_BLK_CNT, + SDHCI_QUIRK2_USE_32BIT_BLK_CNT | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &sdhci_sprd_ops, }; @@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev) sdhci_enable_v4_mode(host); + /* + * Supply the existing CAPS, but clear the UHS-I modes. This + * will allow these modes to be specified only by device + * tree properties through mmc_of_parse(). 
+ */ + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + ret = sdhci_setup_host(host); if (ret) goto pm_runtime_disable; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index f4d4761cf20a..02d8f524bb9e 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) } } +static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) +{ + /* + * Write-enable shall be assumed if GPIO is missing in a board's + * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on + * Tegra. + */ + return mmc_gpio_get_ro(host->mmc); +} + static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { }; static const struct sdhci_ops tegra_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = { }; static const struct sdhci_ops tegra114_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = { }; static const struct sdhci_ops tegra210_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra210_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { }; static const struct sdhci_ops tegra186_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index b4e3caf7d799..a4d8968d133d 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig @@ -1,5 +1,6 @@ menuconfig MTD_HYPERBUS tristate "HyperBus support" + depends on HAS_IOMEM select MTD_CFI select MTD_MAP_BANK_WIDTH_2 select MTD_CFI_AMDSTD diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 895510d40ce4..47602af4ee34 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r default: printk(KERN_WARNING "SA1100 flash: unknown base address " "0x%08lx, assuming CS0\n", phys); + /* Fall through */ case SA1100_CS0_PHYS: subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; diff --git a/drivers/mux/core.c b/drivers/mux/core.c index d1271c1ee23c..1fb22388e7e0 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -405,17 +405,12 @@ int mux_control_deselect(struct mux_control *mux) } EXPORT_SYMBOL_GPL(mux_control_deselect); -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* Note this function returns a reference to the mux_chip dev. */ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { struct device *dev; - dev = class_find_device(&mux_class, NULL, np, of_dev_node_match); + dev = class_find_device_by_of_node(&mux_class, np); return dev ? 
to_mux_chip(dev) : NULL; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 02fd7822c14a..931d9d935686 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3811fdbda13e..28c963a21dac 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) { + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; if (!phy_interface_mode_is_rgmii(state->interface) && @@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, state->interface != PHY_INTERFACE_MODE_INTERNAL && state->interface != PHY_INTERFACE_MODE_MOCA) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - dev_err(ds->dev, - "Unsupported interface: %d\n", state->interface); + if (port != core_readl(priv, CORE_IMP0_PRT_ID)) + dev_err(ds->dev, + "Unsupported interface: %d for port %d\n", + state->interface, port); return; } @@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, u32 id_mode_dis = 0, port_mode; u32 reg, offset; + if (port == core_readl(priv, CORE_IMP0_PRT_ID)) + return; + if (priv->type == BCM7445_DEVICE_ID) offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); else diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 5a9e27b337a8..098b01e4ed1a 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9897" }, { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9563" }, + { .compatible = "microchip,ksz8563" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index ee7096d8af07..72ec250b9540 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ { \ + .name = #width, \ .val_bits = (width), \ .reg_stride = (width) / 8, \ .reg_bits = (regbits) + (regalign), \ diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index d073baffc20b..df976b259e43 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1223,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; - u16 rx_vid, tx_vid; int i; - rx_vid = dsa_8021q_rx_vid(ds, port); - tx_vid = dsa_8021q_tx_vid(ds, port); - for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { struct sja1105_l2_lookup_entry l2_lookup = {0}; u8 macaddr[ETH_ALEN]; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index b41f23679a08..7ce9c69e9c44 100644 --- 
a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void) ret = xgbe_platform_init(); if (ret) - return ret; + goto err_platform_init; ret = xgbe_pci_init(); if (ret) - return ret; + goto err_pci_init; return 0; + +err_pci_init: + xgbe_platform_exit(); +err_platform_init: + unregister_netdevice_notifier(&xgbe_netdev_notifier); + return ret; } static void __exit xgbe_mod_exit(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 440690b18734..aee827f07c16 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id) if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) break; } - if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { + if (rule && rule->type == aq_rx_filter_vlan && + be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { struct ethtool_rxnfc cmd; cmd.fs.location = rule->aq_fsp.location; @@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic) return err; if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { + if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) { err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, !(aq_nic->packet_filter & IFF_PROMISC)); aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 100722ad5c2d..b4a0fb281e69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev) if (err < 0) goto err_exit; + err = aq_filters_vlans_update(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index e1392766e21e..8f66e7817811 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self) self->aq_nic_cfg.link_irq_vec); err = request_threaded_irq(irqvec, NULL, aq_linkstate_threaded_isr, - IRQF_SHARED, + IRQF_SHARED | IRQF_ONESHOT, self->ndev->name, self); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 715685aa48c3..28892b8acd0e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } } +err_exit: if (!was_tx_cleaned) work_done = budget; @@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) 1U << self->aq_ring_param.vec_idx); } } -err_exit: + return work_done; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e47ea92e2ae3..d10b421ed1f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ - 
if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_clear_vlan_info(bp); bnx2x_vfpf_close_vf(bp); - else if (unload_mode != UNLOAD_RECOVERY) + } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip*/ bnx2x_chip_cleanup(bp, unload_mode, keep_link); - else { + } else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c2f6e44e9a3f..8b08cb18e363 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp); void bnx2x_disable_close_the_gate(struct bnx2x *bp); int bnx2x_init_hw_func_cnic(struct bnx2x *bp); +void bnx2x_clear_vlan_info(struct bnx2x *bp); + /** * bnx2x_sp_event - handle ramrods completion. * diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2cc14db8f0ec..192ff8d5da32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, return rc; } +void bnx2x_clear_vlan_info(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + + /* Mark that hw forgot all entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) + vlan->hw = false; + + bp->vlan_cnt = 0; +} + static int bnx2x_del_all_vlans(struct bnx2x *bp) { struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; unsigned long ramrod_flags = 0, vlan_flags = 0; - struct bnx2x_vlan_entry *vlan; int rc; __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); @@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp) if (rc) return rc; - /* Mark that hw forgot all entries */ - list_for_each_entry(vlan, &bp->vlan_reg, link) - vlan->hw = false; - bp->vlan_cnt = 0; + bnx2x_clear_vlan_info(bp); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7070349915bc..8dce4069472b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) if (bnapi->events & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (bnapi->events & BNXT_AGG_EVENT) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); } bnapi->events = 0; } @@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); int i, rc = 0; u32 type; @@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) if (rc) goto err_out; bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + /* If we have agg rings, post agg buffers first. 
*/ + if (!agg_rings) + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } } - if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (agg_rings) { type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; @@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, ring->fw_ring_id); bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } @@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) bnxt_hwrm_vnic_set_rss(bp, i, false); } -static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, - bool irq_re_init) +static void bnxt_clear_vnic(struct bnxt *bp) { - if (bp->vnic_info) { - bnxt_hwrm_clear_vnic_filter(bp); + if (!bp->vnic_info) + return; + + bnxt_hwrm_clear_vnic_filter(bp); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { /* clear all RSS setting before free vnic ctx */ bnxt_hwrm_clear_vnic_rss(bp); bnxt_hwrm_vnic_ctx_free(bp); - /* before free the vnic, undo the vnic tpa settings */ - if (bp->flags & BNXT_FLAG_TPA) - bnxt_set_tpa(bp, false); - bnxt_hwrm_vnic_free(bp); } + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + bnxt_hwrm_vnic_ctx_free(bp); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + bnxt_clear_vnic(bp); bnxt_hwrm_ring_free(bp, close_path); bnxt_hwrm_ring_grp_free(bp); if (irq_re_init) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 549c90d3e465..c05d663212b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); - if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) + if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { memcpy(data_addr, buf, bytesize); - - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + } else { + rc = hwrm_send_message_silent(bp, msg, msg_len, + HWRM_CMD_TIMEOUT); + } if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) memcpy(buf, data_addr, bytesize); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index c7ee63d69679..8445a0cce849 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev, mutex_lock(&bp->hwrm_cmd_lock); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; - - if (resp->error_code) { + if (hwrm_err) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; - if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + if (resp->error_code && error_code == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( 
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; } + if (hwrm_err) + goto flash_pkg_exit; } if (resp->result) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 6fe4a7174271..dd621f6bd127 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, u16 src_fid) { - flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; + flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; } static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, @@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, goto free_node; bnxt_tc_set_src_fid(bp, flow, src_fid); - - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - bnxt_tc_set_flow_dir(bp, flow, src_fid); + bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); if (!bnxt_tc_can_offload(bp, flow)) { rc = -EOPNOTSUPP; @@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp, * 2. 15th bit of flow_handle must specify the flow * direction (TX/RX). */ - if (flow_node->flow.dir == BNXT_DIR_RX) + if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index ffec57d1a5ec..4f05305052f2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -23,6 +23,9 @@ struct bnxt_tc_l2_key { __be16 inner_vlan_tci; __be16 ether_type; u8 num_vlans; + u8 dir; +#define BNXT_DIR_RX 1 +#define BNXT_DIR_TX 0 }; struct bnxt_tc_l3_key { @@ -98,9 +101,6 @@ struct bnxt_tc_flow { /* flow applicable to pkts ingressing on this fid */ u16 src_fid; - u8 dir; -#define BNXT_DIR_RX 1 -#define BNXT_DIR_TX 0 struct bnxt_tc_l2_key l2_key; struct bnxt_tc_l2_key l2_mask; struct bnxt_tc_l3_key l3_key; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index d3a0b614dbfa..b22196880d6d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .set_coalesce = bcmgenet_set_coalesce, .get_link_ksettings = bcmgenet_get_link_ksettings, .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, }; /* Power down the unimac, based on mode. 
*/ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5ca17e62dc3e..35b59b5edf0f 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,emac", .data = &emac_config }, { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, - { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, + { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 73632b843749..b821c9e1604c 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -10,7 +10,7 @@ #include "cavium_ptp.h" -#define DRV_NAME "Cavium PTP Driver" +#define DRV_NAME "cavium_ptp" #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 032224178b64..6dd65f9b347c 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct, } oct->num_iqs++; - if (oct->fn_list.enable_io_queues(oct)) + if (oct->fn_list.enable_io_queues(oct)) { + octeon_delete_instr_queue(oct, iq_no); return 1; + } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 02959035ed3f..d692251ee252 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, return -ENOMEM; err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); - if (err) + if (err) { + kvfree(t); return err; + } bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); kvfree(t); diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index 133acca0bf31..092da2d90026 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -167,7 +167,7 @@ struct nps_enet_priv { }; /** - * nps_reg_set - Sets ENET register with provided value. + * nps_enet_reg_set - Sets ENET register with provided value. * @priv: Pointer to EZchip ENET private data structure. * @reg: Register offset from base address. * @value: Value to set in register. @@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv, } /** - * nps_reg_get - Gets value of specified ENET register. + * nps_enet_reg_get - Gets value of specified ENET register. * @priv: Pointer to EZchip ENET private data structure. * @reg: Register offset from base address. 
* diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index 2fd2586e42bf..bc594892507a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev, n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); if (n != 1) { err = -EPERM; - goto err_irq; + goto err_irq_vectors; } ptp_qoriq->irq = pci_irq_vector(pdev, 0); @@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, err_no_clock: free_irq(ptp_qoriq->irq, ptp_qoriq); err_irq: + pci_free_irq_vectors(pdev); +err_irq_vectors: iounmap(base); err_ioremap: kfree(ptp_qoriq); @@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev) enetc_phc_index = -1; ptp_qoriq_free(ptp_qoriq); + pci_free_irq_vectors(pdev); kfree(ptp_qoriq); pci_release_mem_regions(pdev); diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 497298752381..aca95f64bde8 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) u64_stats_fetch_begin(&priv->tx[ring].statss); s->tx_packets += priv->tx[ring].pkt_done; s->tx_bytes += priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, start)); } } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index bb6586d0e5af..ed3829ae4ef1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -754,17 +754,11 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) return (void *)misc_op; } -static int hns_dsaf_dev_match(struct device *dev, const void *fwnode) -{ - return dev->fwnode == fwnode; -} - struct platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode) { struct device *dev; - dev = bus_find_device(&platform_bus_type, NULL, - fwnode, hns_dsaf_dev_match); + dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); return dev ? 
to_platform_device(dev) : NULL; } diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d654c234aaf7..c5be4ebd8437 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; - unsigned int *mcastFilterSize_p; + __be32 *mcastFilterSize_p; long ret; unsigned long ret_attr; @@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) return -EINVAL; } - mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, - VETH_MCAST_FILTER_SIZE, NULL); + mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, + VETH_MCAST_FILTER_SIZE, + NULL); if (!mcastFilterSize_p) { dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " "attribute\n"); @@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; - adapter->mcastFilterSize = *mcastFilterSize_p; + adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); adapter->pool_config = 0; netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3da680073265..fa4bb940665c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); + dma_unmap_single(dev, tx_buff->indir_dma, + sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], @@ -1981,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) + goto out; + if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; rc = do_hard_reset(adapter, rwi, reset_state); @@ -2005,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work) netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } - +out: adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); @@ -2788,7 +2794,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; - u8 *first; restart_loop: while (pending_scrq(adapter, scrq)) { @@ -2818,14 +2823,6 @@ restart_loop: txbuff->data_dma[j] = 0; } - /* if sub_crq was sent indirectly */ - first = &txbuff->indir_arr[0].generic.first; - if (*first == IBMVNIC_CRQ_CMD) { - dma_unmap_single(dev, txbuff->indir_dma, - sizeof(txbuff->indir_arr), - DMA_TO_DEVICE); - *first = 0; - } if (txbuff->last_frag) { dev_kfree_skb_any(txbuff->skb); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cbaf712d6529..7882148abb43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work) return; } if (ixgbe_check_fw_error(adapter)) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - rtnl_lock(); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) unregister_netdev(adapter->netdev); - rtnl_unlock(); - } 
ixgbe_service_event_complete(adapter); return; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index a01c75ede871..e0363870f3a5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = { DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, {} }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 6c01314e87b0..db3552f2d087 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); - goto rss_err; + goto qp_alloc_err; } rss_map->indir_qp->event = mlx4_en_sqp_event; @@ -1241,6 +1241,7 @@ indir_err: MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); mlx4_qp_remove(mdev->dev, rss_map->indir_qp); mlx4_qp_free(mdev->dev, rss_map->indir_qp); +qp_alloc_err: kfree(rss_map->indir_qp); rss_map->indir_qp = NULL; rss_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ce1be2a84231..65bec19a438f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; + union { + struct { + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; + }; + u8 tls_progress_params_ctx[0]; + }; }; struct mlx5e_rx_wqe_ll { @@ -1100,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); +int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, + struct ethtool_flash *flash); void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, struct ethtool_pauseparam *pauseparam); int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index f3d98748b211..c7f86453c638 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) u8 state; int err; - if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - return 0; - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); if (err) { netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", sq->sqn, err); - return err; + goto out; } - if (state != MLX5_SQC_STATE_ERR) { - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); - return -EINVAL; - } + if (state != MLX5_SQC_STATE_ERR) + goto out; mlx5e_tx_disable_queue(sq->txq); err = mlx5e_wait_for_sq_flush(sq); if (err) - return err; + goto out; /* At this point, no new packets will arrive from the stack as TXQ is * marked with QUEUE_STATE_DRV_XOFF. 
In addition, NAPI cleared all @@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) err = mlx5e_sq_to_ready(sq, state); if (err) - return err; + goto out; mlx5e_reset_txqsq_cc_pc(sq); sq->stats->recover++; + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); mlx5e_activate_txqsq(sq); return 0; +out: + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); + return err; } static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index aaffa6f68dc0..7f78c004d12f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c) { set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); /* TX queue is created active. */ + + spin_lock(&c->xskicosq_lock); mlx5e_trigger_irq(&c->xskicosq); + spin_unlock(&c->xskicosq_lock); } void mlx5e_deactivate_xsk(struct mlx5e_channel *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 407da83474ef..b7298f9ee3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -11,12 +11,14 @@ #include "accel/tls.h" #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ - (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params)) + (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \ + MLX5_ST_SZ_BYTES(tls_static_params)) #define MLX5E_KTLS_STATIC_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_KTLS_PROGRESS_WQE_SZ \ - (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params)) + (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ + MLX5_ST_SZ_BYTES(tls_progress_params)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 3766545ce259..7833ddef0427 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->imm = cpu_to_be32(priv_tx->tisn); + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); @@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, static void fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); + MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); MLX5_SET(tls_progress_params, ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); MLX5_SET(tls_progress_params, ctx, auth_state, @@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, PROGRESS_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - fill_progress_params_ctx(wqe->data, priv_tx); + fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx); } static void tx_fill_wi(struct mlx5e_txqsq *sq, u16 pi, u8 num_wqebbs, - skb_frag_t *resync_dump_frag) + skb_frag_t *resync_dump_frag, + u32 num_bytes) { struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; wi->skb = NULL; wi->num_wqebbs = num_wqebbs; wi->resync_dump_frag = resync_dump_frag; + wi->num_bytes = num_bytes; } void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) @@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq, umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); + tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0); sq->pc += MLX5E_KTLS_STATIC_WQEBBS; } @@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq, wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); + tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0); sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; } @@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); } +struct mlx5e_dump_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_data_seg data; +}; + static int tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, skb_frag_t *frag, u32 tisn, bool first) { struct mlx5_wqe_ctrl_seg *cseg; - struct mlx5_wqe_eth_seg *eseg; struct mlx5_wqe_data_seg *dseg; - struct mlx5e_tx_wqe *wqe; + struct mlx5e_dump_wqe *wqe; dma_addr_t dma_addr = 0; - u16 ds_cnt, ds_cnt_inl; u8 num_wqebbs; - u16 pi, ihs; + u16 ds_cnt; int fsz; - - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); - ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); - ds_cnt += ds_cnt_inl; - ds_cnt += 1; /* one frag */ + u16 pi; wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); cseg = &wqe->ctrl; - eseg = &wqe->eth; - dseg = wqe->data; + dseg = &wqe->data; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - cseg->imm = cpu_to_be32(tisn); + cseg->tisn = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - eseg->inline_hdr.sz = cpu_to_be16(ihs); - memcpy(eseg->inline_hdr.start, skb->data, ihs); - dseg += ds_cnt_inl; - fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); @@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - tx_fill_wi(sq, pi, num_wqebbs, frag); + tx_fill_wi(sq, pi, num_wqebbs, frag, fsz); sq->pc += num_wqebbs; WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, @@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - tx_fill_wi(sq, pi, 1, NULL); + tx_fill_wi(sq, pi, 1, NULL, 0); mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); } @@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, priv_tx->expected_seq = seq + datalen; cseg = &(*wqe)->ctrl; - cseg->imm = cpu_to_be32(priv_tx->tisn); + cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; stats->tls_encrypted_bytes += datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 8657e0f26995..2c75b2752f58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port, return &arfs_t->rules_hash[bucket_idx]; } -static u8 arfs_get_ip_proto(const struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) ? - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; -} - static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, u8 ip_proto, __be16 etype) { @@ -602,31 +596,9 @@ out: arfs_may_expire_flow(priv); } -/* return L4 destination port from ip4/6 packets */ -static __be16 arfs_get_dst_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->dest; - return ((struct udphdr *)transport_header)->dest; -} - -/* return L4 source port from ip4/6 packets */ -static __be16 arfs_get_src_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->source; - return ((struct udphdr *)transport_header)->source; -} - static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, struct arfs_table *arfs_t, - const struct sk_buff *skb, + const struct flow_keys *fk, u16 rxq, u32 flow_id) { struct arfs_rule *rule; @@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, INIT_WORK(&rule->arfs_work, arfs_handle_work); tuple = &rule->tuple; - tuple->etype = skb->protocol; + tuple->etype = fk->basic.n_proto; + tuple->ip_proto = fk->basic.ip_proto; if (tuple->etype == htons(ETH_P_IP)) { - tuple->src_ipv4 = ip_hdr(skb)->saddr; - tuple->dst_ipv4 = ip_hdr(skb)->daddr; + tuple->src_ipv4 = fk->addrs.v4addrs.src; + tuple->dst_ipv4 = fk->addrs.v4addrs.dst; } else { - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src, sizeof(struct in6_addr)); - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, sizeof(struct in6_addr)); } - tuple->ip_proto = 
arfs_get_ip_proto(skb); - tuple->src_port = arfs_get_src_port(skb); - tuple->dst_port = arfs_get_dst_port(skb); + tuple->src_port = fk->ports.src; + tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; @@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, return rule; } -static bool arfs_cmp_ips(struct arfs_tuple *tuple, - const struct sk_buff *skb) +static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk) { - if (tuple->etype == htons(ETH_P_IP) && - tuple->src_ipv4 == ip_hdr(skb)->saddr && - tuple->dst_ipv4 == ip_hdr(skb)->daddr) - return true; - if (tuple->etype == htons(ETH_P_IPV6) && - (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, - sizeof(struct in6_addr))) && - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, - sizeof(struct in6_addr)))) - return true; + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst) + return false; + if (tuple->etype != fk->basic.n_proto) + return false; + if (tuple->etype == htons(ETH_P_IP)) + return tuple->src_ipv4 == fk->addrs.v4addrs.src && + tuple->dst_ipv4 == fk->addrs.v4addrs.dst; + if (tuple->etype == htons(ETH_P_IPV6)) + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); return false; } static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, - const struct sk_buff *skb) + const struct flow_keys *fk) { struct arfs_rule *arfs_rule; struct hlist_head *head; - __be16 src_port = arfs_get_src_port(skb); - __be16 dst_port = arfs_get_dst_port(skb); - head = arfs_hash_bucket(arfs_t, src_port, dst_port); + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); hlist_for_each_entry(arfs_rule, head, hlist) { - if (arfs_rule->tuple.src_port == src_port && - arfs_rule->tuple.dst_port == dst_port && - arfs_cmp_ips(&arfs_rule->tuple, skb)) { + if (arfs_cmp(&arfs_rule->tuple, fk)) return arfs_rule; - } } return NULL; @@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; - if (skb->protocol != htons(ETH_P_IP) && - skb->protocol != htons(ETH_P_IPV6)) + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; if (skb->encapsulation) return -EPROTONOSUPPORT; - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto); if (!arfs_t) return -EPROTONOSUPPORT; spin_lock_bh(&arfs->arfs_lock); - arfs_rule = arfs_find_rule(arfs_t, skb); + arfs_rule = arfs_find_rule(arfs_t, &fk); if (arfs_rule) { if (arfs_rule->rxq == rxq_index) { spin_unlock_bh(&arfs->arfs_lock); @@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } arfs_rule->rxq = rxq_index; } else { - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, - rxq_index, flow_id); + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id); if (!arfs_rule) { spin_unlock_bh(&arfs->arfs_lock); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 03bed714bac3..20e628c907e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : mlx5e_port_speed2linkmodes(mdev, speed, !ext); + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && + autoneg != AUTONEG_ENABLE) { + netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", + __func__); + err = -EINVAL; + goto out; + } + link_modes = link_modes & eproto.cap; if (!link_modes) { netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", @@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return -EOPNOTSUPP; + if (pauseparam->autoneg) return -EINVAL; @@ -1679,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, + struct ethtool_flash *flash) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct net_device *dev = priv->netdev; + const struct firmware *fw; + int err; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) + return -EOPNOTSUPP; + + err = request_firmware_direct(&fw, flash->data, &dev->dev); + if (err) + return err; + + dev_hold(dev); + rtnl_unlock(); + + err = mlx5_firmware_flash(mdev, fw, NULL); + release_firmware(fw); + + rtnl_lock(); + dev_put(dev); + return err; +} + +static int mlx5e_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_flash_device(priv, flash); +} + static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, bool is_rx_cq) { @@ -1961,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .flash_device = mlx5e_flash_device, .get_priv_flags = mlx5e_get_priv_flags, .set_priv_flags = mlx5e_set_priv_flags, .self_test = mlx5e_self_test, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6c712c5be4d8..9d5f6e56188f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1321,7 +1321,6 @@ err_free_txqsq: void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); - clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7ecfc53cf5f6..00b2d4a86159 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_cls_offload *f, struct net_device *filter_dev, - u8 *match_level, u8 *tunnel_match_level) + u8 *inner_match_level, u8 *outer_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; u8 ip_proto = 0; + u8 *match_level; - *match_level = MLX5_MATCH_NONE; + match_level = 
outer_match_level; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_META) | @@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } if (mlx5e_get_tc_tun(filter_dev)) { - if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, + outer_match_level)) return -EOPNOTSUPP; - /* In decap flow, header pointers should point to the inner + /* At this point, header pointers should point to the inner * headers, outer header were already set by parse_tunnel_attr */ + match_level = inner_match_level; headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, spec); headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, @@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct flow_cls_offload *f, struct net_device *filter_dev) { + u8 inner_match_level, outer_match_level, non_tunnel_match_level; struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; - u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; int err; - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); + inner_match_level = MLX5_MATCH_NONE; + outer_match_level = MLX5_MATCH_NONE; + + err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, + &outer_match_level); + non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? + outer_match_level : inner_match_level; if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < match_level)) { + esw->offloads.inline_mode < non_tunnel_match_level)) { NL_SET_ERR_MSG_MOD(extack, "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", - match_level, esw->offloads.inline_mode); + non_tunnel_match_level, esw->offloads.inline_mode); return -EOPNOTSUPP; } } if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - flow->esw_attr->match_level = match_level; - flow->esw_attr->tunnel_match_level = tunnel_match_level; + flow->esw_attr->inner_match_level = inner_match_level; + flow->esw_attr->outer_match_level = outer_match_level; } else { - flow->nic_attr->match_level = match_level; + flow->nic_attr->match_level = non_tunnel_match_level; } return err; @@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, esw_attr->parse_attr = parse_attr; esw_attr->chain = f->common.chain_index; - esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; + esw_attr->prio = f->common.prio; esw_attr->in_rep = in_rep; esw_attr->in_mdev = in_mdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a38e8a3c7c9a..04685dbb280c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr { struct mlx5_termtbl_handle *termtbl; } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; - u8 match_level; - u8 tunnel_match_level; + u8 inner_match_level; + u8 outer_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 089ae4d48a82..0323fd078271 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_set_rule_source_port(esw, spec, attr); - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { - if (attr->tunnel_match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - if (attr->match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - } else if (attr->match_level != MLX5_MATCH_NONE) { + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - } + if (attr->inner_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; @@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, mlx5_eswitch_set_rule_source_port(esw, spec, attr); spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; - if (attr->match_level != MLX5_MATCH_NONE) + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9314777d99e3..d685122d9ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, data_size = crdump_size - offset; else data_size = MLX5_CR_DUMP_CHUNK_SIZE; - err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); + err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, + data_size); if (err) goto free_data; } @@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t) if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; + fatal_error = check_fatal_sensors(dev); + + if (fatal_error && !health->fatal_error) { + mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); + dev->priv.health.fatal_error = fatal_error; + print_health_info(dev); + mlx5_trigger_health_work(dev); + goto out; + } + count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; @@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t) if (health->synd && health->synd != prev_synd) queue_work(health->wq, &health->report_work); - fatal_error = check_fatal_sensors(dev); - - if (fatal_error && !health->fatal_error) { - mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); - dev->priv.health.fatal_error = fatal_error; - print_health_info(dev); - mlx5_trigger_health_work(dev); - } - out: mod_timer(&health->timer, get_next_poll_jiffies()); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index ebd81f6b556e..90cb50fe17fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev, return mlx5e_ethtool_get_ts_info(priv, info); } +static int mlx5i_flash_device(struct net_device *netdev, + struct ethtool_flash *flash) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + return mlx5e_ethtool_flash_device(priv, flash); +} + enum mlx5_ptys_width { MLX5_PTYS_WIDTH_1X = 1 << 0, 
MLX5_PTYS_WIDTH_2X = 1 << 1, @@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .get_ethtool_stats = mlx5i_get_ethtool_stats, .get_ringparam = mlx5i_get_ringparam, .set_ringparam = mlx5i_set_ringparam, + .flash_device = mlx5i_flash_device, .get_channels = mlx5i_get_channels, .set_channels = mlx5i_set_channels, .get_coalesce = mlx5i_get_coalesce, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index ea9ee88491e5..ea1d4d26ece0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, case 128: general_obj_key_size = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128; + key_p += sz_bytes; break; case 256: general_obj_key_size = diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index e8ac90564dbe..84a87d059333 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority) { - rulei->priority = priority >> 16; + rulei->priority = priority; } void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 63b07edd9d81..38bb1cfe4e8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -29,7 +29,7 @@ struct mlxsw_sp_ptp_state { struct mlxsw_sp *mlxsw_sp; - struct rhashtable unmatched_ht; + struct rhltable unmatched_ht; spinlock_t unmatched_lock; /* protects the HT */ struct delayed_work ht_gc_dw; u32 gc_cycle; @@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key { struct mlxsw_sp1_ptp_unmatched { struct mlxsw_sp1_ptp_key key; - struct rhash_head ht_node; + struct rhlist_head ht_node; struct rcu_head rcu; struct sk_buff *skb; u64 timestamp; @@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb, /* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on * error. 
*/ -static struct mlxsw_sp1_ptp_unmatched * +static int mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_key key, struct sk_buff *skb, @@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL; struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state; struct mlxsw_sp1_ptp_unmatched *unmatched; - struct mlxsw_sp1_ptp_unmatched *conflict; + int err; unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC); if (!unmatched) - return ERR_PTR(-ENOMEM); + return -ENOMEM; unmatched->key = key; unmatched->skb = skb; unmatched->timestamp = timestamp; unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles; - conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); - if (conflict) + err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); + if (err) kfree(unmatched); - return conflict; + return err; } static struct mlxsw_sp1_ptp_unmatched * mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp1_ptp_key key) + struct mlxsw_sp1_ptp_key key, int *p_length) { - return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, - mlxsw_sp1_ptp_unmatched_ht_params); + struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL; + struct rhlist_head *tmp, *list; + int length = 0; + + list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, + mlxsw_sp1_ptp_unmatched_ht_params); + rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) { + last = unmatched; + length++; + } + + *p_length = length; + return last; } static int mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_unmatched *unmatched) { - return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); + return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht, + &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); } /* This function is called in the following scenarios: @@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp1_ptp_key key, struct sk_buff *skb, u64 timestamp) { - struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict; + struct mlxsw_sp1_ptp_unmatched *unmatched; + int length; int err; rcu_read_lock(); - unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key); - spin_lock(&mlxsw_sp->ptp_state->unmatched_lock); - if (unmatched) { - /* There was an unmatched entry when we looked, but it may have - * been removed before we took the lock. - */ - err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched); - if (err) - unmatched = NULL; - } - - if (!unmatched) { - /* We have no unmatched entry, but one may have been added after - * we looked, but before we took the lock. - */ - unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, - skb, timestamp); - if (IS_ERR(unmatched)) { - if (skb) - mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, - key.local_port, - key.ingress, NULL); - unmatched = NULL; - } else if (unmatched) { - /* Save just told us, under lock, that the entry is - * there, so this has to work. - */ - err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, - unmatched); - WARN_ON_ONCE(err); - } - } - - /* If unmatched is non-NULL here, it comes either from the lookup, or - * from the save attempt above. In either case the entry was removed - * from the hash table. 
If unmatched is NULL, a new unmatched entry was - * added to the hash table, and there was no conflict. - */ - + unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length); if (skb && unmatched && unmatched->timestamp) { unmatched->skb = skb; } else if (timestamp && unmatched && unmatched->skb) { unmatched->timestamp = timestamp; - } else if (unmatched) { - /* unmatched holds an older entry of the same type: either an - * skb if we are handling skb, or a timestamp if we are handling - * timestamp. We can't match that up, so save what we have. + } else { + /* Either there is no entry to match, or one that is there is + * incompatible. */ - conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, - skb, timestamp); - if (IS_ERR(conflict)) { - if (skb) - mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, - key.local_port, - key.ingress, NULL); - } else { - /* Above, we removed an object with this key from the - * hash table, under lock, so conflict can not be a - * valid pointer. - */ - WARN_ON_ONCE(conflict); - } + if (length < 100) + err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, + skb, timestamp); + else + err = -E2BIG; + if (err && skb) + mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, + key.local_port, + key.ingress, NULL); + unmatched = NULL; + } + + if (unmatched) { + err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched); + WARN_ON_ONCE(err); } spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock); @@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state, local_bh_disable(); spin_lock(&ptp_state->unmatched_lock); - err = rhashtable_remove_fast(&ptp_state->unmatched_ht, - &unmatched->ht_node, - mlxsw_sp1_ptp_unmatched_ht_params); + err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node, + mlxsw_sp1_ptp_unmatched_ht_params); spin_unlock(&ptp_state->unmatched_lock); if (err) @@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work) ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw); gc_cycle = ptp_state->gc_cycle++; - rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter); + rhltable_walk_enter(&ptp_state->unmatched_ht, &iter); rhashtable_walk_start(&iter); while ((obj = rhashtable_walk_next(&iter))) { if (IS_ERR(obj)) @@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp) spin_lock_init(&ptp_state->unmatched_lock); - err = rhashtable_init(&ptp_state->unmatched_ht, - &mlxsw_sp1_ptp_unmatched_ht_params); + err = rhltable_init(&ptp_state->unmatched_ht, + &mlxsw_sp1_ptp_unmatched_ht_params); if (err) goto err_hashtable_init; @@ -891,7 +863,7 @@ err_fifo_clr: err_mtptpt1_set: mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); err_mtptpt_set: - rhashtable_destroy(&ptp_state->unmatched_ht); + rhltable_destroy(&ptp_state->unmatched_ht); err_hashtable_init: kfree(ptp_state); return ERR_PTR(err); @@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false); mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0); mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); - rhashtable_free_and_destroy(&ptp_state->unmatched_ht, - &mlxsw_sp1_ptp_unmatched_free_fn, NULL); + rhltable_free_and_destroy(&ptp_state->unmatched_ht, + &mlxsw_sp1_ptp_unmatched_free_fn, NULL); kfree(ptp_state); } diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c index 39aca1ab4687..86fc6e6b46dd 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ 
b/drivers/net/ethernet/mscc/ocelot_ace.c @@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data, break; case OCELOT_ACL_ACTION_TRAP: VCAP_ACT_SET(PORT_MASK, 0x0); - VCAP_ACT_SET(MASK_MODE, 0x0); + VCAP_ACT_SET(MASK_MODE, 0x1); VCAP_ACT_SET(POLICE_ENA, 0x0); VCAP_ACT_SET(POLICE_IDX, 0x0); VCAP_ACT_SET(CPU_QU_NUM, 0x0); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 59487d446a09..b894bc0c9c16 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -13,12 +13,6 @@ struct ocelot_port_block { struct ocelot_port *port; }; -static u16 get_prio(u32 prio) -{ - /* prio starts from 0x1000 while the ids starts from 0 */ - return prio >> 16; -} - static int ocelot_flower_parse_action(struct flow_cls_offload *f, struct ocelot_ace_rule *rule) { @@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, } finished_key_parsing: - ocelot_rule->prio = get_prio(f->common.prio); + ocelot_rule->prio = f->common.prio; ocelot_rule->id = f->cookie; return ocelot_flower_parse_action(f, ocelot_rule); } @@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f, struct ocelot_ace_rule rule; int ret; - rule.prio = get_prio(f->common.prio); + rule.prio = f->common.prio; rule.port = port_block->port; rule.id = f->cookie; @@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f, struct ocelot_ace_rule rule; int ret; - rule.prio = get_prio(f->common.prio); + rule.prio = f->common.prio; rule.port = port_block->port; rule.id = f->cookie; ret = ocelot_ace_rule_stats_update(&rule); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index d8b7fba96d58..337b0cbfd153 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * setup (if available). */ status = myri10ge_request_irq(mgp); if (status != 0) - goto abort_with_firmware; + goto abort_with_slices; myri10ge_free_irq(mgp); /* Save configuration space to be restored if the diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4054b70d7719..5afcb3c4c2ef 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; - bool first = true, last; + bool first = true, narrow_ld, last; bool needs_inc = false; swreg stack_off_reg; u8 prev_gpr = 255; @@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, needs_inc = true; } + + narrow_ld = clr_gpr && size < 8; + if (lm3) { + unsigned int nop_cnt; + emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); - /* For size < 4 one slot will be filled by zeroing of upper. */ - wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); + /* For size < 4 one slot will be filled by zeroing of upper, + * but be careful, that zeroing could be eliminated by zext + * optimization. + */ + nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 
2 : 3; + wrp_nops(nfp_prog, nop_cnt); } - if (clr_gpr && size < 8) + if (narrow_ld) wrp_zext(nfp_prog, meta, gpr); while (size) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index e209f150c5f2..457bdc60f3ee 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; struct flow_block_cb *block_cb; - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && - nfp_flower_internal_port_can_offload(app, netdev))) + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !nfp_flower_internal_port_can_offload(app, netdev)) || + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; switch (f->command) { case FLOW_BLOCK_BIND: + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (cb_priv && + flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb, + cb_priv, + &nfp_block_cb_list)) + return -EBUSY; + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); if (!cb_priv) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 86e968cd5ffd..124a43dc136a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } - if (flow->common.prio != (1 << 16)) { + if (flow->common.prio != 1) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index a7a80f4b722a..f0ee982eb1b5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, flow.daddr = *(__be32 *)n->primary_key; - /* Only concerned with route changes for representors. */ - if (!nfp_netdev_is_nfp_repr(n->dev)) - return NOTIFY_DONE; - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; + if (!nfp_netdev_is_nfp_repr(n->dev) && + !nfp_flower_internal_port_can_offload(app, n->dev)) + return NOTIFY_DONE; + /* Only concerned with changes to routes already added to NFP. 
*/ if (!nfp_tun_has_route(app, flow.daddr)) return NOTIFY_DONE; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 829dd60ab937..1efff7f68ef6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + goto err4; } } @@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, return 0; +err4: + qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index e1dd6ea60d67..bae0074ab9aa 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, skb = napi_alloc_skb(&tp->napi, pkt_size); if (skb) skb_copy_to_linear_data(skb, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); return skb; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ef8f08931fe8..6cacd5e893ac 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Renesas Ethernet AVB device driver * - * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2014-2019 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> * @@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) kfree(ts_skb); if (tag == tfa_tag) { skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); break; + } else { + dev_kfree_skb_any(skb); } } ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); @@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) } goto unmap; } - ts_skb->skb = skb; + ts_skb->skb = skb_get(skb); ts_skb->tag = priv->ts_skb_tag++; priv->ts_skb_tag &= 0x3ff; list_add_tail(&ts_skb->list, &priv->ts_skb_list); @@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev) /* Clear the timestamp list */ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); kfree(ts_skb); } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 7a5e6c5abb57..276c7cae7cee 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev) printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; - goto err_out_free_page; + goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; -err_out_free_page: - free_page((unsigned long) sp->srings); +err_out_free_attrs: + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); err_out_free_dev: free_netdev(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 4644b2aeeba1..e2e469c37a4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, 
bool enable) int ret; struct device *dev = &bsp_priv->pdev->dev; - if (!ldo) { - dev_err(dev, "no regulator found\n"); - return -1; - } + if (!ldo) + return 0; if (enable) { ret = regulator_enable(ldo); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 37c0bc699cd9..6c305b6ecad0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv, struct stmmac_tc_entry *entry, *frag = NULL; struct tc_u32_sel *sel = cls->knode.sel; u32 off, data, mask, real_off, rem; - u32 prio = cls->common.prio; + u32 prio = cls->common.prio << 16; int ret; /* Only 1 match per entry */ diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 32a89744972d..a46b8b2e44e1 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev) if (!cpsw) return -ENOMEM; + platform_set_drvdata(pdev, cpsw); cpsw->dev = dev; mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); @@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_cpts; } - platform_set_drvdata(pdev, cpsw); priv = netdev_priv(ndev); priv->cpsw = cpsw; priv->ndev = ndev; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 8479a440527b..12466a72cefc 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit) pci_unmap_single(lp->pci_dev, lp->rx_skbs[cur_bd].skb_dma, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) + if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0) memmove(skb->data, skb->data - NET_IP_ALIGN, pkt_len); data = skb_put(skb, pkt_len); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 78a7de3fb622..c62f474b6d08 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift, static void tsi108_stat_carry(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; u32 carry1, carry2; - spin_lock_irq(&data->misclock); + spin_lock_irqsave(&data->misclock, flags); carry1 = TSI_READ(TSI108_STAT_CARRY1); carry2 = TSI_READ(TSI108_STAT_CARRY2); @@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev) TSI108_STAT_TXPAUSEDROP_CARRY, &data->tx_pause_drop); - spin_unlock_irq(&data->misclock); + spin_unlock_irqrestore(&data->misclock, flags); } /* Read a stat counter atomically with respect to carries. 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3544e1991579..e8fce6d715ef 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + struct netvsc_device *nvdev; struct netvsc_vf_pcpu_stats vf_tot; int i; + rcu_read_lock(); + + nvdev = rcu_dereference(ndev_ctx->nvdev); if (!nvdev) - return; + goto out; netdev_stats_to_stats64(t, &net->stats); @@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } +out: + rcu_read_unlock(); } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index b41696e16bdc..c20e7ef18bc9 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, err = hwsim_subscribe_all_others(phy); if (err < 0) { mutex_unlock(&hwsim_phys_lock); - goto err_reg; + goto err_subscribe; } } list_add_tail(&phy->list, &hwsim_phys); @@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, return idx; +err_subscribe: + ieee802154_unregister_hw(phy->hw); err_reg: kfree(pib); err_pib: @@ -901,9 +903,9 @@ static __init int hwsim_init_module(void) return 0; platform_drv: - genl_unregister_family(&hwsim_genl_family); -platform_dev: platform_device_unregister(mac802154hwsim_dev); +platform_dev: + genl_unregister_family(&hwsim_genl_family); return rc; } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index c5c417a3c0ce..bcc40a236624 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port) debugfs_remove_recursive(nsim_dev_port->ddir); } +static struct net *nsim_devlink_net(struct devlink *devlink) +{ + return &init_net; +} + static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); } static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); } static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); } static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); } static int nsim_dev_resources_register(struct devlink *devlink) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); struct devlink_resource_size_params params = { .size_max = (u64)-1, .size_granularity = 1, .unit = 
DEVLINK_RESOURCE_UNIT_ENTRY }; + struct net *net = nsim_devlink_net(devlink); int err; u64 n; @@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4, &params); @@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV4, &params); @@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6, &params); @@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_IPV6, &params); @@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink) devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB, nsim_dev_ipv4_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB_RULES, nsim_dev_ipv4_fib_rules_res_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB, nsim_dev_ipv6_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_dev_ipv6_fib_rules_res_occ_get, - nsim_dev); + net); out: return err; } @@ -199,11 +196,11 @@ out: static int nsim_dev_reload(struct devlink *devlink, struct netlink_ext_ack *extack) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES }; + struct net *net = nsim_devlink_net(devlink); int i; for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { @@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink, err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(nsim_dev->fib_data, - res_ids[i], val, extack); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } @@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) mutex_init(&nsim_dev->port_list_lock); nsim_dev->fw_update_status = true; - nsim_dev->fib_data = nsim_fib_create(); - if (IS_ERR(nsim_dev->fib_data)) { - err = PTR_ERR(nsim_dev->fib_data); - goto err_devlink_free; - } - err = nsim_dev_resources_register(devlink); if (err) - goto err_fib_destroy; + goto err_devlink_free; err = devlink_register(devlink, &nsim_bus_dev->dev); if (err) @@ -315,8 +305,6 @@ err_dl_unregister: devlink_unregister(devlink); err_resources_unregister: devlink_resources_unregister(devlink, NULL); -err_fib_destroy: - nsim_fib_destroy(nsim_dev->fib_data); err_devlink_free: devlink_free(devlink); return ERR_PTR(err); @@ -330,7 +318,6 @@ 
static void nsim_dev_destroy(struct nsim_dev *nsim_dev) nsim_dev_debugfs_exit(nsim_dev); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); - nsim_fib_destroy(nsim_dev->fib_data); mutex_destroy(&nsim_dev->port_list_lock); devlink_free(devlink); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 8c57ba747772..f61d094746c0 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -18,6 +18,7 @@ #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/fib_rules.h> +#include <net/netns/generic.h> #include "netdevsim.h" @@ -32,14 +33,15 @@ struct nsim_per_fib_data { }; struct nsim_fib_data { - struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; }; -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max) +static unsigned int nsim_fib_net_id; + +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; switch (res_id) { @@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, return max ? entry->max : entry->num; } -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; int err = 0; @@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_rule_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data, static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); struct fib_notifier_info *info = ptr; int err = 0; switch (event) { case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - err = nsim_fib_rule_event(data, info, - event == FIB_EVENT_RULE_ADD); + err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); break; case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - err = nsim_fib_event(data, info, - event == FIB_EVENT_ENTRY_ADD); + err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); break; } @@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, /* inconsistent dump, trying again */ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); + struct nsim_fib_data *data; + struct net *net; + + rcu_read_lock(); + for_each_net_rcu(net) { + data = net_generic(net, nsim_fib_net_id); + + data->ipv4.fib.num = 0ULL; + 
data->ipv4.rules.num = 0ULL; - data->ipv4.fib.num = 0ULL; - data->ipv4.rules.num = 0ULL; - data->ipv6.fib.num = 0ULL; - data->ipv6.rules.num = 0ULL; + data->ipv6.fib.num = 0ULL; + data->ipv6.rules.num = 0ULL; + } + rcu_read_unlock(); } -struct nsim_fib_data *nsim_fib_create(void) -{ - struct nsim_fib_data *data; - int err; +static struct notifier_block nsim_fib_nb = { + .notifier_call = nsim_fib_event_nb, +}; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); +/* Initialize per network namespace state */ +static int __net_init nsim_fib_netns_init(struct net *net) +{ + struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id); data->ipv4.fib.max = (u64)-1; data->ipv4.rules.max = (u64)-1; @@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void) data->ipv6.fib.max = (u64)-1; data->ipv6.rules.max = (u64)-1; - data->fib_nb.notifier_call = nsim_fib_event_nb; - err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent); - if (err) { - pr_err("Failed to register fib notifier\n"); - goto err_out; - } + return 0; +} - return data; +static struct pernet_operations nsim_fib_net_ops = { + .init = nsim_fib_netns_init, + .id = &nsim_fib_net_id, + .size = sizeof(struct nsim_fib_data), +}; -err_out: - kfree(data); - return ERR_PTR(err); +void nsim_fib_exit(void) +{ + unregister_pernet_subsys(&nsim_fib_net_ops); + unregister_fib_notifier(&nsim_fib_nb); } -void nsim_fib_destroy(struct nsim_fib_data *data) +int nsim_fib_init(void) { - unregister_fib_notifier(&data->fib_nb); - kfree(data); + int err; + + err = register_pernet_subsys(&nsim_fib_net_ops); + if (err < 0) { + pr_err("Failed to register pernet subsystem\n"); + goto err_out; + } + + err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); + if (err < 0) { + pr_err("Failed to register fib notifier\n"); + goto err_out; + } + +err_out: + return err; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 0740940f41b1..55f57f76d01b 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -357,12 +357,18 @@ static int __init nsim_module_init(void) if (err) goto err_dev_exit; - err = rtnl_link_register(&nsim_link_ops); + err = nsim_fib_init(); if (err) goto err_bus_exit; + err = rtnl_link_register(&nsim_link_ops); + if (err) + goto err_fib_exit; + return 0; +err_fib_exit: + nsim_fib_exit(); err_bus_exit: nsim_bus_exit(); err_dev_exit: @@ -373,6 +379,7 @@ err_dev_exit: static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); + nsim_fib_exit(); nsim_bus_exit(); nsim_dev_exit(); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 79c05af2a7c0..9404637d34b7 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_index); -struct nsim_fib_data *nsim_fib_create(void); -void nsim_fib_destroy(struct nsim_fib_data *fib_data); -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_init(void); +void nsim_fib_exit(void); +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack); #if IS_ENABLED(CONFIG_XFRM_OFFLOAD) diff 
--git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 222ccd9ecfce..6ad8b1c63c34 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev) * after HW reset: RX delay enabled and TX delay disabled * after SW reset: RX delay enabled, while TX delay retains the * value before reset. - * - * So let's first disable the RX and TX delays in PHY and enable - * them based on the mode selected (this also takes care of RGMII - * mode where we expect delays to be disabled) */ - - ret = at803x_disable_rx_delay(phydev); - if (ret < 0) - return ret; - ret = at803x_disable_tx_delay(phydev); - if (ret < 0) - return ret; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - /* If RGMII_ID or RGMII_RXID are specified enable RX delay, - * otherwise keep it disabled - */ + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ret = at803x_enable_rx_delay(phydev); - if (ret < 0) - return ret; - } + else + ret = at803x_disable_rx_delay(phydev); + if (ret < 0) + return ret; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - /* If RGMII_ID or RGMII_TXID are specified enable TX delay, - * otherwise keep it disabled - */ + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) ret = at803x_enable_tx_delay(phydev); - } + else + ret = at803x_disable_tx_delay(phydev); return ret; } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index bd04fe762056..ce940871331e 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -262,11 +262,6 @@ static struct class mdio_bus_class = { }; #if IS_ENABLED(CONFIG_OF_MDIO) -/* Helper function for of_mdio_find_bus */ -static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) -{ - return dev->of_node == mdio_bus_np; -} /** * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. @@ -287,9 +282,7 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) if (!mdio_bus_np) return NULL; - d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np, - of_mdio_bus_match); - + d = class_find_device_by_of_node(&mdio_bus_class, mdio_bus_np); return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index b9d4145781ca..7935593debb1 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; + if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (val < 0) + return val; + + /* Autoneg is being started, therefore disregard current + * link status and report link as down. + */ + if (val & MDIO_AN_CTRL1_RESTART) { + phydev->link = 0; + return 0; + } + } + while (mmd_mask && link) { devad = __ffs(mmd_mask); mmd_mask &= ~BIT(devad); @@ -509,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_status); +/** + * genphy_c45_config_aneg - restart auto-negotiation or forced setup + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we force a configuration. 
+ */ +int genphy_c45_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} +EXPORT_SYMBOL_GPL(genphy_c45_config_aneg); + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ef7aa738e0dc..6b0f89369b46 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev) * allowed to call genphy_config_aneg() */ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) - return -EOPNOTSUPP; + return genphy_c45_config_aneg(phydev); return genphy_config_aneg(phydev); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7ddd91df99e3..27ebc2c6c2d0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done); */ int genphy_update_link(struct phy_device *phydev) { - int status; + int status = 0, bmcr; + + bmcr = phy_read(phydev, MII_BMCR); + if (bmcr < 0) + return bmcr; + + /* Autoneg is being started, therefore disregard BMSR value and + * report link as down. + */ + if (bmcr & BMCR_ANRESTART) + goto done; /* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index abfa0da9bbd2..e8089def5a46 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 5519248a791e..32b08b18e120 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) } if (!timeout) { dev_err(&udev->dev, "firmware not ready in time\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto err; } /* enable ethernet mode (?) 
*/ diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index d62b6706a537..fc5895f85cee 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), usb_buf, 24); if (status != 0) - return status; + goto out; memcpy(usb_buf, init_msg_2, 12); status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), usb_buf, 28); if (status != 0) - return status; + goto out; memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); - +out: kfree(usb_buf); return status; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 3d92ea6fcc02..f033fee225a1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out3; + goto out4; } usb_set_intfdata(intf, dev); @@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_phy_init(dev); if (ret < 0) - goto out4; + goto out5; return 0; -out4: +out5: unregister_netdev(netdev); +out4: + usb_free_urb(dev->urb_intr); out3: lan78xx_unbind(dev, intf); out2: diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0cc03a9ff545..04137ac373b0 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, 500); + if (ret < 0) + memset(data, 0xff, size); + else + memcpy(data, tmp, size); - memcpy(data, tmp, size); kfree(tmp); return ret; @@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) - napi_disable(&tp->napi); + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); @@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf, return 0; out1: - netif_napi_del(&tp->napi); usb_set_intfdata(intf, NULL); out: free_netdev(netdev); @@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf) if (tp) { rtl_set_unplug(tp); - netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index e9fc168bb734..489cba9b284d 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options) } result = i2400m_barker_db_add(barker); if (result < 0) - goto error_add; + goto error_parse_add; } kfree(options_orig); } return 0; +error_parse_add: error_parse: + kfree(options_orig); error_add: kfree(i2400m_barker_db); return result; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 1f500cddb3a7..55b713255b8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg 
killer1650s_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 1c1bf1b281cd..6c04f8223aff 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; +extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index cb22d447fcb8..fe776e35b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cpu_to_le32(vif->bss_conf.use_short_slot ? 
MAC_FLG_SHORT_SLOT : 0); - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + cmd->filter_flags = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); @@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, /* We need the dtim_period to set the MAC as associated */ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* @@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, dtim_offs); ctxt_sta->is_assoc = cpu_to_le32(1); + + /* + * allow multicast data frames only as long as the station is + * authorized, i.e., GTK keys are already installed (if needed) + */ + if (ap_sta_id < IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); + if (!IS_ERR_OR_NULL(sta)) { + struct iwl_mvm_sta *mvmsta = + iwl_mvm_sta_from_mac80211(sta); + + if (mvmsta->sta_state == + IEEE80211_STA_AUTHORIZED) + cmd.filter_flags |= + cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + } + + rcu_read_unlock(); + } } else { ctxt_sta->is_assoc = cpu_to_le32(0); @@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | - MAC_FILTER_IN_CRC32); + MAC_FILTER_IN_CRC32 | + MAC_FILTER_ACCEPT_GRP); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); /* Allocate sniffer station */ @@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | - MAC_FILTER_IN_PROBE_REQUEST); + MAC_FILTER_IN_PROBE_REQUEST | + MAC_FILTER_ACCEPT_GRP); /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1c904b5226aa..a7bc00d1296f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + /* + * Now that the station is authorized, i.e., keys were already + * installed, need to indicate to the FW that + * multicast data frames can be forwarded to the driver + */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* Multicast data frames are no longer allowed */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + /* disable beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); WARN_ON(ret && diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index de711c1160d3..d9ed53b7c768 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1062,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + 
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; } + + /* same thing for QuZ... */ + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_quz_hr; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_quz_hr; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + } + #endif pci_set_drvdata(pdev, iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f5df5b370d78..db62c8314603 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - ((trans->cfg != &iwl_ax200_cfg_cc && - trans->cfg != &killer1650x_2ax_cfg && - trans->cfg != &killer1650w_2ax_cfg && - trans->cfg != &iwl_ax201_cfg_quz_hr) || - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 38d110338987..9ef6b8fe03c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, u16 len = byte_cnt; __le16 bc_ent; - if (trans_pcie->bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) + if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + @@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + /* Starting from 22560, the HW expects bytes */ + WARN_ON(trans_pcie->bc_table_dword); + WARN_ON(len > 0x3FFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; - else + } else { + /* Until 22560, the HW expects DW */ + WARN_ON(!trans_pcie->bc_table_dword); + len = DIV_ROUND_UP(len, 4); + WARN_ON(len > 0xFFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); scd_bc_tbl->tfd_offset[idx] = bc_ent; + } } /* diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 627ed1fc7b15..645f4d15fb61 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = { .release_buffered_frames = mt76_release_buffered_frames, }; -static int mt76x0u_init_hardware(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset) { int err; - 
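	/*
	 * Illustrative note (not part of the patch): the tx-gen2.c hunk above
	 * splits the byte-count table encoding by device family.  Assuming the
	 * field widths implied by the WARN_ON() bounds, the packing is roughly:
	 *
	 *	if (device_family >= IWL_DEVICE_FAMILY_22560)
	 *		bc_ent = cpu_to_le16(len_bytes | (num_fetch_chunks << 14));
	 *	else
	 *		bc_ent = cpu_to_le16(DIV_ROUND_UP(len_bytes, 4) |
	 *				     (num_fetch_chunks << 12));
	 *
	 * i.e. 22560+ hardware expects a 14-bit length in bytes, older gen2
	 * hardware a 12-bit length in dwords, with the count of 64-byte fetch
	 * chunks in the remaining high bits.
	 */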
mt76x0_chip_onoff(dev, true, true); + mt76x0_chip_onoff(dev, true, reset); if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; @@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev) if (err < 0) goto out_err; - err = mt76x0u_init_hardware(dev); + err = mt76x0u_init_hardware(dev, true); if (err < 0) goto out_err; @@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) if (ret < 0) goto err; - ret = mt76x0u_init_hardware(dev); + ret = mt76x0u_init_hardware(dev, false); if (ret) goto err; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index c9b957ac5733..ecbe78b8027b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) } /* + * Clear encryption initialization vectors on start, but keep them + * for watchdog reset. Otherwise we will have wrong IVs and not be + * able to keep connections after reset. + */ + if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags)) + for (i = 0; i < 256; i++) + rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); + + /* * Clear all beacons */ for (i = 0; i < 8; i++) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 7e43690a861c..2b216edd0c7d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -658,6 +658,7 @@ enum rt2x00_state_flags { DEVICE_STATE_ENABLED_RADIO, DEVICE_STATE_SCANNING, DEVICE_STATE_FLUSHING, + DEVICE_STATE_RESET, /* * Driver configuration diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 35414f97a978..9d158237ac67 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) { - int retval; + int retval = 0; if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { /* * This is special case for ieee80211_restart_hw(), otherwise * mac80211 never call start() two times in row without stop(); */ + set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); rt2x00lib_stop(rt2x00dev); } @@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) */ retval = rt2x00lib_load_firmware(rt2x00dev); if (retval) - return retval; + goto out; /* * Initialize the device. 
*/ retval = rt2x00lib_initialize(rt2x00dev); if (retval) - return retval; + goto out; rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; @@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); if (retval) - return retval; + goto out; set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); - return 0; +out: + clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); + return retval; } void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1d9940d4e8c7..c9262ffeefe4 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) @@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. */ + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); kfree_skb(nskb); break; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c258a1ce4b28..d3d6b7bd6903 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x1179, .mn = "THNSF5256GPUK TOSHIBA", .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * This LiteON CL1-3D*-Q11 firmware version has a race + * condition associated with actions related to suspend to idle + * LiteON has resolved the problem in future firmware + */ + .vid = 0x14a4, + .fr = "22301111", + .quirks = NVME_QUIRK_SIMPLE_SUSPEND, } }; @@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } + if (!(ctrl->ops->flags & NVME_F_FABRICS)) + ctrl->cntlid = le16_to_cpu(id->cntlid); + if (!ctrl->identified) { int i; @@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } } else { - ctrl->cntlid = le16_to_cpu(id->cntlid); ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); ctrl->hmminds = le32_to_cpu(id->hmminds); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 888d4543894e..af831d3d15d0 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) srcu_read_unlock(&head->srcu, srcu_idx); } + synchronize_srcu(&ns->head->srcu); kblockd_schedule_work(&ns->head->requeue_work); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 778b3a0b6adb..2d678fb968c7 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -92,6 +92,11 @@ enum nvme_quirks { * Broken Write Zeroes. */ NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), + + /* + * Force simple suspend/resume path. + */ + NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6bd9b1033965..732d5b63ec05 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev) * state (which may not be possible if the link is up). 
*/ if (pm_suspend_via_firmware() || !ctrl->npss || - !pcie_aspm_enabled(pdev)) { + !pcie_aspm_enabled(pdev) || + (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) { nvme_dev_disable(ndev, true); return 0; } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index ac5d945be88a..057d1ff87d5d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -76,11 +76,6 @@ static struct bus_type nvmem_bus_type = { .name = "nvmem", }; -static int of_nvmem_match(struct device *dev, const void *nvmem_np) -{ - return dev->of_node == nvmem_np; -} - static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) { struct device *d; @@ -88,7 +83,7 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) if (!nvmem_np) return NULL; - d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); + d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np); if (!d) return NULL; diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 44f53496cab1..000b95787df1 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -280,12 +280,6 @@ unregister: } EXPORT_SYMBOL(of_mdiobus_register); -/* Helper function for of_phy_find_device */ -static int of_phy_match(struct device *dev, const void *phy_np) -{ - return dev->of_node == phy_np; -} - /** * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node @@ -301,7 +295,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np) if (!phy_np) return NULL; - d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); + d = bus_find_device_by_of_node(&mdio_bus_type, phy_np); if (d) { mdiodev = to_mdio_device(d); if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7801e25e6895..b47a2292fe8e 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -37,11 +37,6 @@ static const struct of_device_id of_skipped_node_table[] = { {} /* Empty terminated list */ }; -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_find_device_by_node - Find the platform_device associated with a node * @np: Pointer to device tree node @@ -55,7 +50,7 @@ struct platform_device *of_find_device_by_node(struct device_node *np) { struct device *dev; - dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match); + dev = bus_find_device_by_of_node(&platform_bus_type, np); return dev ? to_platform_device(dev) : NULL; } EXPORT_SYMBOL(of_find_device_by_node); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index a3c7338fad86..dbeeb385fb9f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -64,11 +64,6 @@ static struct resource *get_pci_domain_busn_res(int domain_nr) return &r->res; } -static int find_anything(struct device *dev, const void *data) -{ - return 1; -} - /* * Some device drivers need know if PCI is initiated. 
* Basically, we think PCI is not initiated when there @@ -79,7 +74,7 @@ int no_pci_devices(void) struct device *dev; int no_devices; - dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything); + dev = bus_find_next_device(&pci_bus_type, NULL); no_devices = (dev == NULL); put_device(dev); return no_devices; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 208aacf39329..44c4ae1abd00 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) */ if (ioread32(map + 0x2240c) & 0x2) { pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); - ret = pci_reset_function(pdev); + ret = pci_reset_bus(pdev); if (ret < 0) pci_err(pdev, "Failed to reset GPU: %d\n", ret); } diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index e504d255d5ce..430731cdf827 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device) */ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) { - struct ishtp_cl_device *cl_device = dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); @@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) */ static int __maybe_unused cros_ec_ishtp_resume(struct device *device) { - struct ishtp_cl_device *cl_device = dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 30de448de802..86d88aec94a1 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, USB_CH_IP_CUR_LVL_1P5; break; } + /* Else, fall through */ case USB_STAT_HM_IDGND: dev_err(di->dev, "USB Type - Charging not allowed\n"); di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 9112faa6a9a0..afefb29ce1b0 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -462,16 +462,11 @@ error: return NULL; } -static int of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - struct regulator_dev *of_find_regulator_by_node(struct device_node *np) { struct device *dev; - dev = class_find_device(®ulator_class, NULL, np, of_node_match); + dev = class_find_device_by_of_node(®ulator_class, np); return dev ? 
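	/*
	 * Note (added for clarity, not part of the patch): this is the
	 * recurring conversion in this series.  Single-purpose match callbacks
	 * such as the removed of_nvmem_match(), of_phy_match(),
	 * of_dev_node_match() and of_node_match() are dropped in favour of the
	 * new find helpers, e.g.:
	 *
	 *	old: d = bus_find_device(&nvmem_bus_type, NULL, np, of_nvmem_match);
	 *	new: d = bus_find_device_by_of_node(&nvmem_bus_type, np);
	 *
	 * The same *_by_of_node/_by_name/_by_devt/_by_acpi_dev wrappers exist
	 * for bus_find_device(), class_find_device() and driver_find_device(),
	 * plus bus_find_next_device()/driver_find_next_device() for the
	 * "match anything" iteration cases.
	 */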
dev_to_rdev(dev) : NULL; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 72b7ddc43116..c93ef33b01d3 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -663,21 +663,12 @@ void rtc_update_irq(struct rtc_device *rtc, } EXPORT_SYMBOL_GPL(rtc_update_irq); -static int __rtc_match(struct device *dev, const void *data) -{ - const char *name = data; - - if (strcmp(dev_name(dev), name) == 0) - return 1; - return 0; -} - struct rtc_device *rtc_class_open(const char *name) { struct device *dev; struct rtc_device *rtc = NULL; - dev = class_find_device(rtc_class, NULL, name, __rtc_match); + dev = class_find_device_by_name(rtc_class, name); if (dev) rtc = to_rtc_device(dev); diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index c522e9313c50..0005ec9285aa 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -581,11 +581,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_register); -static int __ccwgroup_match_all(struct device *dev, const void *data) -{ - return 1; -} - /** * ccwgroup_driver_unregister() - deregister a ccw group driver * @cdriver: driver to be deregistered @@ -597,8 +592,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) struct device *dev; /* We don't want ccwgroup devices to live longer than their driver. */ - while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, - __ccwgroup_match_all))) { + while ((dev = driver_find_next_device(&cdriver->driver, NULL))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); ccwgroup_ungroup(gdev); @@ -608,13 +602,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) } EXPORT_SYMBOL(ccwgroup_driver_unregister); -static int __ccwgroupdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - /** * get_ccwgroupdev_by_busid() - obtain device from a bus id * @gdrv: driver the device is owned by @@ -631,8 +618,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv, { struct device *dev; - dev = driver_find_device(&gdrv->driver, NULL, bus_id, - __ccwgroupdev_check_busid); + dev = driver_find_device_by_name(&gdrv->driver, bus_id); return dev ? to_ccwgroupdev(dev) : NULL; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index c421899be20f..131430bd48d9 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1695,18 +1695,6 @@ int ccw_device_force_console(struct ccw_device *cdev) EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif -/* - * get ccw_device matching the busid, but only if owned by cdrv - */ -static int -__ccwdev_check_busid(struct device *dev, const void *id) -{ - const char *bus_id = id; - - return (strcmp(bus_id, dev_name(dev)) == 0); -} - - /** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by @@ -1723,8 +1711,7 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, { struct device *dev; - dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, - __ccwdev_check_busid); + dev = driver_find_device_by_name(&cdrv->driver, bus_id); return dev ? 
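	/*
	 * Note (added for clarity, not part of the patch): the ccwgroup
	 * unregister loop above relies on driver_find_next_device() returning
	 * the first device still bound to the driver, with a reference held,
	 * so the while loop keeps ungrouping and releasing devices until none
	 * remain -- the behaviour the removed __ccwgroup_match_all() matcher
	 * used to provide.
	 */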
to_ccwdev(dev) : NULL; } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1058b4b5cc1e..150f6236c9bb 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -133,18 +133,6 @@ struct zcdn_device { static int zcdn_create(const char *name); static int zcdn_destroy(const char *name); -/* helper function, matches the name for find_zcdndev_by_name() */ -static int __match_zcdn_name(struct device *dev, const void *data) -{ - return strcmp(dev_name(dev), (const char *)data) == 0; -} - -/* helper function, matches the devt value for find_zcdndev_by_devt() */ -static int __match_zcdn_devt(struct device *dev, const void *data) -{ - return dev->devt == *((dev_t *) data); -} - /* * Find zcdn device by name. * Returns reference to the zcdn device which needs to be released @@ -152,10 +140,7 @@ static int __match_zcdn_devt(struct device *dev, const void *data) */ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) { - struct device *dev = - class_find_device(zcrypt_class, NULL, - (void *) name, - __match_zcdn_name); + struct device *dev = class_find_device_by_name(zcrypt_class, name); return dev ? to_zcdn_dev(dev) : NULL; } @@ -167,10 +152,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name) */ static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) { - struct device *dev = - class_find_device(zcrypt_class, NULL, - (void *) &devt, - __match_zcdn_devt); + struct device *dev = class_find_device_by_devt(zcrypt_class, devt); return dev ? to_zcdn_dev(dev) : NULL; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index c7ee07ce3615..28db887d38ed 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -629,6 +629,7 @@ struct qeth_seqno { struct qeth_reply { struct list_head list; struct completion received; + spinlock_t lock; int (*callback)(struct qeth_card *, struct qeth_reply *, unsigned long); u32 seqno; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4d0caeebc802..6502b148541e 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); init_completion(&reply->received); + spin_lock_init(&reply->lock); } return reply; } @@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, if (!reply->callback) { rc = 0; + goto no_callback; + } + + spin_lock_irqsave(&reply->lock, flags); + if (reply->rc) { + /* Bail out when the requestor has already left: */ + rc = reply->rc; } else { if (cmd) { reply->offset = (u16)((char *)cmd - (char *)iob->data); @@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, rc = reply->callback(card, reply, (unsigned long)iob); } } + spin_unlock_irqrestore(&reply->lock, flags); +no_callback: if (rc <= 0) qeth_notify_reply(reply, rc); qeth_put_reply(reply); @@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card, rc = (timeout == -ERESTARTSYS) ? 
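	/*
	 * Note (added for clarity, not part of the patch): reply->lock closes
	 * a race between this timeout path and a late response.  The waiter
	 * takes reply->lock and records the error in reply->rc before dropping
	 * its reference, while qeth_issue_next_read_cb() takes the same lock
	 * and, if reply->rc is already set, bails out instead of invoking the
	 * callback on a request the waiter has abandoned.
	 */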
-EINTR : -ETIME; qeth_dequeue_reply(card, reply); + + if (reply_cb) { + /* Wait until the callback for a late reply has completed: */ + spin_lock_irq(&reply->lock); + if (rc) + /* Zap any callback that's still pending: */ + reply->rc = rc; + spin_unlock_irq(&reply->lock); + } + if (!rc) rc = reply->rc; qeth_put_reply(reply); @@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) get_user(req_len, &ureq->hdr.req_len)) return -EFAULT; + /* Sanitize user input, to avoid overflows in iob size calculation: */ + if (req_len > QETH_BUFSIZE) + return -EINVAL; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); if (!iob) return -ENOMEM; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index abcad097ff2f..f47b4b281b14 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work) pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); /* Fall through */ #endif + /* Fall through - only for the #else condition above. */ default: error = -ENXIO; pr_err("unhandled device %d\n", dev->dev_type); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2c3bb8a966e5..bade2e025ecf 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -824,6 +824,7 @@ struct lpfc_hba { uint32_t cfg_cq_poll_threshold; uint32_t cfg_cq_max_proc_limit; uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_mq_threshold; uint32_t cfg_hdw_queue; uint32_t cfg_irq_chann; uint32_t cfg_suppress_rsp; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ea62322ffe2b..8d8c495b5b60 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); /* + * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues + * the driver will advertise it supports to the SCSI layer. + * + * 0 = Set nr_hw_queues by the number of CPUs or HW queues. + * 1,128 = Manually specify the maximum nr_hw_queue value to be set, + * + * Value range is [0,128]. Default value is 8. + */ +LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, + LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, + "Set the number of SCSI Queues advertised"); + +/* * lpfc_hdw_queue: Set the number of Hardware Queues the driver * will advertise it supports to the NVME and SCSI layers. This also * will map to the number of CQ/WQ pairs the driver will create. @@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_cq_poll_threshold, &dev_attr_lpfc_cq_max_proc_limit, &dev_attr_lpfc_fcp_cpu_map, + &dev_attr_lpfc_fcp_mq_threshold, &dev_attr_lpfc_hdw_queue, &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, @@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) /* Initialize first burst. Target vs Initiator are different. 
*/ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a7549ae32542..1ac98becb5ba 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - shost->nr_hw_queues = phba->cfg_hdw_queue; - else - shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; + if (!phba->cfg_fcp_mq_threshold || + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; + + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), + phba->cfg_fcp_mq_threshold); shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 3aeca387b22a..329f7aa7e169 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -44,6 +44,11 @@ #define LPFC_HBA_HDWQ_MAX 128 #define LPFC_HBA_HDWQ_DEF 0 +/* FCP MQ queue count limiting */ +#define LPFC_FCP_MQ_THRESHOLD_MIN 0 +#define LPFC_FCP_MQ_THRESHOLD_MAX 128 +#define LPFC_FCP_MQ_THRESHOLD_DEF 8 + /* Common buffer size to accomidate SCSI and NVME IO buffers */ #define LPFC_COMMON_IO_BUF_SZ 768 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 8d560c562e9c..6b7b390b2e52 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; + vfree(vha->scan.l); if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2e58cff9d200..98e60a34afd9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3440,6 +3440,12 @@ skip_dpc: return 0; probe_failed: + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; @@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev) if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); - + base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); @@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) @@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; scsi_remove_host(vha->host); return NULL; } diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index c074631086a4..5b313226f11c 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -372,15 +372,10 @@ static ssize_t 
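/*
 * Illustrative note (not part of the patch): with the lpfc hunks above, the
 * number of SCSI hardware queues exposed by an SLI-4 port becomes, in effect:
 *
 *	threshold = cfg_fcp_mq_threshold;		(0..128, default 8)
 *	if (!threshold || threshold > cfg_hdw_queue)
 *		threshold = cfg_hdw_queue;		(0 means "no extra cap")
 *	shost->nr_hw_queues = min(2 * num_possible_nodes(), threshold);
 *
 * replacing the earlier choice between cfg_hdw_queue and the present-CPU
 * count with a NUMA-node-scaled value capped by the new tunable.
 */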
proc_scsi_write(struct file *file, const char __user *buf, return err; } -static int always_match(struct device *dev, const void *data) -{ - return 1; -} - static inline struct device *next_scsi_device(struct device *start) { - struct device *next = bus_find_device(&scsi_bus_type, start, NULL, - always_match); + struct device *next = bus_find_next_device(&scsi_bus_type, start); + put_device(start); return next; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e274053109d0..029da74bb2f5 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig index de2e62c3310a..e3eb19b85fa4 100644 --- a/drivers/soc/ixp4xx/Kconfig +++ b/drivers/soc/ixp4xx/Kconfig @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +if ARCH_IXP4XX || COMPILE_TEST + menu "IXP4xx SoC drivers" config IXP4XX_QMGR @@ -15,3 +17,5 @@ config IXP4XX_NPE and is automatically selected by Ethernet and HSS drivers. endmenu + +endif diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index bb77c220b6f8..ccc6d53fe788 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void) } #ifdef CONFIG_SUSPEND -struct wkup_m3_wakeup_src rtc_wake_src(void) +static struct wkup_m3_wakeup_src rtc_wake_src(void) { u32 i; @@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void) return rtc_ext_wakeup; } -int am33xx_rtc_only_idle(unsigned long wfi_flags) +static int am33xx_rtc_only_idle(unsigned long wfi_flags) { omap_rtc_power_off_program(&omap_rtc->dev); am33xx_do_wfi_sram(wfi_flags); @@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state) if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) { nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); - if (nvmem) + if (!IS_ERR(nvmem)) nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, (void *)&rtc_magic_val); rtc_only_idle = 1; @@ -278,9 +278,12 @@ static void am33xx_pm_end(void) struct nvmem_device *nvmem; nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); + if (IS_ERR(nvmem)) + return; + m3_ipc->ops->finish_low_power(m3_ipc); if (rtc_only_idle) { - if (retrigger_irq) + if (retrigger_irq) { /* * 32 bits of Interrupt Set-Pending correspond to 32 * 32 interrupts. 
Compute the bit offset of the @@ -291,8 +294,10 @@ static void am33xx_pm_end(void) writel_relaxed(1 << (retrigger_irq & 31), gic_dist_base + GIC_INT_SET_PENDING_BASE + retrigger_irq / 32 * 4); - nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, - (void *)&val); + } + + nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, + (void *)&val); } rtc_only_idle = 0; @@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void) nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); - if (nvmem) { + if (!IS_ERR(nvmem)) { nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, (void *)&rtc_magic_val); if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 75ac046cae52..c486a6f84c2c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3652,37 +3652,25 @@ EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF) -static int __spi_of_device_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* must call put_device() when done with returned spi_device device */ struct spi_device *of_find_spi_device_by_node(struct device_node *node) { - struct device *dev = bus_find_device(&spi_bus_type, NULL, node, - __spi_of_device_match); + struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); + return dev ? to_spi_device(dev) : NULL; } EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); #endif /* IS_ENABLED(CONFIG_OF) */ #if IS_ENABLED(CONFIG_OF_DYNAMIC) -static int __spi_of_controller_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* the spi controllers are not using spi_bus, so we find it with another way */ static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; - dev = class_find_device(&spi_master_class, NULL, node, - __spi_of_controller_match); + dev = class_find_device_by_of_node(&spi_master_class, node); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device(&spi_slave_class, NULL, node, - __spi_of_controller_match); + dev = class_find_device_by_of_node(&spi_slave_class, node); if (!dev) return NULL; @@ -3753,11 +3741,6 @@ static int spi_acpi_controller_match(struct device *dev, const void *data) return ACPI_COMPANION(dev->parent) == data; } -static int spi_acpi_device_match(struct device *dev, const void *data) -{ - return ACPI_COMPANION(dev) == data; -} - static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) { struct device *dev; @@ -3777,8 +3760,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) { struct device *dev; - dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); - + dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); return dev ? 
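	/*
	 * Note (added for clarity, not part of the patch): the pm33xx fixes
	 * above follow from devm_nvmem_device_get() returning ERR_PTR() on
	 * failure rather than NULL, so the old "if (nvmem)" tests could never
	 * detect a missing "omap_rtc_scratch0" cell.  The usual idiom is:
	 *
	 *	nvmem = devm_nvmem_device_get(dev, "omap_rtc_scratch0");
	 *	if (IS_ERR(nvmem))
	 *		return PTR_ERR(nvmem);	(or simply bail out, as here)
	 */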
to_spi_device(dev) : NULL; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 04eda111920e..661bb9358364 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; bool read_len_valid = false; - uint32_t read_len = se_cmd->data_length; + uint32_t read_len; /* * cmd has been completed already from timeout, just reclaim * data area space and free cmd */ - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON_ONCE(se_cmd); goto out; + } list_del_init(&cmd->queue_entry); @@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * goto done; } + read_len = se_cmd->data_length; if (se_cmd->data_direction == DMA_FROM_DEVICE && (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { read_len_valid = true; @@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) */ scsi_status = SAM_STAT_CHECK_CONDITION; list_del_init(&cmd->queue_entry); + cmd->se_cmd = NULL; } else { list_del_init(&cmd->queue_entry); idr_remove(&udev->commands, id); @@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON(!cmd->se_cmd); list_del_init(&cmd->queue_entry); if (err_level == 1) { /* diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 566728fbaf3c..802c1210558f 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2952,17 +2952,11 @@ void do_SAK(struct tty_struct *tty) EXPORT_SYMBOL(do_SAK); -static int dev_match_devt(struct device *dev, const void *data) -{ - const dev_t *devt = data; - return dev->devt == *devt; -} - /* Must put_device() after it's unused! */ static struct device *tty_get_device(struct tty_struct *tty) { dev_t devt = tty_devnum(tty); - return class_find_device(tty_class, NULL, &devt, dev_match_devt); + return class_find_device_by_devt(tty_class, devt); } diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 9987c399819f..275568abc670 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -36,8 +36,7 @@ menuconfig USB_SUPPORT if USB_SUPPORT -config USB_COMMON - tristate +source "drivers/usb/common/Kconfig" config USB_ARCH_HAS_HCD def_bool y @@ -112,6 +111,8 @@ source "drivers/usb/usbip/Kconfig" endif +source "drivers/usb/cdns3/Kconfig" + source "drivers/usb/mtu3/Kconfig" source "drivers/usb/musb/Kconfig" @@ -173,36 +174,4 @@ source "drivers/usb/typec/Kconfig" source "drivers/usb/roles/Kconfig" -config USB_LED_TRIG - bool "USB LED Triggers" - depends on LEDS_CLASS && LEDS_TRIGGERS - select USB_COMMON - help - This option adds LED triggers for USB host and/or gadget activity. - - Say Y here if you are working on a system with led-class supported - LEDs and you want to use them as activity indicators for USB host or - gadget. - -config USB_ULPI_BUS - tristate "USB ULPI PHY interface support" - select USB_COMMON - help - UTMI+ Low Pin Interface (ULPI) is specification for a commonly used - USB 2.0 PHY interface. The ULPI specification defines a standard set - of registers that can be used to detect the vendor and product which - allows ULPI to be handled as a bus. This module is the driver for that - bus. 
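# Note (added for clarity, not part of the patch): USB_COMMON, USB_LED_TRIG and
# USB_ULPI_BUS are not being dropped here; this hunk relocates them, presumably
# into the newly sourced drivers/usb/common/Kconfig, while the new cdns3
# Kconfig and Makefile entries are wired into drivers/usb/ alongside them.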
- - The ULPI interfaces (the buses) are registered by the drivers for USB - controllers which support ULPI register access and have ULPI PHY - attached to them. The ULPI PHY drivers themselves are normal PHY - drivers. - - ULPI PHYs provide often functions such as ADP sensing/probing (OTG - protocol) and USB charger detection. - - To compile this driver as a module, choose M here: the module will - be called ulpi. - endif # USB_SUPPORT diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index db064dd59e08..1c1c1d659394 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -13,6 +13,8 @@ obj-$(CONFIG_USB_DWC3) += dwc3/ obj-$(CONFIG_USB_DWC2) += dwc2/ obj-$(CONFIG_USB_ISP1760) += isp1760/ +obj-$(CONFIG_USB_CDNS3) += cdns3/ + obj-$(CONFIG_USB_MON) += mon/ obj-$(CONFIG_USB_MTU3) += mtu3/ diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig new file mode 100644 index 000000000000..d0331613a355 --- /dev/null +++ b/drivers/usb/cdns3/Kconfig @@ -0,0 +1,46 @@ +config USB_CDNS3 + tristate "Cadence USB3 Dual-Role Controller" + depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA + select USB_XHCI_PLATFORM if USB_XHCI_HCD + select USB_ROLE_SWITCH + help + Say Y here if your system has a Cadence USB3 dual-role controller. + It supports: dual-role switch, Host-only, and Peripheral-only. + + If you choose to build this driver is a dynamically linked + as module, the module will be called cdns3.ko. + +if USB_CDNS3 + +config USB_CDNS3_GADGET + bool "Cadence USB3 device controller" + depends on USB_GADGET=y || USB_GADGET=USB_CDNS3 + help + Say Y here to enable device controller functionality of the + Cadence USBSS-DEV driver. + + This controller supports FF, HS and SS mode. It doesn't support + LS and SSP mode. + +config USB_CDNS3_HOST + bool "Cadence USB3 host controller" + depends on USB=y || USB=USB_CDNS3 + help + Say Y here to enable host controller functionality of the + Cadence driver. + + Host controller is compliant with XHCI so it will use + standard XHCI driver. + +config USB_CDNS3_PCI_WRAP + tristate "Cadence USB3 support on PCIe-based platforms" + depends on USB_PCI && ACPI + default USB_CDNS3 + help + If you're using the USBSS Core IP with a PCIe, please say + 'Y' or 'M' here. + + If you choose to build this driver as module it will + be dynamically linked and module will be called cdns3-pci.ko + +endif diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile new file mode 100644 index 000000000000..a703547350bb --- /dev/null +++ b/drivers/usb/cdns3/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# define_trace.h needs to know how to find our header +CFLAGS_trace.o := -I$(src) + +cdns3-y := core.o drd.o + +obj-$(CONFIG_USB_CDNS3) += cdns3.o +cdns3-$(CONFIG_USB_CDNS3_GADGET) += gadget.o ep0.o + +ifneq ($(CONFIG_USB_CDNS3_GADGET),) +cdns3-$(CONFIG_TRACING) += trace.o +endif + +cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o + +obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c new file mode 100644 index 000000000000..c41ddb61b857 --- /dev/null +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS PCI Glue driver + * + * Copyright (C) 2018-2019 Cadence. 
+ * + * Author: Pawel Laszczak <pawell@cadence.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> + +struct cdns3_wrap { + struct platform_device *plat_dev; + struct resource dev_res[6]; + int devfn; +}; + +#define RES_IRQ_HOST_ID 0 +#define RES_IRQ_PERIPHERAL_ID 1 +#define RES_IRQ_OTG_ID 2 +#define RES_HOST_ID 3 +#define RES_DEV_ID 4 +#define RES_DRD_ID 5 + +#define PCI_BAR_HOST 0 +#define PCI_BAR_DEV 2 +#define PCI_BAR_OTG 0 + +#define PCI_DEV_FN_HOST_DEVICE 0 +#define PCI_DEV_FN_OTG 1 + +#define PCI_DRIVER_NAME "cdns3-pci-usbss" +#define PLAT_DRIVER_NAME "cdns-usb3" + +#define CDNS_VENDOR_ID 0x17cd +#define CDNS_DEVICE_ID 0x0100 + +static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev) +{ + struct pci_dev *func; + + /* + * Gets the second function. + * It's little tricky, but this platform has two function. + * The fist keeps resources for Host/Device while the second + * keeps resources for DRD/OTG. + */ + func = pci_get_device(pdev->vendor, pdev->device, NULL); + if (unlikely(!func)) + return NULL; + + if (func->devfn == pdev->devfn) { + func = pci_get_device(pdev->vendor, pdev->device, func); + if (unlikely(!func)) + return NULL; + } + + return func; +} + +static int cdns3_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct platform_device_info plat_info; + struct cdns3_wrap *wrap; + struct resource *res; + struct pci_dev *func; + int err; + + /* + * for GADGET/HOST PCI (devfn) function number is 0, + * for OTG PCI (devfn) function number is 1 + */ + if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE && + pdev->devfn != PCI_DEV_FN_OTG)) + return -EINVAL; + + func = cdns3_get_second_fun(pdev); + if (unlikely(!func)) + return -EINVAL; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", err); + return err; + } + + pci_set_master(pdev); + + if (pci_is_enabled(func)) { + wrap = pci_get_drvdata(func); + } else { + wrap = kzalloc(sizeof(*wrap), GFP_KERNEL); + if (!wrap) { + pci_disable_device(pdev); + return -ENOMEM; + } + } + + res = wrap->dev_res; + + if (pdev->devfn == PCI_DEV_FN_HOST_DEVICE) { + /* function 0: host(BAR_0) + device(BAR_1).*/ + dev_dbg(&pdev->dev, "Initialize Device resources\n"); + res[RES_DEV_ID].start = pci_resource_start(pdev, PCI_BAR_DEV); + res[RES_DEV_ID].end = pci_resource_end(pdev, PCI_BAR_DEV); + res[RES_DEV_ID].name = "dev"; + res[RES_DEV_ID].flags = IORESOURCE_MEM; + dev_dbg(&pdev->dev, "USBSS-DEV physical base addr: %pa\n", + &res[RES_DEV_ID].start); + + res[RES_HOST_ID].start = pci_resource_start(pdev, PCI_BAR_HOST); + res[RES_HOST_ID].end = pci_resource_end(pdev, PCI_BAR_HOST); + res[RES_HOST_ID].name = "xhci"; + res[RES_HOST_ID].flags = IORESOURCE_MEM; + dev_dbg(&pdev->dev, "USBSS-XHCI physical base addr: %pa\n", + &res[RES_HOST_ID].start); + + /* Interrupt for XHCI */ + wrap->dev_res[RES_IRQ_HOST_ID].start = pdev->irq; + wrap->dev_res[RES_IRQ_HOST_ID].name = "host"; + wrap->dev_res[RES_IRQ_HOST_ID].flags = IORESOURCE_IRQ; + + /* Interrupt device. It's the same as for HOST. 
*/ + wrap->dev_res[RES_IRQ_PERIPHERAL_ID].start = pdev->irq; + wrap->dev_res[RES_IRQ_PERIPHERAL_ID].name = "peripheral"; + wrap->dev_res[RES_IRQ_PERIPHERAL_ID].flags = IORESOURCE_IRQ; + } else { + res[RES_DRD_ID].start = pci_resource_start(pdev, PCI_BAR_OTG); + res[RES_DRD_ID].end = pci_resource_end(pdev, PCI_BAR_OTG); + res[RES_DRD_ID].name = "otg"; + res[RES_DRD_ID].flags = IORESOURCE_MEM; + dev_dbg(&pdev->dev, "USBSS-DRD physical base addr: %pa\n", + &res[RES_DRD_ID].start); + + /* Interrupt for OTG/DRD. */ + wrap->dev_res[RES_IRQ_OTG_ID].start = pdev->irq; + wrap->dev_res[RES_IRQ_OTG_ID].name = "otg"; + wrap->dev_res[RES_IRQ_OTG_ID].flags = IORESOURCE_IRQ; + } + + if (pci_is_enabled(func)) { + /* set up platform device info */ + memset(&plat_info, 0, sizeof(plat_info)); + plat_info.parent = &pdev->dev; + plat_info.fwnode = pdev->dev.fwnode; + plat_info.name = PLAT_DRIVER_NAME; + plat_info.id = pdev->devfn; + wrap->devfn = pdev->devfn; + plat_info.res = wrap->dev_res; + plat_info.num_res = ARRAY_SIZE(wrap->dev_res); + plat_info.dma_mask = pdev->dma_mask; + /* register platform device */ + wrap->plat_dev = platform_device_register_full(&plat_info); + if (IS_ERR(wrap->plat_dev)) { + pci_disable_device(pdev); + kfree(wrap); + return PTR_ERR(wrap->plat_dev); + } + } + + pci_set_drvdata(pdev, wrap); + return err; +} + +static void cdns3_pci_remove(struct pci_dev *pdev) +{ + struct cdns3_wrap *wrap; + struct pci_dev *func; + + func = cdns3_get_second_fun(pdev); + + wrap = (struct cdns3_wrap *)pci_get_drvdata(pdev); + if (wrap->devfn == pdev->devfn) + platform_device_unregister(wrap->plat_dev); + + if (!pci_is_enabled(func)) + kfree(wrap); +} + +static const struct pci_device_id cdns3_pci_ids[] = { + { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, + { 0, } +}; + +static struct pci_driver cdns3_pci_driver = { + .name = PCI_DRIVER_NAME, + .id_table = cdns3_pci_ids, + .probe = cdns3_pci_probe, + .remove = cdns3_pci_remove, +}; + +module_pci_driver(cdns3_pci_driver); +MODULE_DEVICE_TABLE(pci, cdns3_pci_ids); + +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence USBSS PCI wrapperr"); diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c new file mode 100644 index 000000000000..06f1e105be4e --- /dev/null +++ b/drivers/usb/cdns3/core.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver. + * + * Copyright (C) 2018-2019 Cadence. 
+ * Copyright (C) 2017-2018 NXP + * Copyright (C) 2019 Texas Instruments + * + * Author: Peter Chen <peter.chen@nxp.com> + * Pawel Laszczak <pawell@cadence.com> + * Roger Quadros <rogerq@ti.com> + */ + +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> + +#include "gadget.h" +#include "core.h" +#include "host-export.h" +#include "gadget-export.h" +#include "drd.h" + +static int cdns3_idle_init(struct cdns3 *cdns); + +static inline +struct cdns3_role_driver *cdns3_get_current_role_driver(struct cdns3 *cdns) +{ + WARN_ON(!cdns->roles[cdns->role]); + return cdns->roles[cdns->role]; +} + +static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role) +{ + int ret; + + if (WARN_ON(role > USB_ROLE_DEVICE)) + return 0; + + mutex_lock(&cdns->mutex); + cdns->role = role; + mutex_unlock(&cdns->mutex); + + if (!cdns->roles[role]) + return -ENXIO; + + if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE) + return 0; + + mutex_lock(&cdns->mutex); + ret = cdns->roles[role]->start(cdns); + if (!ret) + cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE; + mutex_unlock(&cdns->mutex); + + return ret; +} + +static void cdns3_role_stop(struct cdns3 *cdns) +{ + enum usb_role role = cdns->role; + + if (WARN_ON(role > USB_ROLE_DEVICE)) + return; + + if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE) + return; + + mutex_lock(&cdns->mutex); + cdns->roles[role]->stop(cdns); + cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE; + mutex_unlock(&cdns->mutex); +} + +static void cdns3_exit_roles(struct cdns3 *cdns) +{ + cdns3_role_stop(cdns); + cdns3_drd_exit(cdns); +} + +static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns); + +/** + * cdns3_core_init_role - initialize role of operation + * @cdns: Pointer to cdns3 structure + * + * Returns 0 on success otherwise negative errno + */ +static int cdns3_core_init_role(struct cdns3 *cdns) +{ + struct device *dev = cdns->dev; + enum usb_dr_mode best_dr_mode; + enum usb_dr_mode dr_mode; + int ret = 0; + + dr_mode = usb_get_dr_mode(dev); + cdns->role = USB_ROLE_NONE; + + /* + * If driver can't read mode by means of usb_get_dr_mode function then + * chooses mode according with Kernel configuration. This setting + * can be restricted later depending on strap pin configuration. + */ + if (dr_mode == USB_DR_MODE_UNKNOWN) { + if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && + IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } + + /* + * At this point cdns->dr_mode contains strap configuration. 
+ * Driver try update this setting considering kernel configuration + */ + best_dr_mode = cdns->dr_mode; + + ret = cdns3_idle_init(cdns); + if (ret) + return ret; + + if (dr_mode == USB_DR_MODE_OTG) { + best_dr_mode = cdns->dr_mode; + } else if (cdns->dr_mode == USB_DR_MODE_OTG) { + best_dr_mode = dr_mode; + } else if (cdns->dr_mode != dr_mode) { + dev_err(dev, "Incorrect DRD configuration\n"); + return -EINVAL; + } + + dr_mode = best_dr_mode; + + if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { + ret = cdns3_host_init(cdns); + if (ret) { + dev_err(dev, "Host initialization failed with %d\n", + ret); + goto err; + } + } + + if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { + ret = cdns3_gadget_init(cdns); + if (ret) { + dev_err(dev, "Device initialization failed with %d\n", + ret); + goto err; + } + } + + cdns->dr_mode = dr_mode; + + ret = cdns3_drd_update_mode(cdns); + if (ret) + goto err; + + if (cdns->dr_mode != USB_DR_MODE_OTG) { + ret = cdns3_hw_role_switch(cdns); + if (ret) + goto err; + } + + return ret; +err: + cdns3_exit_roles(cdns); + return ret; +} + +/** + * cdsn3_hw_role_state_machine - role switch state machine based on hw events. + * @cdns: Pointer to controller structure. + * + * Returns next role to be entered based on hw events. + */ +static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns) +{ + enum usb_role role; + int id, vbus; + + if (cdns->dr_mode != USB_DR_MODE_OTG) + goto not_otg; + + id = cdns3_get_id(cdns); + vbus = cdns3_get_vbus(cdns); + + /* + * Role change state machine + * Inputs: ID, VBUS + * Previous state: cdns->role + * Next state: role + */ + role = cdns->role; + + switch (role) { + case USB_ROLE_NONE: + /* + * Driver treats USB_ROLE_NONE synonymous to IDLE state from + * controller specification. + */ + if (!id) + role = USB_ROLE_HOST; + else if (vbus) + role = USB_ROLE_DEVICE; + break; + case USB_ROLE_HOST: /* from HOST, we can only change to NONE */ + if (id) + role = USB_ROLE_NONE; + break; + case USB_ROLE_DEVICE: /* from GADGET, we can only change to NONE*/ + if (!vbus) + role = USB_ROLE_NONE; + break; + } + + dev_dbg(cdns->dev, "role %d -> %d\n", cdns->role, role); + + return role; + +not_otg: + if (cdns3_is_host(cdns)) + role = USB_ROLE_HOST; + if (cdns3_is_device(cdns)) + role = USB_ROLE_DEVICE; + + return role; +} + +static int cdns3_idle_role_start(struct cdns3 *cdns) +{ + return 0; +} + +static void cdns3_idle_role_stop(struct cdns3 *cdns) +{ + /* Program Lane swap and bring PHY out of RESET */ + phy_reset(cdns->usb3_phy); +} + +static int cdns3_idle_init(struct cdns3 *cdns) +{ + struct cdns3_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = cdns3_idle_role_start; + rdrv->stop = cdns3_idle_role_stop; + rdrv->state = CDNS3_ROLE_STATE_INACTIVE; + rdrv->suspend = NULL; + rdrv->resume = NULL; + rdrv->name = "idle"; + + cdns->roles[USB_ROLE_NONE] = rdrv; + + return 0; +} + +/** + * cdns3_hw_role_switch - switch roles based on HW state + * @cdns3: controller + */ +int cdns3_hw_role_switch(struct cdns3 *cdns) +{ + enum usb_role real_role, current_role; + int ret = 0; + + /* Do nothing if role based on syfs. 
*/ + if (cdns->role_override) + return 0; + + pm_runtime_get_sync(cdns->dev); + + current_role = cdns->role; + real_role = cdsn3_hw_role_state_machine(cdns); + + /* Do nothing if nothing changed */ + if (current_role == real_role) + goto exit; + + cdns3_role_stop(cdns); + + dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role); + + ret = cdns3_role_start(cdns, real_role); + if (ret) { + /* Back to current role */ + dev_err(cdns->dev, "set %d has failed, back to %d\n", + real_role, current_role); + ret = cdns3_role_start(cdns, current_role); + if (ret) + dev_err(cdns->dev, "back to %d failed too\n", + current_role); + } +exit: + pm_runtime_put_sync(cdns->dev); + return ret; +} + +/** + * cdsn3_role_get - get current role of controller. + * + * @dev: Pointer to device structure + * + * Returns role + */ +static enum usb_role cdns3_role_get(struct device *dev) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + + return cdns->role; +} + +/** + * cdns3_role_set - set current role of controller. + * + * @dev: pointer to device object + * @role - the previous role + * Handles below events: + * - Role switch for dual-role devices + * - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices + */ +static int cdns3_role_set(struct device *dev, enum usb_role role) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + int ret = 0; + + pm_runtime_get_sync(cdns->dev); + + /* + * FIXME: switch role framework should be extended to meet + * requirements. Driver assumes that role can be controlled + * by SW or HW. Temporary workaround is to use USB_ROLE_NONE to + * switch from SW to HW control. + * + * For dr_mode == USB_DR_MODE_OTG: + * if user sets USB_ROLE_HOST or USB_ROLE_DEVICE then driver + * sets role_override flag and forces that role. + * if user sets USB_ROLE_NONE, driver clears role_override and lets + * HW state machine take over. + * + * For dr_mode != USB_DR_MODE_OTG: + * Assumptions: + * 1. Restricted user control between NONE and dr_mode. + * 2. Driver doesn't need to rely on role_override flag. + * 3. Driver needs to ensure that HW state machine is never called + * if dr_mode != USB_DR_MODE_OTG. + */ + if (role == USB_ROLE_NONE) + cdns->role_override = 0; + else + cdns->role_override = 1; + + /* + * HW state might have changed so driver need to trigger + * HW state machine if dr_mode == USB_DR_MODE_OTG. 
+ */ + if (!cdns->role_override && cdns->dr_mode == USB_DR_MODE_OTG) { + cdns3_hw_role_switch(cdns); + goto pm_put; + } + + if (cdns->role == role) + goto pm_put; + + if (cdns->dr_mode == USB_DR_MODE_HOST) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_HOST: + break; + default: + ret = -EPERM; + goto pm_put; + } + } + + if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_DEVICE: + break; + default: + ret = -EPERM; + goto pm_put; + } + } + + cdns3_role_stop(cdns); + ret = cdns3_role_start(cdns, role); + if (ret) { + dev_err(cdns->dev, "set role %d has failed\n", role); + ret = -EPERM; + } + +pm_put: + pm_runtime_put_sync(cdns->dev); + return ret; +} + +static const struct usb_role_switch_desc cdns3_switch_desc = { + .set = cdns3_role_set, + .get = cdns3_role_get, + .allow_userspace_control = true, +}; + +/** + * cdns3_probe - probe for cdns3 core device + * @pdev: Pointer to cdns3 core platform device + * + * Returns 0 on success otherwise negative errno + */ +static int cdns3_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct cdns3 *cdns; + void __iomem *regs; + int ret; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "error setting dma mask: %d\n", ret); + return -ENODEV; + } + + cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL); + if (!cdns) + return -ENOMEM; + + cdns->dev = dev; + + platform_set_drvdata(pdev, cdns); + + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host"); + if (!res) { + dev_err(dev, "missing host IRQ\n"); + return -ENODEV; + } + + cdns->xhci_res[0] = *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci"); + if (!res) { + dev_err(dev, "couldn't get xhci resource\n"); + return -ENXIO; + } + + cdns->xhci_res[1] = *res; + + cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral"); + if (cdns->dev_irq == -EPROBE_DEFER) + return cdns->dev_irq; + + if (cdns->dev_irq < 0) + dev_err(dev, "couldn't get peripheral irq\n"); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dev"); + regs = devm_ioremap_resource(dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + cdns->dev_regs = regs; + + cdns->otg_irq = platform_get_irq_byname(pdev, "otg"); + if (cdns->otg_irq == -EPROBE_DEFER) + return cdns->otg_irq; + + if (cdns->otg_irq < 0) { + dev_err(dev, "couldn't get otg irq\n"); + return cdns->otg_irq; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg"); + if (!res) { + dev_err(dev, "couldn't get otg resource\n"); + return -ENXIO; + } + + cdns->otg_res = *res; + + mutex_init(&cdns->mutex); + + cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy"); + if (IS_ERR(cdns->usb2_phy)) + return PTR_ERR(cdns->usb2_phy); + + ret = phy_init(cdns->usb2_phy); + if (ret) + return ret; + + cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy"); + if (IS_ERR(cdns->usb3_phy)) + return PTR_ERR(cdns->usb3_phy); + + ret = phy_init(cdns->usb3_phy); + if (ret) + goto err1; + + ret = phy_power_on(cdns->usb2_phy); + if (ret) + goto err2; + + ret = phy_power_on(cdns->usb3_phy); + if (ret) + goto err3; + + cdns->role_sw = usb_role_switch_register(dev, &cdns3_switch_desc); + if (IS_ERR(cdns->role_sw)) { + ret = PTR_ERR(cdns->role_sw); + dev_warn(dev, "Unable to register Role Switch\n"); + goto err4; + } + + ret = cdns3_drd_init(cdns); + if (ret) + goto err5; + + ret = cdns3_core_init_role(cdns); + if (ret) + goto err5; + + device_set_wakeup_capable(dev, true); + 
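	/*
	 * Illustrative note (not part of the patch): the ID/VBUS decision in
	 * cdsn3_hw_role_state_machine() earlier in this file reduces to:
	 *
	 *	NONE:	ID == 0			-> HOST
	 *		ID == 1 && VBUS == 1	-> DEVICE
	 *	HOST:	ID == 1			-> NONE
	 *	DEVICE:	VBUS == 0		-> NONE
	 *
	 * HOST and DEVICE can only drop back to NONE; NONE picks the next role
	 * from the ID pin and VBUS.  cdns3_hw_role_switch() then stops the old
	 * role driver and starts the new one, falling back to the previous
	 * role if the switch fails.
	 */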
pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + /* + * The controller needs less time between bus and controller suspend, + * and we also needs a small delay to avoid frequently entering low + * power mode. + */ + pm_runtime_set_autosuspend_delay(dev, 20); + pm_runtime_mark_last_busy(dev); + pm_runtime_use_autosuspend(dev); + dev_dbg(dev, "Cadence USB3 core: probe succeed\n"); + + return 0; +err5: + cdns3_drd_exit(cdns); + usb_role_switch_unregister(cdns->role_sw); +err4: + phy_power_off(cdns->usb3_phy); + +err3: + phy_power_off(cdns->usb2_phy); +err2: + phy_exit(cdns->usb3_phy); +err1: + phy_exit(cdns->usb2_phy); + + return ret; +} + +/** + * cdns3_remove - unbind drd driver and clean up + * @pdev: Pointer to Linux platform device + * + * Returns 0 on success otherwise negative errno + */ +static int cdns3_remove(struct platform_device *pdev) +{ + struct cdns3 *cdns = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + cdns3_exit_roles(cdns); + usb_role_switch_unregister(cdns->role_sw); + phy_power_off(cdns->usb2_phy); + phy_power_off(cdns->usb3_phy); + phy_exit(cdns->usb2_phy); + phy_exit(cdns->usb3_phy); + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int cdns3_suspend(struct device *dev) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + unsigned long flags; + + if (cdns->role == USB_ROLE_HOST) + return 0; + + if (pm_runtime_status_suspended(dev)) + pm_runtime_resume(dev); + + if (cdns->roles[cdns->role]->suspend) { + spin_lock_irqsave(&cdns->gadget_dev->lock, flags); + cdns->roles[cdns->role]->suspend(cdns, false); + spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags); + } + + return 0; +} + +static int cdns3_resume(struct device *dev) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + unsigned long flags; + + if (cdns->role == USB_ROLE_HOST) + return 0; + + if (cdns->roles[cdns->role]->resume) { + spin_lock_irqsave(&cdns->gadget_dev->lock, flags); + cdns->roles[cdns->role]->resume(cdns, false); + spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags); + } + + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; +} +#endif + +static const struct dev_pm_ops cdns3_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume) +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_cdns3_match[] = { + { .compatible = "cdns,usb3" }, + { }, +}; +MODULE_DEVICE_TABLE(of, of_cdns3_match); +#endif + +static struct platform_driver cdns3_driver = { + .probe = cdns3_probe, + .remove = cdns3_remove, + .driver = { + .name = "cdns-usb3", + .of_match_table = of_match_ptr(of_cdns3_match), + .pm = &cdns3_pm_ops, + }, +}; + +module_platform_driver(cdns3_driver); + +MODULE_ALIAS("platform:cdns3"); +MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver"); diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h new file mode 100644 index 000000000000..969eb94de204 --- /dev/null +++ b/drivers/usb/cdns3/core.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USBSS DRD Header File. + * + * Copyright (C) 2017-2018 NXP + * Copyright (C) 2018-2019 Cadence. 
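Given the 20 ms autosuspend delay configured in probe above, the runtime-PM bracketing the I/O paths are expected to use looks roughly like the sketch below; the helper name is made up, the real call sites live in the host and gadget code.

#include <linux/pm_runtime.h>

/* Sketch: wake the controller before touching registers, then let it
 * autosuspend about 20 ms after the last piece of I/O.
 */
static void example_do_io(struct cdns3 *cdns)
{
        pm_runtime_get_sync(cdns->dev);

        /* ... program endpoint registers, ring the doorbell, ... */

        pm_runtime_mark_last_busy(cdns->dev);
        pm_runtime_put_autosuspend(cdns->dev);
}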
+ * + * Authors: Peter Chen <peter.chen@nxp.com> + * Pawel Laszczak <pawell@cadence.com> + */ +#include <linux/usb/otg.h> +#include <linux/usb/role.h> + +#ifndef __LINUX_CDNS3_CORE_H +#define __LINUX_CDNS3_CORE_H + +struct cdns3; + +/** + * struct cdns3_role_driver - host/gadget role driver + * @start: start this role + * @stop: stop this role + * @suspend: suspend callback for this role + * @resume: resume callback for this role + * @irq: irq handler for this role + * @name: role name string (host/gadget) + * @state: current state + */ +struct cdns3_role_driver { + int (*start)(struct cdns3 *cdns); + void (*stop)(struct cdns3 *cdns); + int (*suspend)(struct cdns3 *cdns, bool do_wakeup); + int (*resume)(struct cdns3 *cdns, bool hibernated); + const char *name; +#define CDNS3_ROLE_STATE_INACTIVE 0 +#define CDNS3_ROLE_STATE_ACTIVE 1 + int state; +}; + +#define CDNS3_XHCI_RESOURCES_NUM 2 +/** + * struct cdns3 - Representation of Cadence USB3 DRD controller. + * @dev: pointer to Cadence device struct + * @xhci_regs: pointer to base of xhci registers + * @xhci_res: the resource for xhci + * @dev_regs: pointer to base of dev registers + * @otg_res: the resource for otg + * @otg_v0_regs: pointer to base of v0 otg registers + * @otg_v1_regs: pointer to base of v1 otg registers + * @otg_regs: pointer to base of otg registers + * @otg_irq: irq number for otg controller + * @dev_irq: irq number for device controller + * @roles: array of supported roles for this controller + * @role: current role + * @host_dev: the child host device pointer for cdns3 core + * @gadget_dev: the child gadget device pointer for cdns3 core + * @usb2_phy: pointer to USB2 PHY + * @usb3_phy: pointer to USB3 PHY + * @mutex: the mutex for concurrent code at driver + * @dr_mode: supported mode of operation it can be only Host, only Device + * or OTG mode that allow to switch between Device and Host mode. + * This field based on firmware setting, kernel configuration + * and hardware configuration. + * @role_sw: pointer to role switch object. + * @role_override: set 1 if role rely on SW. + */ +struct cdns3 { + struct device *dev; + void __iomem *xhci_regs; + struct resource xhci_res[CDNS3_XHCI_RESOURCES_NUM]; + struct cdns3_usb_regs __iomem *dev_regs; + + struct resource otg_res; + struct cdns3_otg_legacy_regs *otg_v0_regs; + struct cdns3_otg_regs *otg_v1_regs; + struct cdns3_otg_common_regs *otg_regs; +#define CDNS3_CONTROLLER_V0 0 +#define CDNS3_CONTROLLER_V1 1 + u32 version; + + int otg_irq; + int dev_irq; + struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1]; + enum usb_role role; + struct platform_device *host_dev; + struct cdns3_device *gadget_dev; + struct phy *usb2_phy; + struct phy *usb3_phy; + /* mutext used in workqueue*/ + struct mutex mutex; + enum usb_dr_mode dr_mode; + struct usb_role_switch *role_sw; + int role_override; +}; + +int cdns3_hw_role_switch(struct cdns3 *cdns); + +#endif /* __LINUX_CDNS3_CORE_H */ diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h new file mode 100644 index 000000000000..2c9afbfe988b --- /dev/null +++ b/drivers/usb/cdns3/debug.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USBSS DRD Driver. + * Debug header file. + * + * Copyright (C) 2018-2019 Cadence. 
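struct cdns3_role_driver above is what each role hands to the core; a sketch of how a role could plug itself into cdns->roles[] follows. The names are hypothetical and only mirror what the gadget and host init paths are expected to do; the real registration code is not shown in this hunk.

/* Sketch only; assumes core.h is included and the callbacks exist. */
static int example_gadget_start(struct cdns3 *cdns)
{
        /* enable the device controller, request cdns->dev_irq, ... */
        return 0;
}

static void example_gadget_stop(struct cdns3 *cdns)
{
        /* quiesce endpoints, free the IRQ, ... */
}

static int example_register_gadget_role(struct cdns3 *cdns)
{
        struct cdns3_role_driver *rdrv;

        rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
        if (!rdrv)
                return -ENOMEM;

        rdrv->start = example_gadget_start;
        rdrv->stop = example_gadget_stop;
        rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
        rdrv->name = "gadget";

        cdns->roles[USB_ROLE_DEVICE] = rdrv;
        return 0;
}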
+ * + * Author: Pawel Laszczak <pawell@cadence.com> + */ +#ifndef __LINUX_CDNS3_DEBUG +#define __LINUX_CDNS3_DEBUG + +#include "core.h" + +static inline char *cdns3_decode_usb_irq(char *str, + enum usb_device_speed speed, + u32 usb_ists) +{ + int ret; + + ret = sprintf(str, "IRQ %08x = ", usb_ists); + + if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) { + ret += sprintf(str + ret, "Connection %s\n", + usb_speed_string(speed)); + } + if (usb_ists & USB_ISTS_DIS2I || usb_ists & USB_ISTS_DISI) + ret += sprintf(str + ret, "Disconnection "); + if (usb_ists & USB_ISTS_L2ENTI) + ret += sprintf(str + ret, "suspended "); + if (usb_ists & USB_ISTS_L1ENTI) + ret += sprintf(str + ret, "L1 enter "); + if (usb_ists & USB_ISTS_L1EXTI) + ret += sprintf(str + ret, "L1 exit "); + if (usb_ists & USB_ISTS_L2ENTI) + ret += sprintf(str + ret, "L2 enter "); + if (usb_ists & USB_ISTS_L2EXTI) + ret += sprintf(str + ret, "L2 exit "); + if (usb_ists & USB_ISTS_U3EXTI) + ret += sprintf(str + ret, "U3 exit "); + if (usb_ists & USB_ISTS_UWRESI) + ret += sprintf(str + ret, "Warm Reset "); + if (usb_ists & USB_ISTS_UHRESI) + ret += sprintf(str + ret, "Hot Reset "); + if (usb_ists & USB_ISTS_U2RESI) + ret += sprintf(str + ret, "Reset"); + + return str; +} + +static inline char *cdns3_decode_ep_irq(char *str, + u32 ep_sts, + const char *ep_name) +{ + int ret; + + ret = sprintf(str, "IRQ for %s: %08x ", ep_name, ep_sts); + + if (ep_sts & EP_STS_SETUP) + ret += sprintf(str + ret, "SETUP "); + if (ep_sts & EP_STS_IOC) + ret += sprintf(str + ret, "IOC "); + if (ep_sts & EP_STS_ISP) + ret += sprintf(str + ret, "ISP "); + if (ep_sts & EP_STS_DESCMIS) + ret += sprintf(str + ret, "DESCMIS "); + if (ep_sts & EP_STS_STREAMR) + ret += sprintf(str + ret, "STREAMR "); + if (ep_sts & EP_STS_MD_EXIT) + ret += sprintf(str + ret, "MD_EXIT "); + if (ep_sts & EP_STS_TRBERR) + ret += sprintf(str + ret, "TRBERR "); + if (ep_sts & EP_STS_NRDY) + ret += sprintf(str + ret, "NRDY "); + if (ep_sts & EP_STS_PRIME) + ret += sprintf(str + ret, "PRIME "); + if (ep_sts & EP_STS_SIDERR) + ret += sprintf(str + ret, "SIDERRT "); + if (ep_sts & EP_STS_OUTSMM) + ret += sprintf(str + ret, "OUTSMM "); + if (ep_sts & EP_STS_ISOERR) + ret += sprintf(str + ret, "ISOERR "); + if (ep_sts & EP_STS_IOT) + ret += sprintf(str + ret, "IOT "); + + return str; +} + +static inline char *cdns3_decode_epx_irq(char *str, + char *ep_name, + u32 ep_sts) +{ + return cdns3_decode_ep_irq(str, ep_sts, ep_name); +} + +static inline char *cdns3_decode_ep0_irq(char *str, + int dir, + u32 ep_sts) +{ + return cdns3_decode_ep_irq(str, ep_sts, + dir ? "ep0IN" : "ep0OUT"); +} + +/** + * Debug a transfer ring. + * + * Prints out all TRBs in the endpoint ring, even those after the Link TRB. + *. 
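The decoders above are plain sprintf() helpers, so the caller owns the output buffer. A sketch of a debug dump from the device-side interrupt path; the 256-byte buffer size is an assumption that comfortably fits all of the flag strings emitted above.

/* Sketch: print a decoded USB interrupt status word. */
static void example_dump_usb_ists(struct cdns3_device *priv_dev, u32 usb_ists)
{
        char str[256];

        dev_dbg(priv_dev->dev, "%s\n",
                cdns3_decode_usb_irq(str, priv_dev->gadget.speed, usb_ists));
}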
+ */ +static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep, + struct cdns3_trb *ring, char *str) +{ + dma_addr_t addr = priv_ep->trb_pool_dma; + struct cdns3_trb *trb; + int trb_per_sector; + int ret = 0; + int i; + + trb_per_sector = GET_TRBS_PER_SEGMENT(priv_ep->type); + + trb = &priv_ep->trb_pool[priv_ep->dequeue]; + ret += sprintf(str + ret, "\n\t\tRing contents for %s:", priv_ep->name); + + ret += sprintf(str + ret, + "\n\t\tRing deq index: %d, trb: %p (virt), 0x%llx (dma)\n", + priv_ep->dequeue, trb, + (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb)); + + trb = &priv_ep->trb_pool[priv_ep->enqueue]; + ret += sprintf(str + ret, + "\t\tRing enq index: %d, trb: %p (virt), 0x%llx (dma)\n", + priv_ep->enqueue, trb, + (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb)); + + ret += sprintf(str + ret, + "\t\tfree trbs: %d, CCS=%d, PCS=%d\n", + priv_ep->free_trbs, priv_ep->ccs, priv_ep->pcs); + + if (trb_per_sector > TRBS_PER_SEGMENT) + trb_per_sector = TRBS_PER_SEGMENT; + + if (trb_per_sector > TRBS_PER_SEGMENT) { + sprintf(str + ret, "\t\tTo big transfer ring %d\n", + trb_per_sector); + return str; + } + + for (i = 0; i < trb_per_sector; ++i) { + trb = &ring[i]; + ret += sprintf(str + ret, + "\t\t@%pad %08x %08x %08x\n", &addr, + le32_to_cpu(trb->buffer), + le32_to_cpu(trb->length), + le32_to_cpu(trb->control)); + addr += sizeof(*trb); + } + + return str; +} + +#endif /*__LINUX_CDNS3_DEBUG*/ diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c new file mode 100644 index 000000000000..16ad485f0b69 --- /dev/null +++ b/drivers/usb/cdns3/drd.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver. + * + * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2019 Texas Instruments + * + * Author: Pawel Laszczak <pawell@cadence.com> + * Roger Quadros <rogerq@ti.com> + * + * + */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/usb/otg.h> + +#include "gadget.h" +#include "drd.h" +#include "core.h" + +/** + * cdns3_set_mode - change mode of OTG Core + * @cdns: pointer to context structure + * @mode: selected mode from cdns_role + * + * Returns 0 on success otherwise negative errno + */ +int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case USB_DR_MODE_PERIPHERAL: + break; + case USB_DR_MODE_HOST: + break; + case USB_DR_MODE_OTG: + dev_dbg(cdns->dev, "Set controller to OTG mode\n"); + if (cdns->version == CDNS3_CONTROLLER_V1) { + reg = readl(&cdns->otg_v1_regs->override); + reg |= OVERRIDE_IDPULLUP; + writel(reg, &cdns->otg_v1_regs->override); + } else { + reg = readl(&cdns->otg_v0_regs->ctrl1); + reg |= OVERRIDE_IDPULLUP_V0; + writel(reg, &cdns->otg_v0_regs->ctrl1); + } + + /* + * Hardware specification says: "ID_VALUE must be valid within + * 50ms after idpullup is set to '1" so driver must wait + * 50ms before reading this pin. 
+ */ + usleep_range(50000, 60000); + break; + default: + dev_err(cdns->dev, "Unsupported mode of operation %d\n", mode); + return -EINVAL; + } + + return ret; +} + +int cdns3_get_id(struct cdns3 *cdns) +{ + int id; + + id = readl(&cdns->otg_regs->sts) & OTGSTS_ID_VALUE; + dev_dbg(cdns->dev, "OTG ID: %d", id); + + return id; +} + +int cdns3_get_vbus(struct cdns3 *cdns) +{ + int vbus; + + vbus = !!(readl(&cdns->otg_regs->sts) & OTGSTS_VBUS_VALID); + dev_dbg(cdns->dev, "OTG VBUS: %d", vbus); + + return vbus; +} + +int cdns3_is_host(struct cdns3 *cdns) +{ + if (cdns->dr_mode == USB_DR_MODE_HOST) + return 1; + else if (!cdns3_get_id(cdns)) + return 1; + + return 0; +} + +int cdns3_is_device(struct cdns3 *cdns) +{ + if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) + return 1; + else if (cdns->dr_mode == USB_DR_MODE_OTG) + if (cdns3_get_id(cdns)) + return 1; + + return 0; +} + +/** + * cdns3_otg_disable_irq - Disable all OTG interrupts + * @cdns: Pointer to controller context structure + */ +static void cdns3_otg_disable_irq(struct cdns3 *cdns) +{ + writel(0, &cdns->otg_regs->ien); +} + +/** + * cdns3_otg_enable_irq - enable id and sess_valid interrupts + * @cdns: Pointer to controller context structure + */ +static void cdns3_otg_enable_irq(struct cdns3 *cdns) +{ + writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT | + OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien); +} + +/** + * cdns3_drd_switch_host - start/stop host + * @cdns: Pointer to controller context structure + * @on: 1 for start, 0 for stop + * + * Returns 0 on success otherwise negative errno + */ +int cdns3_drd_switch_host(struct cdns3 *cdns, int on) +{ + int ret, val; + u32 reg = OTGCMD_OTG_DIS; + + /* switch OTG core */ + if (on) { + writel(OTGCMD_HOST_BUS_REQ | reg, &cdns->otg_regs->cmd); + + dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n"); + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, + val & OTGSTS_XHCI_READY, + 1, 100000); + if (ret) { + dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); + return ret; + } + } else { + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP | + OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF, + &cdns->otg_regs->cmd); + /* Waiting till H_IDLE state.*/ + readl_poll_timeout_atomic(&cdns->otg_regs->state, val, + !(val & OTGSTATE_HOST_STATE_MASK), + 1, 2000000); + } + + return 0; +} + +/** + * cdns3_drd_switch_gadget - start/stop gadget + * @cdns: Pointer to controller context structure + * @on: 1 for start, 0 for stop + * + * Returns 0 on success otherwise negative errno + */ +int cdns3_drd_switch_gadget(struct cdns3 *cdns, int on) +{ + int ret, val; + u32 reg = OTGCMD_OTG_DIS; + + /* switch OTG core */ + if (on) { + writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd); + + dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n"); + + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, + val & OTGSTS_DEV_READY, + 1, 100000); + if (ret) { + dev_err(cdns->dev, "timeout waiting for dev_ready\n"); + return ret; + } + } else { + /* + * driver should wait at least 10us after disabling Device + * before turning-off Device (DEV_BUS_DROP) + */ + usleep_range(20, 30); + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP | + OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF, + &cdns->otg_regs->cmd); + /* Waiting till DEV_IDLE state.*/ + readl_poll_timeout_atomic(&cdns->otg_regs->state, val, + !(val & OTGSTATE_DEV_STATE_MASK), + 1, 2000000); + } + + return 0; +} + +/** + * cdns3_init_otg_mode - initialize drd controller + * @cdns: Pointer to controller context structure + * + 
* Returns 0 on success otherwise negative errno + */ +static int cdns3_init_otg_mode(struct cdns3 *cdns) +{ + int ret = 0; + + cdns3_otg_disable_irq(cdns); + /* clear all interrupts */ + writel(~0, &cdns->otg_regs->ivect); + + ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG); + if (ret) + return ret; + + cdns3_otg_enable_irq(cdns); + return ret; +} + +/** + * cdns3_drd_update_mode - initialize mode of operation + * @cdns: Pointer to controller context structure + * + * Returns 0 on success otherwise negative errno + */ +int cdns3_drd_update_mode(struct cdns3 *cdns) +{ + int ret = 0; + + switch (cdns->dr_mode) { + case USB_DR_MODE_PERIPHERAL: + ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL); + break; + case USB_DR_MODE_HOST: + ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST); + break; + case USB_DR_MODE_OTG: + ret = cdns3_init_otg_mode(cdns); + break; + default: + dev_err(cdns->dev, "Unsupported mode of operation %d\n", + cdns->dr_mode); + return -EINVAL; + } + + return ret; +} + +static irqreturn_t cdns3_drd_thread_irq(int irq, void *data) +{ + struct cdns3 *cdns = data; + + cdns3_hw_role_switch(cdns); + + return IRQ_HANDLED; +} + +/** + * cdns3_drd_irq - interrupt handler for OTG events + * + * @irq: irq number for cdns3 core device + * @data: structure of cdns3 + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns3_drd_irq(int irq, void *data) +{ + irqreturn_t ret = IRQ_NONE; + struct cdns3 *cdns = data; + u32 reg; + + if (cdns->dr_mode != USB_DR_MODE_OTG) + return ret; + + reg = readl(&cdns->otg_regs->ivect); + + if (!reg) + return ret; + + if (reg & OTGIEN_ID_CHANGE_INT) { + dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n", + cdns3_get_id(cdns)); + + ret = IRQ_WAKE_THREAD; + } + + if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) { + dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n", + cdns3_get_vbus(cdns)); + + ret = IRQ_WAKE_THREAD; + } + + writel(~0, &cdns->otg_regs->ivect); + return ret; +} + +int cdns3_drd_init(struct cdns3 *cdns) +{ + void __iomem *regs; + int ret = 0; + u32 state; + + regs = devm_ioremap_resource(cdns->dev, &cdns->otg_res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + /* Detection of DRD version. Controller has been released + * in two versions. Both are similar, but they have same changes + * in register maps. + * The first register in old version is command register and it's read + * only, so driver should read 0 from it. On the other hand, in v1 + * the first register contains device ID number which is not set to 0. + * Driver uses this fact to detect the proper version of + * controller. + */ + cdns->otg_v0_regs = regs; + if (!readl(&cdns->otg_v0_regs->cmd)) { + cdns->version = CDNS3_CONTROLLER_V0; + cdns->otg_v1_regs = NULL; + cdns->otg_regs = regs; + writel(1, &cdns->otg_v0_regs->simulate); + dev_info(cdns->dev, "DRD version v0 (%08x)\n", + readl(&cdns->otg_v0_regs->version)); + } else { + cdns->otg_v0_regs = NULL; + cdns->otg_v1_regs = regs; + cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd; + cdns->version = CDNS3_CONTROLLER_V1; + writel(1, &cdns->otg_v1_regs->simulate); + dev_info(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", + readl(&cdns->otg_v1_regs->did), + readl(&cdns->otg_v1_regs->rid)); + } + + state = OTGSTS_STRAP(readl(&cdns->otg_regs->sts)); + + /* Update dr_mode according to STRAP configuration. 
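The switch helpers above are the register-level half of a role change; when the role machine picks device mode, the expected sequencing looks roughly like this sketch. The function names example_* are hypothetical and error handling is trimmed; the real sequencing lives in core.c and the gadget code.

/* Sketch: the DRD half of starting and stopping the device role. */
static int example_start_device_role(struct cdns3 *cdns)
{
        int ret;

        /* Request the bus for device mode and wait for OTGSTS_DEV_READY. */
        ret = cdns3_drd_switch_gadget(cdns, 1);
        if (ret)
                return ret;

        /* ... then bring up the device controller (cdns3_gadget_init()). */
        return 0;
}

static void example_stop_device_role(struct cdns3 *cdns)
{
        /* Tear down the gadget side first, then drop the bus. */
        cdns3_drd_switch_gadget(cdns, 0);
}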
*/ + cdns->dr_mode = USB_DR_MODE_OTG; + if (state == OTGSTS_STRAP_HOST) { + dev_dbg(cdns->dev, "Controller strapped to HOST\n"); + cdns->dr_mode = USB_DR_MODE_HOST; + } else if (state == OTGSTS_STRAP_GADGET) { + dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n"); + cdns->dr_mode = USB_DR_MODE_PERIPHERAL; + } + + ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq, + cdns3_drd_irq, + cdns3_drd_thread_irq, + IRQF_SHARED, + dev_name(cdns->dev), cdns); + + if (ret) { + dev_err(cdns->dev, "couldn't get otg_irq\n"); + return ret; + } + + state = readl(&cdns->otg_regs->sts); + if (OTGSTS_OTG_NRDY(state) != 0) { + dev_err(cdns->dev, "Cadence USB3 OTG device not ready\n"); + return -ENODEV; + } + + return ret; +} + +int cdns3_drd_exit(struct cdns3 *cdns) +{ + cdns3_otg_disable_irq(cdns); + return 0; +} diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h new file mode 100644 index 000000000000..04e01c4d2377 --- /dev/null +++ b/drivers/usb/cdns3/drd.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USB3 DRD header file. + * + * Copyright (C) 2018-2019 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + */ +#ifndef __LINUX_CDNS3_DRD +#define __LINUX_CDNS3_DRD + +#include <linux/usb/otg.h> +#include <linux/phy/phy.h> +#include "core.h" + +/* DRD register interface for version v1. */ +struct cdns3_otg_regs { + __le32 did; + __le32 rid; + __le32 capabilities; + __le32 reserved1; + __le32 cmd; + __le32 sts; + __le32 state; + __le32 reserved2; + __le32 ien; + __le32 ivect; + __le32 refclk; + __le32 tmr; + __le32 reserved3[4]; + __le32 simulate; + __le32 override; + __le32 susp_ctrl; + __le32 reserved4; + __le32 anasts; + __le32 adp_ramp_time; + __le32 ctrl1; + __le32 ctrl2; +}; + +/* DRD register interface for version v0. */ +struct cdns3_otg_legacy_regs { + __le32 cmd; + __le32 sts; + __le32 state; + __le32 refclk; + __le32 ien; + __le32 ivect; + __le32 reserved1[3]; + __le32 tmr; + __le32 reserved2[2]; + __le32 version; + __le32 capabilities; + __le32 reserved3[2]; + __le32 simulate; + __le32 reserved4[5]; + __le32 ctrl1; +}; + +/* + * Common registers interface for both version of DRD. + */ +struct cdns3_otg_common_regs { + __le32 cmd; + __le32 sts; + __le32 state; + __le32 different1; + __le32 ien; + __le32 ivect; +}; + +/* CDNS_RID - bitmasks */ +#define CDNS_RID(p) ((p) & GENMASK(15, 0)) + +/* CDNS_VID - bitmasks */ +#define CDNS_DID(p) ((p) & GENMASK(31, 0)) + +/* OTGCMD - bitmasks */ +/* "Request the bus for Device mode. */ +#define OTGCMD_DEV_BUS_REQ BIT(0) +/* Request the bus for Host mode */ +#define OTGCMD_HOST_BUS_REQ BIT(1) +/* Enable OTG mode. */ +#define OTGCMD_OTG_EN BIT(2) +/* Disable OTG mode */ +#define OTGCMD_OTG_DIS BIT(3) +/*"Configure OTG as A-Device. */ +#define OTGCMD_A_DEV_EN BIT(4) +/*"Configure OTG as A-Device. */ +#define OTGCMD_A_DEV_DIS BIT(5) +/* Drop the bus for Device mod e. */ +#define OTGCMD_DEV_BUS_DROP BIT(8) +/* Drop the bus for Host mode*/ +#define OTGCMD_HOST_BUS_DROP BIT(9) +/* Power Down USBSS-DEV. */ +#define OTGCMD_DEV_POWER_OFF BIT(11) +/* Power Down CDNSXHCI. */ +#define OTGCMD_HOST_POWER_OFF BIT(12) + +/* OTGIEN - bitmasks */ +/* ID change interrupt enable */ +#define OTGIEN_ID_CHANGE_INT BIT(0) +/* Vbusvalid fall detected interrupt enable.*/ +#define OTGIEN_VBUSVALID_RISE_INT BIT(4) +/* Vbusvalid fall detected interrupt enable */ +#define OTGIEN_VBUSVALID_FALL_INT BIT(5) + +/* OTGSTS - bitmasks */ +/* + * Current value of the ID pin. 
It is only valid when idpullup in + * OTGCTRL1_TYPE register is set to '1'. + */ +#define OTGSTS_ID_VALUE BIT(0) +/* Current value of the vbus_valid */ +#define OTGSTS_VBUS_VALID BIT(1) +/* Current value of the b_sess_vld */ +#define OTGSTS_SESSION_VALID BIT(2) +/*Device mode is active*/ +#define OTGSTS_DEV_ACTIVE BIT(3) +/* Host mode is active. */ +#define OTGSTS_HOST_ACTIVE BIT(4) +/* OTG Controller not ready. */ +#define OTGSTS_OTG_NRDY_MASK BIT(11) +#define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK) +/* + * Value of the strap pins. + * 000 - no default configuration + * 010 - Controller initiall configured as Host + * 100 - Controller initially configured as Device + */ +#define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12) +#define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00 +#define OTGSTS_STRAP_HOST_OTG 0x01 +#define OTGSTS_STRAP_HOST 0x02 +#define OTGSTS_STRAP_GADGET 0x04 +/* Host mode is turned on. */ +#define OTGSTS_XHCI_READY BIT(26) +/* "Device mode is turned on .*/ +#define OTGSTS_DEV_READY BIT(27) + +/* OTGSTATE- bitmasks */ +#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0) +#define OTGSTATE_HOST_STATE_MASK GENMASK(5, 3) +#define OTGSTATE_HOST_STATE_IDLE 0x0 +#define OTGSTATE_HOST_STATE_VBUS_FALL 0x7 +#define OTGSTATE_HOST_STATE(p) (((p) & OTGSTATE_HOST_STATE_MASK) >> 3) + +/* OTGREFCLK - bitmasks */ +#define OTGREFCLK_STB_CLK_SWITCH_EN BIT(31) + +/* OVERRIDE - bitmasks */ +#define OVERRIDE_IDPULLUP BIT(0) +/* Only for CDNS3_CONTROLLER_V0 version */ +#define OVERRIDE_IDPULLUP_V0 BIT(24) + +int cdns3_is_host(struct cdns3 *cdns); +int cdns3_is_device(struct cdns3 *cdns); +int cdns3_get_id(struct cdns3 *cdns); +int cdns3_get_vbus(struct cdns3 *cdns); +int cdns3_drd_init(struct cdns3 *cdns); +int cdns3_drd_exit(struct cdns3 *cdns); +int cdns3_drd_update_mode(struct cdns3 *cdns); +int cdns3_drd_switch_gadget(struct cdns3 *cdns, int on); +int cdns3_drd_switch_host(struct cdns3 *cdns, int on); +int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode); + +#endif /* __LINUX_CDNS3_DRD */ diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c new file mode 100644 index 000000000000..44f652e8b5a2 --- /dev/null +++ b/drivers/usb/cdns3/ep0.c @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver - gadget side. + * + * Copyright (C) 2018 Cadence Design Systems. 
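As a concrete reading of the strap macros above: bits 14:12 of OTGSTS hold the strap value, so a status word of 0x2001 decodes to strap 0b010 (OTGSTS_STRAP_HOST) with the ID bit also set. A small sketch of the decode, for illustration only:

/* Sketch: decoding a sample OTGSTS value with the macros above. */
static void example_decode_otgsts(struct cdns3 *cdns, u32 sts)
{
        u32 strap = OTGSTS_STRAP(sts);          /* bits 14:12 */

        if (strap == OTGSTS_STRAP_HOST)         /* 0x2: host-only strap */
                dev_dbg(cdns->dev, "strapped host, ID=%d VBUS=%d\n",
                        !!(sts & OTGSTS_ID_VALUE),
                        !!(sts & OTGSTS_VBUS_VALID));
        else if (strap == OTGSTS_STRAP_GADGET)  /* 0x4: device-only strap */
                dev_dbg(cdns->dev, "strapped peripheral\n");
        else
                dev_dbg(cdns->dev, "no default strap, full OTG\n");
}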
+ * Copyright (C) 2017-2018 NXP + * + * Authors: Pawel Jez <pjez@cadence.com>, + * Pawel Laszczak <pawell@cadence.com> + * Peter Chen <peter.chen@nxp.com> + */ + +#include <linux/usb/composite.h> +#include <linux/iopoll.h> + +#include "gadget.h" +#include "trace.h" + +static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_CONTROL, +}; + +/** + * cdns3_ep0_run_transfer - Do transfer on default endpoint hardware + * @priv_dev: extended gadget object + * @dma_addr: physical address where data is/will be stored + * @length: data length + * @erdy: set it to 1 when ERDY packet should be sent - + * exit from flow control state + */ +static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, + dma_addr_t dma_addr, + unsigned int length, int erdy, int zlp) +{ + struct cdns3_usb_regs __iomem *regs = priv_dev->regs; + struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; + + priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr); + priv_ep->trb_pool[0].length = TRB_LEN(length); + + if (zlp) { + priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr); + priv_ep->trb_pool[1].length = TRB_LEN(0); + priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL); + } else { + priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[1].control = 0; + } + + trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool); + + cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir); + + writel(EP_STS_TRBERR, ®s->ep_sts); + writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), ®s->ep_traddr); + trace_cdns3_doorbell_ep0(priv_dev->ep0_data_dir ? "ep0in" : "ep0out", + readl(®s->ep_traddr)); + + /* TRB should be prepared before starting transfer. */ + writel(EP_CMD_DRDY, ®s->ep_cmd); + + /* Resume controller before arming transfer. */ + __cdns3_gadget_wakeup(priv_dev); + + if (erdy) + writel(EP_CMD_ERDY, &priv_dev->regs->ep_cmd); +} + +/** + * cdns3_ep0_delegate_req - Returns status of handling setup packet + * Setup is handled by gadget driver + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns zero on success or negative value on failure + */ +static int cdns3_ep0_delegate_req(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + int ret; + + spin_unlock(&priv_dev->lock); + priv_dev->setup_pending = 1; + ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req); + priv_dev->setup_pending = 0; + spin_lock(&priv_dev->lock); + return ret; +} + +static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev) +{ + priv_dev->ep0_data_dir = 0; + priv_dev->ep0_stage = CDNS3_SETUP_STAGE; + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, + sizeof(struct usb_ctrlrequest), 0, 0); +} + +static void cdns3_ep0_complete_setup(struct cdns3_device *priv_dev, + u8 send_stall, u8 send_erdy) +{ + struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; + struct usb_request *request; + + request = cdns3_next_request(&priv_ep->pending_req_list); + if (request) + list_del_init(&request->list); + + if (send_stall) { + trace_cdns3_halt(priv_ep, send_stall, 0); + /* set_stall on ep0 */ + cdns3_select_ep(priv_dev, 0x00); + writel(EP_CMD_SSTALL, &priv_dev->regs->ep_cmd); + } else { + cdns3_prepare_setup_packet(priv_dev); + } + + priv_dev->ep0_stage = CDNS3_SETUP_STAGE; + writel((send_erdy ? 
EP_CMD_ERDY : 0) | EP_CMD_REQ_CMPL, + &priv_dev->regs->ep_cmd); + + cdns3_allow_enable_l1(priv_dev, 1); +} + +/** + * cdns3_req_ep0_set_configuration - Handling of SET_CONFIG standard USB request + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, USB_GADGET_DELAYED_STATUS on deferred status stage, + * error code on error + */ +static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + enum usb_device_state device_state = priv_dev->gadget.state; + struct cdns3_endpoint *priv_ep; + u32 config = le16_to_cpu(ctrl_req->wValue); + int result = 0; + int i; + + switch (device_state) { + case USB_STATE_ADDRESS: + /* Configure non-control EPs */ + for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { + priv_ep = priv_dev->eps[i]; + if (!priv_ep) + continue; + + if (priv_ep->flags & EP_CLAIMED) + cdns3_ep_config(priv_ep); + } + + result = cdns3_ep0_delegate_req(priv_dev, ctrl_req); + + if (result) + return result; + + if (config) { + cdns3_set_hw_configuration(priv_dev); + } else { + cdns3_hw_reset_eps_config(priv_dev); + usb_gadget_set_state(&priv_dev->gadget, + USB_STATE_ADDRESS); + } + break; + case USB_STATE_CONFIGURED: + result = cdns3_ep0_delegate_req(priv_dev, ctrl_req); + + if (!config && !result) { + cdns3_hw_reset_eps_config(priv_dev); + usb_gadget_set_state(&priv_dev->gadget, + USB_STATE_ADDRESS); + } + break; + default: + result = -EINVAL; + } + + return result; +} + +/** + * cdns3_req_ep0_set_address - Handling of SET_ADDRESS standard USB request + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, error code on error + */ +static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + enum usb_device_state device_state = priv_dev->gadget.state; + u32 reg; + u32 addr; + + addr = le16_to_cpu(ctrl_req->wValue); + + if (addr > USB_DEVICE_MAX_ADDRESS) { + dev_err(priv_dev->dev, + "Device address (%d) cannot be greater than %d\n", + addr, USB_DEVICE_MAX_ADDRESS); + return -EINVAL; + } + + if (device_state == USB_STATE_CONFIGURED) { + dev_err(priv_dev->dev, + "can't set_address from configured state\n"); + return -EINVAL; + } + + reg = readl(&priv_dev->regs->usb_cmd); + + writel(reg | USB_CMD_FADDR(addr) | USB_CMD_SET_ADDR, + &priv_dev->regs->usb_cmd); + + usb_gadget_set_state(&priv_dev->gadget, + (addr ? 
USB_STATE_ADDRESS : USB_STATE_DEFAULT)); + + return 0; +} + +/** + * cdns3_req_ep0_get_status - Handling of GET_STATUS standard USB request + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, error code on error + */ +static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl) +{ + __le16 *response_pkt; + u16 usb_status = 0; + u32 recip; + + recip = ctrl->bRequestType & USB_RECIP_MASK; + + switch (recip) { + case USB_RECIP_DEVICE: + /* self powered */ + if (priv_dev->is_selfpowered) + usb_status = BIT(USB_DEVICE_SELF_POWERED); + + if (priv_dev->wake_up_flag) + usb_status |= BIT(USB_DEVICE_REMOTE_WAKEUP); + + if (priv_dev->gadget.speed != USB_SPEED_SUPER) + break; + + if (priv_dev->u1_allowed) + usb_status |= BIT(USB_DEV_STAT_U1_ENABLED); + + if (priv_dev->u2_allowed) + usb_status |= BIT(USB_DEV_STAT_U2_ENABLED); + + break; + case USB_RECIP_INTERFACE: + return cdns3_ep0_delegate_req(priv_dev, ctrl); + case USB_RECIP_ENDPOINT: + /* check if endpoint is stalled */ + cdns3_select_ep(priv_dev, ctrl->wIndex); + if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts))) + usb_status = BIT(USB_ENDPOINT_HALT); + break; + default: + return -EINVAL; + } + + response_pkt = (__le16 *)priv_dev->setup_buf; + *response_pkt = cpu_to_le16(usb_status); + + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, + sizeof(*response_pkt), 1, 0); + return 0; +} + +static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl, + int set) +{ + enum usb_device_state state; + enum usb_device_speed speed; + int ret = 0; + u32 wValue; + u16 tmode; + + wValue = le16_to_cpu(ctrl->wValue); + state = priv_dev->gadget.state; + speed = priv_dev->gadget.speed; + + switch (wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + priv_dev->wake_up_flag = !!set; + break; + case USB_DEVICE_U1_ENABLE: + if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER) + return -EINVAL; + + priv_dev->u1_allowed = !!set; + break; + case USB_DEVICE_U2_ENABLE: + if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER) + return -EINVAL; + + priv_dev->u2_allowed = !!set; + break; + case USB_DEVICE_LTM_ENABLE: + ret = -EINVAL; + break; + case USB_DEVICE_TEST_MODE: + if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH) + return -EINVAL; + + tmode = le16_to_cpu(ctrl->wIndex); + + if (!set || (tmode & 0xff) != 0) + return -EINVAL; + + switch (tmode >> 8) { + case TEST_J: + case TEST_K: + case TEST_SE0_NAK: + case TEST_PACKET: + cdns3_ep0_complete_setup(priv_dev, 0, 1); + /** + * Little delay to give the controller some time + * for sending status stage. + * This time should be less then 3ms. 
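For reference, the SET_FEATURE(TEST_MODE) request that reaches this handler carries the selector in the high byte of wIndex, with the low byte required to be zero; the selectors accepted above are TEST_J, TEST_K, TEST_SE0_NAK and TEST_PACKET. A sketch of the setup packet as the device sees it when Test_Packet is requested:

/* Sketch: the control request that selects Test_Packet mode. */
static const struct usb_ctrlrequest example_test_packet_req = {
        .bRequestType   = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
        .bRequest       = USB_REQ_SET_FEATURE,
        .wValue         = cpu_to_le16(USB_DEVICE_TEST_MODE),
        .wIndex         = cpu_to_le16(TEST_PACKET << 8),       /* selector 4 */
        .wLength        = 0,
};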
+ */ + usleep_range(1000, 2000); + cdns3_set_register_bit(&priv_dev->regs->usb_cmd, + USB_CMD_STMODE | + USB_STS_TMODE_SEL(tmode - 1)); + break; + default: + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl, + int set) +{ + u32 wValue; + int ret = 0; + + wValue = le16_to_cpu(ctrl->wValue); + + switch (wValue) { + case USB_INTRF_FUNC_SUSPEND: + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl, + int set) +{ + struct cdns3_endpoint *priv_ep; + int ret = 0; + u8 index; + + if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) + return -EINVAL; + + if (!(ctrl->wIndex & ~USB_DIR_IN)) + return 0; + + index = cdns3_ep_addr_to_index(ctrl->wIndex); + priv_ep = priv_dev->eps[index]; + + cdns3_select_ep(priv_dev, ctrl->wIndex); + + if (set) + __cdns3_gadget_ep_set_halt(priv_ep); + else if (!(priv_ep->flags & EP_WEDGE)) + ret = __cdns3_gadget_ep_clear_halt(priv_ep); + + cdns3_select_ep(priv_dev, 0x00); + + return ret; +} + +/** + * cdns3_req_ep0_handle_feature - + * Handling of GET/SET_FEATURE standard USB request + * + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * @set: must be set to 1 for SET_FEATURE request + * + * Returns 0 if success, error code on error + */ +static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl, + int set) +{ + int ret = 0; + u32 recip; + + recip = ctrl->bRequestType & USB_RECIP_MASK; + + switch (recip) { + case USB_RECIP_DEVICE: + ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set); + break; + case USB_RECIP_INTERFACE: + ret = cdns3_ep0_feature_handle_intf(priv_dev, ctrl, set); + break; + case USB_RECIP_ENDPOINT: + ret = cdns3_ep0_feature_handle_endpoint(priv_dev, ctrl, set); + break; + default: + return -EINVAL; + } + + return ret; +} + +/** + * cdns3_req_ep0_set_sel - Handling of SET_SEL standard USB request + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, error code on error + */ +static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + if (priv_dev->gadget.state < USB_STATE_ADDRESS) + return -EINVAL; + + if (ctrl_req->wLength != 6) { + dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", + ctrl_req->wLength); + return -EINVAL; + } + + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0); + return 0; +} + +/** + * cdns3_req_ep0_set_isoch_delay - + * Handling of GET_ISOCH_DELAY standard USB request + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, error code on error + */ +static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + if (ctrl_req->wIndex || ctrl_req->wLength) + return -EINVAL; + + priv_dev->isoch_delay = ctrl_req->wValue; + + return 0; +} + +/** + * cdns3_ep0_standard_request - Handling standard USB requests + * @priv_dev: extended gadget object + * @ctrl_req: pointer to received setup packet + * + * Returns 0 if success, error code on error + */ +static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev, + struct usb_ctrlrequest *ctrl_req) +{ + int ret; + + switch (ctrl_req->bRequest) { + case USB_REQ_SET_ADDRESS: + ret = cdns3_req_ep0_set_address(priv_dev, 
ctrl_req); + break; + case USB_REQ_SET_CONFIGURATION: + ret = cdns3_req_ep0_set_configuration(priv_dev, ctrl_req); + break; + case USB_REQ_GET_STATUS: + ret = cdns3_req_ep0_get_status(priv_dev, ctrl_req); + break; + case USB_REQ_CLEAR_FEATURE: + ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 0); + break; + case USB_REQ_SET_FEATURE: + ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 1); + break; + case USB_REQ_SET_SEL: + ret = cdns3_req_ep0_set_sel(priv_dev, ctrl_req); + break; + case USB_REQ_SET_ISOCH_DELAY: + ret = cdns3_req_ep0_set_isoch_delay(priv_dev, ctrl_req); + break; + default: + ret = cdns3_ep0_delegate_req(priv_dev, ctrl_req); + break; + } + + return ret; +} + +static void __pending_setup_status_handler(struct cdns3_device *priv_dev) +{ + struct usb_request *request = priv_dev->pending_status_request; + + if (priv_dev->status_completion_no_call && request && + request->complete) { + request->complete(&priv_dev->eps[0]->endpoint, request); + priv_dev->status_completion_no_call = 0; + } +} + +void cdns3_pending_setup_status_handler(struct work_struct *work) +{ + struct cdns3_device *priv_dev = container_of(work, struct cdns3_device, + pending_status_wq); + unsigned long flags; + + spin_lock_irqsave(&priv_dev->lock, flags); + __pending_setup_status_handler(priv_dev); + spin_unlock_irqrestore(&priv_dev->lock, flags); +} + +/** + * cdns3_ep0_setup_phase - Handling setup USB requests + * @priv_dev: extended gadget object + */ +static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev) +{ + struct usb_ctrlrequest *ctrl = priv_dev->setup_buf; + struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; + int result; + + priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN; + + trace_cdns3_ctrl_req(ctrl); + + if (!list_empty(&priv_ep->pending_req_list)) { + struct usb_request *request; + + request = cdns3_next_request(&priv_ep->pending_req_list); + priv_ep->dir = priv_dev->ep0_data_dir; + cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), + -ECONNRESET); + } + + if (le16_to_cpu(ctrl->wLength)) + priv_dev->ep0_stage = CDNS3_DATA_STAGE; + else + priv_dev->ep0_stage = CDNS3_STATUS_STAGE; + + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + result = cdns3_ep0_standard_request(priv_dev, ctrl); + else + result = cdns3_ep0_delegate_req(priv_dev, ctrl); + + if (result == USB_GADGET_DELAYED_STATUS) + return; + + if (result < 0) + cdns3_ep0_complete_setup(priv_dev, 1, 1); + else if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) + cdns3_ep0_complete_setup(priv_dev, 0, 1); +} + +static void cdns3_transfer_completed(struct cdns3_device *priv_dev) +{ + struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; + + if (!list_empty(&priv_ep->pending_req_list)) { + struct usb_request *request; + + trace_cdns3_complete_trb(priv_ep, priv_ep->trb_pool); + request = cdns3_next_request(&priv_ep->pending_req_list); + + request->actual = + TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length)); + + priv_ep->dir = priv_dev->ep0_data_dir; + cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0); + } + + cdns3_ep0_complete_setup(priv_dev, 0, 0); +} + +/** + * cdns3_check_new_setup - Check if controller receive new SETUP packet. + * @priv_dev: extended gadget object + * + * The SETUP packet can be kept in on-chip memory or in system memory. 
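The USB_GADGET_DELAYED_STATUS return handled in cdns3_ep0_setup_phase() above relies on the function driver releasing the status stage later. A hypothetical function-driver counterpart, sketched with the standard composite API, would look roughly like this:

#include <linux/usb/composite.h>

/* Sketch: defer the status stage from ->setup(), then release it once
 * the slow work keyed on f->config->cdev has completed.
 */
static int example_setup(struct usb_function *f,
                         const struct usb_ctrlrequest *ctrl)
{
        /* start some deferred work here, then ... */
        return USB_GADGET_DELAYED_STATUS;
}

static void example_work_done(struct usb_composite_dev *cdev)
{
        /* ... let the UDC queue the status stage. */
        usb_composite_setup_continue(cdev);
}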
+ */ +static bool cdns3_check_new_setup(struct cdns3_device *priv_dev) +{ + u32 ep_sts_reg; + + cdns3_select_ep(priv_dev, 0 | USB_DIR_OUT); + ep_sts_reg = readl(&priv_dev->regs->ep_sts); + + return !!(ep_sts_reg & (EP_STS_SETUP | EP_STS_STPWAIT)); +} + +/** + * cdns3_check_ep0_interrupt_proceed - Processes interrupt related to endpoint 0 + * @priv_dev: extended gadget object + * @dir: USB_DIR_IN for IN direction, USB_DIR_OUT for OUT direction + */ +void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir) +{ + u32 ep_sts_reg; + + cdns3_select_ep(priv_dev, dir); + + ep_sts_reg = readl(&priv_dev->regs->ep_sts); + writel(ep_sts_reg, &priv_dev->regs->ep_sts); + + trace_cdns3_ep0_irq(priv_dev, ep_sts_reg); + + __pending_setup_status_handler(priv_dev); + + if (ep_sts_reg & EP_STS_SETUP) + priv_dev->wait_for_setup = 1; + + if (priv_dev->wait_for_setup && ep_sts_reg & EP_STS_IOC) { + priv_dev->wait_for_setup = 0; + cdns3_allow_enable_l1(priv_dev, 0); + cdns3_ep0_setup_phase(priv_dev); + } else if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) { + priv_dev->ep0_data_dir = dir; + cdns3_transfer_completed(priv_dev); + } + + if (ep_sts_reg & EP_STS_DESCMIS) { + if (dir == 0 && !priv_dev->setup_pending) + cdns3_prepare_setup_packet(priv_dev); + } +} + +/** + * cdns3_gadget_ep0_enable + * Function shouldn't be called by gadget driver, + * endpoint 0 is allways active + */ +static int cdns3_gadget_ep0_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + return -EINVAL; +} + +/** + * cdns3_gadget_ep0_disable + * Function shouldn't be called by gadget driver, + * endpoint 0 is allways active + */ +static int cdns3_gadget_ep0_disable(struct usb_ep *ep) +{ + return -EINVAL; +} + +/** + * cdns3_gadget_ep0_set_halt + * @ep: pointer to endpoint zero object + * @value: 1 for set stall, 0 for clear stall + * + * Returns 0 + */ +static int cdns3_gadget_ep0_set_halt(struct usb_ep *ep, int value) +{ + /* TODO */ + return 0; +} + +/** + * cdns3_gadget_ep0_queue Transfer data on endpoint zero + * @ep: pointer to endpoint zero object + * @request: pointer to request object + * @gfp_flags: gfp flags + * + * Returns 0 on success, error code elsewhere + */ +static int cdns3_gadget_ep0_queue(struct usb_ep *ep, + struct usb_request *request, + gfp_t gfp_flags) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + unsigned long flags; + int erdy_sent = 0; + int ret = 0; + u8 zlp = 0; + + trace_cdns3_ep0_queue(priv_dev, request); + + /* cancel the request if controller receive new SETUP packet. */ + if (cdns3_check_new_setup(priv_dev)) + return -ECONNRESET; + + /* send STATUS stage. Should be called only for SET_CONFIGURATION */ + if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) { + spin_lock_irqsave(&priv_dev->lock, flags); + cdns3_select_ep(priv_dev, 0x00); + + erdy_sent = !priv_dev->hw_configured_flag; + cdns3_set_hw_configuration(priv_dev); + + if (!erdy_sent) + cdns3_ep0_complete_setup(priv_dev, 0, 1); + + cdns3_allow_enable_l1(priv_dev, 1); + + request->actual = 0; + priv_dev->status_completion_no_call = true; + priv_dev->pending_status_request = request; + spin_unlock_irqrestore(&priv_dev->lock, flags); + + /* + * Since there is no completion interrupt for status stage, + * it needs to call ->completion in software after + * ep0_queue is back. 
+ */ + queue_work(system_freezable_wq, &priv_dev->pending_status_wq); + return 0; + } + + spin_lock_irqsave(&priv_dev->lock, flags); + if (!list_empty(&priv_ep->pending_req_list)) { + dev_err(priv_dev->dev, + "can't handle multiple requests for ep0\n"); + spin_unlock_irqrestore(&priv_dev->lock, flags); + return -EBUSY; + } + + ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request, + priv_dev->ep0_data_dir); + if (ret) { + spin_unlock_irqrestore(&priv_dev->lock, flags); + dev_err(priv_dev->dev, "failed to map request\n"); + return -EINVAL; + } + + request->status = -EINPROGRESS; + list_add_tail(&request->list, &priv_ep->pending_req_list); + + if (request->zero && request->length && + (request->length % ep->maxpacket == 0)) + zlp = 1; + + cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp); + + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_gadget_ep_set_wedge Set wedge on selected endpoint + * @ep: endpoint object + * + * Returns 0 + */ +int cdns3_gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + dev_dbg(priv_dev->dev, "Wedge for %s\n", ep->name); + cdns3_gadget_ep_set_halt(ep, 1); + priv_ep->flags |= EP_WEDGE; + + return 0; +} + +const struct usb_ep_ops cdns3_gadget_ep0_ops = { + .enable = cdns3_gadget_ep0_enable, + .disable = cdns3_gadget_ep0_disable, + .alloc_request = cdns3_gadget_ep_alloc_request, + .free_request = cdns3_gadget_ep_free_request, + .queue = cdns3_gadget_ep0_queue, + .dequeue = cdns3_gadget_ep_dequeue, + .set_halt = cdns3_gadget_ep0_set_halt, + .set_wedge = cdns3_gadget_ep_set_wedge, +}; + +/** + * cdns3_ep0_config - Configures default endpoint + * @priv_dev: extended gadget object + * + * Functions sets parameters: maximal packet size and enables interrupts + */ +void cdns3_ep0_config(struct cdns3_device *priv_dev) +{ + struct cdns3_usb_regs __iomem *regs; + struct cdns3_endpoint *priv_ep; + u32 max_packet_size = 64; + + regs = priv_dev->regs; + + if (priv_dev->gadget.speed == USB_SPEED_SUPER) + max_packet_size = 512; + + priv_ep = priv_dev->eps[0]; + + if (!list_empty(&priv_ep->pending_req_list)) { + struct usb_request *request; + + request = cdns3_next_request(&priv_ep->pending_req_list); + list_del_init(&request->list); + } + + priv_dev->u1_allowed = 0; + priv_dev->u2_allowed = 0; + + priv_dev->gadget.ep0->maxpacket = max_packet_size; + cdns3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(max_packet_size); + + /* init ep out */ + cdns3_select_ep(priv_dev, USB_DIR_OUT); + + if (priv_dev->dev_ver >= DEV_VER_V3) { + cdns3_set_register_bit(&priv_dev->regs->dtrans, + BIT(0) | BIT(16)); + cdns3_set_register_bit(&priv_dev->regs->tdl_from_trb, + BIT(0) | BIT(16)); + } + + writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size), + ®s->ep_cfg); + + writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN, + ®s->ep_sts_en); + + /* init ep in */ + cdns3_select_ep(priv_dev, USB_DIR_IN); + + writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size), + ®s->ep_cfg); + + writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, ®s->ep_sts_en); + + cdns3_set_register_bit(®s->usb_conf, USB_CONF_U1DS | USB_CONF_U2DS); +} + +/** + * cdns3_init_ep0 Initializes software endpoint 0 of gadget + * @priv_dev: extended gadget object + * @ep_priv: extended endpoint object + * + * Returns 0 on success else error code. 
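The zero-length-packet rule in cdns3_gadget_ep0_queue() above only triggers when a queued response with req->zero set is an exact multiple of ep0's maxpacket. A short illustration, with a hypothetical caller:

/* Sketch: a 512-byte IN response on a SuperSpeed control endpoint
 * (maxpacket 512). With req->zero set, 512 % 512 == 0, so the queue
 * path above appends a zero-length TRB; a 511-byte response would not
 * get one.
 */
static int example_queue_exact_multiple(struct usb_ep *ep0,
                                        struct usb_request *req)
{
        req->length = 512;
        req->zero = 1;          /* ask the UDC to terminate with a ZLP */

        return usb_ep_queue(ep0, req, GFP_ATOMIC);
}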
+ */ +int cdns3_init_ep0(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + sprintf(priv_ep->name, "ep0"); + + /* fill linux fields */ + priv_ep->endpoint.ops = &cdns3_gadget_ep0_ops; + priv_ep->endpoint.maxburst = 1; + usb_ep_set_maxpacket_limit(&priv_ep->endpoint, + CDNS3_EP0_MAX_PACKET_LIMIT); + priv_ep->endpoint.address = 0; + priv_ep->endpoint.caps.type_control = 1; + priv_ep->endpoint.caps.dir_in = 1; + priv_ep->endpoint.caps.dir_out = 1; + priv_ep->endpoint.name = priv_ep->name; + priv_ep->endpoint.desc = &cdns3_gadget_ep0_desc; + priv_dev->gadget.ep0 = &priv_ep->endpoint; + priv_ep->type = USB_ENDPOINT_XFER_CONTROL; + + return cdns3_allocate_trb_pool(priv_ep); +} diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h new file mode 100644 index 000000000000..577469eee961 --- /dev/null +++ b/drivers/usb/cdns3/gadget-export.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USBSS DRD Driver - Gadget Export APIs. + * + * Copyright (C) 2017 NXP + * Copyright (C) 2017-2018 NXP + * + * Authors: Peter Chen <peter.chen@nxp.com> + */ +#ifndef __LINUX_CDNS3_GADGET_EXPORT +#define __LINUX_CDNS3_GADGET_EXPORT + +#ifdef CONFIG_USB_CDNS3_GADGET + +int cdns3_gadget_init(struct cdns3 *cdns); +void cdns3_gadget_exit(struct cdns3 *cdns); +#else + +static inline int cdns3_gadget_init(struct cdns3 *cdns) +{ + return -ENXIO; +} + +static inline void cdns3_gadget_exit(struct cdns3 *cdns) { } + +#endif + +#endif /* __LINUX_CDNS3_GADGET_EXPORT */ diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c new file mode 100644 index 000000000000..228cdc4ab886 --- /dev/null +++ b/drivers/usb/cdns3/gadget.c @@ -0,0 +1,2744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver - gadget side. + * + * Copyright (C) 2018-2019 Cadence Design Systems. + * Copyright (C) 2017-2018 NXP + * + * Authors: Pawel Jez <pjez@cadence.com>, + * Pawel Laszczak <pawell@cadence.com> + * Peter Chen <peter.chen@nxp.com> + */ + +/* + * Work around 1: + * At some situations, the controller may get stale data address in TRB + * at below sequences: + * 1. Controller read TRB includes data address + * 2. Software updates TRBs includes data address and Cycle bit + * 3. Controller read TRB which includes Cycle bit + * 4. DMA run with stale data address + * + * To fix this problem, driver needs to make the first TRB in TD as invalid. + * After preparing all TRBs driver needs to check the position of DMA and + * if the DMA point to the first just added TRB and doorbell is 1, + * then driver must defer making this TRB as valid. This TRB will be make + * as valid during adding next TRB only if DMA is stopped or at TRBERR + * interrupt. + * + * Issue has been fixed in DEV_VER_V3 version of controller. + * + * Work around 2: + * Controller for OUT endpoints has shared on-chip buffers for all incoming + * packets, including ep0out. It's FIFO buffer, so packets must be handle by DMA + * in correct order. If the first packet in the buffer will not be handled, + * then the following packets directed for other endpoints and functions + * will be blocked. + * Additionally the packets directed to one endpoint can block entire on-chip + * buffers. In this case transfer to other endpoints also will blocked. + * + * To resolve this issue after raising the descriptor missing interrupt + * driver prepares internal usb_request object and use it to arm DMA transfer. 
+ *
+ * The problematic situation was observed when an endpoint had been enabled
+ * but no usb_request was queued. The driver tries to detect such endpoints
+ * and uses this workaround only for them.
+ *
+ * The driver uses a limited number of buffers. This number can be set with
+ * the macro CDNS3_WA2_NUM_BUFFERS.
+ *
+ * Such a blocking situation was observed with the ACM gadget: the host
+ * sends an OUT data packet but the ACM function is not prepared for it,
+ * which causes the buffer held in on-chip memory to block transfers to
+ * other endpoints.
+ *
+ * The issue has been fixed in the DEV_VER_V2 version of the controller.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/usb/gadget.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "gadget.h"
+#include "trace.h"
+#include "drd.h"
+
+static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
+                                   struct usb_request *request,
+                                   gfp_t gfp_flags);
+
+/**
+ * cdns3_set_register_bit - set bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to set
+ */
+void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
+{
+        mask = readl(ptr) | mask;
+        writel(mask, ptr);
+}
+
+/**
+ * cdns3_ep_addr_to_index - converts an endpoint address to the index of
+ * the endpoint object in the cdns3_device.eps[] container
+ * @ep_addr: endpoint address for which endpoint object is required
+ */
+u8 cdns3_ep_addr_to_index(u8 ep_addr)
+{
+        return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
+}
+
+static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
+                             struct cdns3_endpoint *priv_ep)
+{
+        int dma_index;
+
+        dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
+
+        return dma_index / TRB_SIZE;
+}
+
+/**
+ * cdns3_next_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+struct usb_request *cdns3_next_request(struct list_head *list)
+{
+        return list_first_entry_or_null(list, struct usb_request, list);
+}
+
+/**
+ * cdns3_next_align_buf - returns next buffer from list
+ * @list: list containing buffers
+ *
+ * Returns buffer or NULL if no buffers in list
+ */
+struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+{
+        return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
+}
+
+/**
+ * cdns3_next_priv_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+{
+        return list_first_entry_or_null(list, struct cdns3_request, list);
+}
+
+/**
+ * cdns3_select_ep - selects endpoint
+ * @priv_dev: extended gadget object
+ * @ep: endpoint address
+ */
+void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
+{
+        if (priv_dev->selected_ep == ep)
+                return;
+
+        priv_dev->selected_ep = ep;
+        writel(ep, &priv_dev->regs->ep_sel);
+}
+
+dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
+                                 struct cdns3_trb *trb)
+{
+        u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
+
+        return priv_ep->trb_pool_dma + offset;
+}
+
+int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
+{
+        switch (priv_ep->type) {
+        case USB_ENDPOINT_XFER_ISOC:
+                return TRB_ISO_RING_SIZE;
+        case USB_ENDPOINT_XFER_CONTROL:
+                return TRB_CTRL_RING_SIZE;
+        default:
+                return TRB_RING_SIZE;
+        }
+}
+
+/**
+ * cdns3_allocate_trb_pool - Allocates the TRB pool for the selected endpoint
+ *
@priv_ep: endpoint object + * + * Function will return 0 on success or -ENOMEM on allocation error + */ +int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + int ring_size = cdns3_ring_size(priv_ep); + struct cdns3_trb *link_trb; + + if (!priv_ep->trb_pool) { + priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev, + ring_size, + &priv_ep->trb_pool_dma, + GFP_DMA32 | GFP_ATOMIC); + if (!priv_ep->trb_pool) + return -ENOMEM; + } else { + memset(priv_ep->trb_pool, 0, ring_size); + } + + if (!priv_ep->num) + return 0; + + priv_ep->num_trbs = ring_size / TRB_SIZE; + /* Initialize the last TRB as Link TRB. */ + link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1)); + link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma); + link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE; + + return 0; +} + +static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (priv_ep->trb_pool) { + dma_free_coherent(priv_dev->sysdev, + cdns3_ring_size(priv_ep), + priv_ep->trb_pool, priv_ep->trb_pool_dma); + priv_ep->trb_pool = NULL; + } +} + +/** + * cdns3_ep_stall_flush - Stalls and flushes selected endpoint + * @priv_ep: endpoint object + * + * Endpoint must be selected before call to this function + */ +static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + int val; + + trace_cdns3_halt(priv_ep, 1, 1); + + writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL, + &priv_dev->regs->ep_cmd); + + /* wait for DFLUSH cleared */ + readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & EP_CMD_DFLUSH), 1, 1000); + priv_ep->flags |= EP_STALLED; + priv_ep->flags &= ~EP_STALL_PENDING; +} + +/** + * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller. + * @priv_dev: extended gadget object + */ +void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev) +{ + writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf); + + cdns3_allow_enable_l1(priv_dev, 0); + priv_dev->hw_configured_flag = 0; + priv_dev->onchip_used_size = 0; + priv_dev->out_mem_is_allocated = 0; + priv_dev->wait_for_setup = 0; +} + +/** + * cdns3_ep_inc_trb - increment a trb index. + * @index: Pointer to the TRB index to increment. + * @cs: Cycle state + * @trb_in_seg: number of TRBs in segment + * + * The index should never point to the link TRB. After incrementing, + * if it is point to the link TRB, wrap around to the beginning and revert + * cycle state bit The + * link TRB is always at the last TRB entry. 
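Two concrete readings of the helpers above: endpoint address 0x81 (EP1 IN) maps to eps[17] while 0x02 (EP2 OUT) maps to eps[2], and with N TRBs per ring the index walks 0..N-2 and then wraps, flipping the cycle state, since slot N-1 holds the link TRB. A purely illustrative sketch:

/* Sketch: exercise the index helper and the wrap rule documented above. */
static void example_index_and_wrap(struct cdns3_device *priv_dev)
{
        int index = 0;
        u8 pcs = 1;
        int num_trbs = 8;       /* slot 7 holds the link TRB */
        int i;

        /* cdns3_ep_addr_to_index(): 0x81 -> 1 + 16 = 17, 0x02 -> 2 */
        dev_dbg(priv_dev->dev, "ep1in -> eps[%d], ep2out -> eps[%d]\n",
                cdns3_ep_addr_to_index(0x81), cdns3_ep_addr_to_index(0x02));

        /* Enqueue walks 0..num_trbs-2, then wraps and toggles the PCS. */
        for (i = 0; i < num_trbs - 1; i++) {
                index++;
                if (index == num_trbs - 1) {
                        index = 0;
                        pcs ^= 1;
                }
        }

        dev_dbg(priv_dev->dev, "index=%d pcs=%d\n", index, pcs); /* 0 0 */
}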
+ */ +static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg) +{ + (*index)++; + if (*index == (trb_in_seg - 1)) { + *index = 0; + *cs ^= 1; + } +} + +/** + * cdns3_ep_inc_enq - increment endpoint's enqueue pointer + * @priv_ep: The endpoint whose enqueue pointer we're incrementing + */ +static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep) +{ + priv_ep->free_trbs--; + cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs); +} + +/** + * cdns3_ep_inc_deq - increment endpoint's dequeue pointer + * @priv_ep: The endpoint whose dequeue pointer we're incrementing + */ +static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) +{ + priv_ep->free_trbs++; + cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); +} + +void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) +{ + struct cdns3_endpoint *priv_ep = priv_req->priv_ep; + int current_trb = priv_req->start_trb; + + while (current_trb != priv_req->end_trb) { + cdns3_ep_inc_deq(priv_ep); + current_trb = priv_ep->dequeue; + } + + cdns3_ep_inc_deq(priv_ep); +} + +/** + * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. + * @priv_dev: Extended gadget object + * @enable: Enable/disable permit to transition to L1. + * + * If bit USB_CONF_L1EN is set and device receive Extended Token packet, + * then controller answer with ACK handshake. + * If bit USB_CONF_L1DS is set and device receive Extended Token packet, + * then controller answer with NYET handshake. + */ +void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable) +{ + if (enable) + writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf); + else + writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf); +} + +enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev) +{ + u32 reg; + + reg = readl(&priv_dev->regs->usb_sts); + + if (DEV_SUPERSPEED(reg)) + return USB_SPEED_SUPER; + else if (DEV_HIGHSPEED(reg)) + return USB_SPEED_HIGH; + else if (DEV_FULLSPEED(reg)) + return USB_SPEED_FULL; + else if (DEV_LOWSPEED(reg)) + return USB_SPEED_LOW; + return USB_SPEED_UNKNOWN; +} + +/** + * cdns3_start_all_request - add to ring all request not started + * @priv_dev: Extended gadget object + * @priv_ep: The endpoint for whom request will be started. + * + * Returns return ENOMEM if transfer ring i not enough TRBs to start + * all requests. + */ +static int cdns3_start_all_request(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + struct usb_request *request; + int ret = 0; + + while (!list_empty(&priv_ep->deferred_req_list)) { + request = cdns3_next_request(&priv_ep->deferred_req_list); + + ret = cdns3_ep_run_transfer(priv_ep, request); + if (ret) + return ret; + + list_del(&request->list); + list_add_tail(&request->list, + &priv_ep->pending_req_list); + } + + priv_ep->flags &= ~EP_RING_FULL; + return ret; +} + +/* + * WA2: Set flag for all not ISOC OUT endpoints. If this flag is set + * driver try to detect whether endpoint need additional internal + * buffer for unblocking on-chip FIFO buffer. This flag will be cleared + * if before first DESCMISS interrupt the DMA will be armed. + */ +#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \ + if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \ + priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \ + (reg) |= EP_STS_EN_DESCMISEN; \ + } } while (0) + +/** + * cdns3_wa2_descmiss_copy_data copy data from internal requests to + * request queued by class driver. 
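+ *
+ * Conceptually this is a copy-and-coalesce loop; a rough standalone sketch
+ * (not the driver code) of the same idea, where a chunk's chain flag means
+ * "more data of the same transfer follows":
+ *
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *
+ *   struct chunk { const char *buf; int len; int chained; };
+ *
+ *   // Copy chained chunks into dst until a chunk without the chain flag
+ *   // (the end of the transfer) has been consumed. Returns bytes copied.
+ *   static int coalesce(char *dst, int dst_len,
+ *                       const struct chunk *c, int num)
+ *   {
+ *           int copied = 0;
+ *           int i;
+ *
+ *           for (i = 0; i < num; i++) {
+ *                   if (copied + c[i].len > dst_len)
+ *                           break;                  // would overflow dst
+ *                   memcpy(dst + copied, c[i].buf, c[i].len);
+ *                   copied += c[i].len;
+ *                   if (!c[i].chained)
+ *                           break;                  // end of this transfer
+ *           }
+ *           return copied;
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *           char dst[16] = { 0 };
+ *           const struct chunk c[] = {
+ *                   { "abc", 3, 1 },   // chained: more data follows
+ *                   { "def", 3, 0 },   // end of the transfer
+ *                   { "xyz", 3, 0 },   // belongs to the next transfer
+ *           };
+ *
+ *           printf("%d bytes: %s\n", coalesce(dst, sizeof(dst), c, 3), dst);
+ *           return 0;
+ *   }
+ *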
+ * @priv_ep: extended endpoint object + * @request: request object + */ +static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep, + struct usb_request *request) +{ + struct usb_request *descmiss_req; + struct cdns3_request *descmiss_priv_req; + + while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { + int chunk_end; + int length; + + descmiss_priv_req = + cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); + descmiss_req = &descmiss_priv_req->request; + + /* driver can't touch pending request */ + if (descmiss_priv_req->flags & REQUEST_PENDING) + break; + + chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH; + length = request->actual + descmiss_req->actual; + + request->status = descmiss_req->status; + + if (length <= request->length) { + memcpy(&((u8 *)request->buf)[request->actual], + descmiss_req->buf, + descmiss_req->actual); + request->actual = length; + } else { + /* It should never occures */ + request->status = -ENOMEM; + } + + list_del_init(&descmiss_priv_req->list); + + kfree(descmiss_req->buf); + cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req); + --priv_ep->wa2_counter; + + if (!chunk_end) + break; + } +} + +struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req) +{ + if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN && + priv_req->flags & REQUEST_INTERNAL) { + struct usb_request *req; + + req = cdns3_next_request(&priv_ep->deferred_req_list); + + priv_ep->descmis_req = NULL; + + if (!req) + return NULL; + + cdns3_wa2_descmiss_copy_data(priv_ep, req); + if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) && + req->length != req->actual) { + /* wait for next part of transfer */ + return NULL; + } + + if (req->status == -EINPROGRESS) + req->status = 0; + + list_del_init(&req->list); + cdns3_start_all_request(priv_dev, priv_ep); + return req; + } + + return &priv_req->request; +} + +int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req) +{ + int deferred = 0; + + /* + * If transfer was queued before DESCMISS appear than we + * can disable handling of DESCMISS interrupt. Driver assumes that it + * can disable special treatment for this endpoint. + */ + if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { + u32 reg; + + cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir); + priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; + reg = readl(&priv_dev->regs->ep_sts_en); + reg &= ~EP_STS_EN_DESCMISEN; + trace_cdns3_wa2(priv_ep, "workaround disabled\n"); + writel(reg, &priv_dev->regs->ep_sts_en); + } + + if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { + u8 pending_empty = list_empty(&priv_ep->pending_req_list); + u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list); + + /* + * DESCMISS transfer has been finished, so data will be + * directly copied from internal allocated usb_request + * objects. + */ + if (pending_empty && !descmiss_empty && + !(priv_req->flags & REQUEST_INTERNAL)) { + cdns3_wa2_descmiss_copy_data(priv_ep, + &priv_req->request); + + trace_cdns3_wa2(priv_ep, "get internal stored data"); + + list_add_tail(&priv_req->request.list, + &priv_ep->pending_req_list); + cdns3_gadget_giveback(priv_ep, priv_req, + priv_req->request.status); + + /* + * Intentionally driver returns positive value as + * correct value. It informs that transfer has + * been finished. 
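+ *
+ * The caller is expected to map that positive value back to success; a
+ * condensed sketch of the call-site pattern (illustration only):
+ *
+ *   ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, priv_req);
+ *   if (ret == EINPROGRESS)
+ *           return 0;   // request already satisfied from stored data
+ *   // otherwise fall through and queue or defer the request normally
+ *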
+ */ + return EINPROGRESS; + } + + /* + * Driver will wait for completion DESCMISS transfer, + * before starts new, not DESCMISS transfer. + */ + if (!pending_empty && !descmiss_empty) { + trace_cdns3_wa2(priv_ep, "wait for pending transfer\n"); + deferred = 1; + } + + if (priv_req->flags & REQUEST_INTERNAL) + list_add_tail(&priv_req->list, + &priv_ep->wa2_descmiss_req_list); + } + + return deferred; +} + +static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_request *priv_req; + + while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { + u8 chain; + + priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); + chain = !!(priv_req->flags & REQUEST_INTERNAL_CH); + + trace_cdns3_wa2(priv_ep, "removes eldest request"); + + kfree(priv_req->request.buf); + cdns3_gadget_ep_free_request(&priv_ep->endpoint, + &priv_req->request); + list_del_init(&priv_req->list); + --priv_ep->wa2_counter; + + if (!chain) + break; + } +} + +/** + * cdns3_wa2_descmissing_packet - handles descriptor missing event. + * @priv_dev: extended gadget object + * + * This function is used only for WA2. For more information see Work around 2 + * description. + */ +static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_request *priv_req; + struct usb_request *request; + + if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { + priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; + priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN; + } + + trace_cdns3_wa2(priv_ep, "Description Missing detected\n"); + + if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) + cdns3_wa2_remove_old_request(priv_ep); + + request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint, + GFP_ATOMIC); + if (!request) + goto err; + + priv_req = to_cdns3_request(request); + priv_req->flags |= REQUEST_INTERNAL; + + /* if this field is still assigned it indicate that transfer related + * with this request has not been finished yet. Driver in this + * case simply allocate next request and assign flag REQUEST_INTERNAL_CH + * flag to previous one. It will indicate that current request is + * part of the previous one. + */ + if (priv_ep->descmis_req) + priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH; + + priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE, + GFP_ATOMIC); + priv_ep->wa2_counter++; + + if (!priv_req->request.buf) { + cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); + goto err; + } + + priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE; + priv_ep->descmis_req = priv_req; + + __cdns3_gadget_ep_queue(&priv_ep->endpoint, + &priv_ep->descmis_req->request, + GFP_ATOMIC); + + return; + +err: + dev_err(priv_ep->cdns3_dev->dev, + "Failed: No sufficient memory for DESCMIS\n"); +} + +/** + * cdns3_gadget_giveback - call struct usb_request's ->complete callback + * @priv_ep: The endpoint to whom the request belongs to + * @priv_req: The request we're giving back + * @status: completion code for the request + * + * Must be called with controller's lock held and interrupts disabled. This + * function will unmap @req and call its ->complete() callback to notify upper + * layers that it has completed. 
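+ *
+ * From the function driver's point of view this is the path that ends in the
+ * usb_request's ->complete() callback; a minimal usage sketch of the standard
+ * gadget API (kernel context assumed, illustration only, not from this patch):
+ *
+ *   static void my_bulk_complete(struct usb_ep *ep, struct usb_request *req)
+ *   {
+ *           // called with req->status and req->actual already filled in;
+ *           // the UDC has dropped its internal lock around this callback
+ *           if (req->status == 0)
+ *                   pr_info("received %u bytes\n", req->actual);
+ *   }
+ *
+ *   static int my_queue_read(struct usb_ep *ep, void *buf, unsigned int len)
+ *   {
+ *           struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *
+ *           if (!req)
+ *                   return -ENOMEM;
+ *           req->buf      = buf;
+ *           req->length   = len;
+ *           req->complete = my_bulk_complete;
+ *           return usb_ep_queue(ep, req, GFP_KERNEL);
+ *   }
+ *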
+ */ +void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req, + int status) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct usb_request *request = &priv_req->request; + + list_del_init(&request->list); + + if (request->status == -EINPROGRESS) + request->status = status; + + usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request, + priv_ep->dir); + + if ((priv_req->flags & REQUEST_UNALIGNED) && + priv_ep->dir == USB_DIR_OUT && !request->status) + memcpy(request->buf, priv_req->aligned_buf->buf, + request->length); + + priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); + trace_cdns3_gadget_giveback(priv_req); + + if (priv_dev->dev_ver < DEV_VER_V2) { + request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep, + priv_req); + if (!request) + return; + } + + if (request->complete) { + spin_unlock(&priv_dev->lock); + usb_gadget_giveback_request(&priv_ep->endpoint, + request); + spin_lock(&priv_dev->lock); + } + + if (request->buf == priv_dev->zlp_buf) + cdns3_gadget_ep_free_request(&priv_ep->endpoint, request); +} + +void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) +{ + /* Work around for stale data address in TRB*/ + if (priv_ep->wa1_set) { + trace_cdns3_wa1(priv_ep, "restore cycle bit"); + + priv_ep->wa1_set = 0; + priv_ep->wa1_trb_index = 0xFFFF; + if (priv_ep->wa1_cycle_bit) { + priv_ep->wa1_trb->control = + priv_ep->wa1_trb->control | 0x1; + } else { + priv_ep->wa1_trb->control = + priv_ep->wa1_trb->control & ~0x1; + } + } +} + +static void cdns3_free_aligned_request_buf(struct work_struct *work) +{ + struct cdns3_device *priv_dev = container_of(work, struct cdns3_device, + aligned_buf_wq); + struct cdns3_aligned_buf *buf, *tmp; + unsigned long flags; + + spin_lock_irqsave(&priv_dev->lock, flags); + + list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) { + if (!buf->in_use) { + list_del(&buf->list); + + /* + * Re-enable interrupts to free DMA capable memory. + * Driver can't free this memory with disabled + * interrupts. + */ + spin_unlock_irqrestore(&priv_dev->lock, flags); + dma_free_coherent(priv_dev->sysdev, buf->size, + buf->buf, buf->dma); + kfree(buf); + spin_lock_irqsave(&priv_dev->lock, flags); + } + } + + spin_unlock_irqrestore(&priv_dev->lock, flags); +} + +static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) +{ + struct cdns3_endpoint *priv_ep = priv_req->priv_ep; + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_aligned_buf *buf; + + /* check if buffer is aligned to 8. 
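+ *
+ * i.e. the low three address bits must be clear; a standalone sketch of the
+ * same predicate (not part of this patch):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static bool aligned_to_8(const void *p)
+ *   {
+ *           return ((uintptr_t)p & 0x7) == 0;   // 0x1000 yes, 0x1004 no
+ *   }
+ *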
*/ + if (!((uintptr_t)priv_req->request.buf & 0x7)) + return 0; + + buf = priv_req->aligned_buf; + + if (!buf || priv_req->request.length > buf->size) { + buf = kzalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + buf->size = priv_req->request.length; + + buf->buf = dma_alloc_coherent(priv_dev->sysdev, + buf->size, + &buf->dma, + GFP_ATOMIC); + if (!buf->buf) { + kfree(buf); + return -ENOMEM; + } + + if (priv_req->aligned_buf) { + trace_cdns3_free_aligned_request(priv_req); + priv_req->aligned_buf->in_use = 0; + queue_work(system_freezable_wq, + &priv_dev->aligned_buf_wq); + } + + buf->in_use = 1; + priv_req->aligned_buf = buf; + + list_add_tail(&buf->list, + &priv_dev->aligned_buf_list); + } + + if (priv_ep->dir == USB_DIR_IN) { + memcpy(buf->buf, priv_req->request.buf, + priv_req->request.length); + } + + priv_req->flags |= REQUEST_UNALIGNED; + trace_cdns3_prepare_aligned_request(priv_req); + + return 0; +} + +static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep, + struct cdns3_trb *trb) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (!priv_ep->wa1_set) { + u32 doorbell; + + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + + if (doorbell) { + priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0; + priv_ep->wa1_set = 1; + priv_ep->wa1_trb = trb; + priv_ep->wa1_trb_index = priv_ep->enqueue; + trace_cdns3_wa1(priv_ep, "set guard"); + return 0; + } + } + return 1; +} + +static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + int dma_index; + u32 doorbell; + + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); + + if (!doorbell || dma_index != priv_ep->wa1_trb_index) + cdns3_wa1_restore_cycle_bit(priv_ep); +} + +/** + * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware + * @priv_ep: endpoint object + * + * Returns zero on success or negative value on failure + */ +int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_request *priv_req; + struct cdns3_trb *trb; + dma_addr_t trb_dma; + u32 togle_pcs = 1; + int sg_iter = 0; + int num_trb; + int address; + u32 control; + int pcs; + + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) + num_trb = priv_ep->interval; + else + num_trb = request->num_sgs ? request->num_sgs : 1; + + if (num_trb > priv_ep->free_trbs) { + priv_ep->flags |= EP_RING_FULL; + return -ENOBUFS; + } + + priv_req = to_cdns3_request(request); + address = priv_ep->endpoint.desc->bEndpointAddress; + + priv_ep->flags |= EP_PENDING_REQUEST; + + /* must allocate buffer aligned to 8 */ + if (priv_req->flags & REQUEST_UNALIGNED) + trb_dma = priv_req->aligned_buf->dma; + else + trb_dma = request->dma; + + trb = priv_ep->trb_pool + priv_ep->enqueue; + priv_req->start_trb = priv_ep->enqueue; + priv_req->trb = trb; + + cdns3_select_ep(priv_ep->cdns3_dev, address); + + /* prepare ring */ + if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) { + struct cdns3_trb *link_trb; + int doorbell, dma_index; + u32 ch_bit = 0; + + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + dma_index = cdns3_get_dma_pos(priv_dev, priv_ep); + + /* Driver can't update LINK TRB if it is current processed. 
*/ + if (doorbell && dma_index == priv_ep->num_trbs - 1) { + priv_ep->flags |= EP_DEFERRED_DRDY; + return -ENOBUFS; + } + + /*updating C bt in Link TRB before starting DMA*/ + link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1); + /* + * For TRs size equal 2 enabling TRB_CHAIN for epXin causes + * that DMA stuck at the LINK TRB. + * On the other hand, removing TRB_CHAIN for longer TRs for + * epXout cause that DMA stuck after handling LINK TRB. + * To eliminate this strange behavioral driver set TRB_CHAIN + * bit only for TR size > 2. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC || + TRBS_PER_SEGMENT > 2) + ch_bit = TRB_CHAIN; + + link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) | + TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit; + } + + if (priv_dev->dev_ver <= DEV_VER_V2) + togle_pcs = cdns3_wa1_update_guard(priv_ep, trb); + + /* set incorrect Cycle Bit for first trb*/ + control = priv_ep->pcs ? 0 : TRB_CYCLE; + + do { + u32 length; + u16 td_size = 0; + + /* fill TRB */ + control |= TRB_TYPE(TRB_NORMAL); + trb->buffer = TRB_BUFFER(request->num_sgs == 0 + ? trb_dma : request->sg[sg_iter].dma_address); + + if (likely(!request->num_sgs)) + length = request->length; + else + length = request->sg[sg_iter].length; + + if (likely(priv_dev->dev_ver >= DEV_VER_V2)) + td_size = DIV_ROUND_UP(length, + priv_ep->endpoint.maxpacket); + + trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) | + TRB_LEN(length); + if (priv_dev->gadget.speed == USB_SPEED_SUPER) + trb->length |= TRB_TDL_SS_SIZE(td_size); + else + control |= TRB_TDL_HS_SIZE(td_size); + + pcs = priv_ep->pcs ? TRB_CYCLE : 0; + + /* + * first trb should be prepared as last to avoid processing + * transfer to early + */ + if (sg_iter != 0) + control |= pcs; + + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) { + control |= TRB_IOC | TRB_ISP; + } else { + /* for last element in TD or in SG list */ + if (sg_iter == (num_trb - 1) && sg_iter != 0) + control |= pcs | TRB_IOC | TRB_ISP; + } + + if (sg_iter) + trb->control = control; + else + priv_req->trb->control = control; + + control = 0; + ++sg_iter; + priv_req->end_trb = priv_ep->enqueue; + cdns3_ep_inc_enq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->enqueue; + } while (sg_iter < num_trb); + + trb = priv_req->trb; + + priv_req->flags |= REQUEST_PENDING; + + if (sg_iter == 1) + trb->control |= TRB_IOC | TRB_ISP; + + /* + * Memory barrier - cycle bit must be set before other filds in trb. + */ + wmb(); + + /* give the TD to the consumer*/ + if (togle_pcs) + trb->control = trb->control ^ 1; + + if (priv_dev->dev_ver <= DEV_VER_V2) + cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); + + trace_cdns3_prepare_trb(priv_ep, priv_req->trb); + + /* + * Memory barrier - Cycle Bit must be set before trb->length and + * trb->buffer fields. + */ + wmb(); + + /* + * For DMULT mode we can set address to transfer ring only once after + * enabling endpoint. + */ + if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) { + /* + * Until SW is not ready to handle the OUT transfer the ISO OUT + * Endpoint should be disabled (EP_CFG.ENABLE = 0). + * EP_CFG_ENABLE must be set before updating ep_traddr. 
+ */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir && + !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) { + priv_ep->flags |= EP_QUIRK_ISO_OUT_EN; + cdns3_set_register_bit(&priv_dev->regs->ep_cfg, + EP_CFG_ENABLE); + } + + writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + + priv_req->start_trb * TRB_SIZE), + &priv_dev->regs->ep_traddr); + + priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR; + } + + if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) { + trace_cdns3_ring(priv_ep); + /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ + writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); + trace_cdns3_doorbell_epx(priv_ep->name, + readl(&priv_dev->regs->ep_traddr)); + } + + /* WORKAROUND for transition to L0 */ + __cdns3_gadget_wakeup(priv_dev); + + return 0; +} + +void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) +{ + struct cdns3_endpoint *priv_ep; + struct usb_ep *ep; + int val; + + if (priv_dev->hw_configured_flag) + return; + + writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf); + writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd); + + cdns3_set_register_bit(&priv_dev->regs->usb_conf, + USB_CONF_U1EN | USB_CONF_U2EN); + + /* wait until configuration set */ + readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val, + val & USB_STS_CFGSTS_MASK, 1, 100); + + priv_dev->hw_configured_flag = 1; + + list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { + if (ep->enabled) { + priv_ep = ep_to_cdns3_ep(ep); + cdns3_start_all_request(priv_dev, priv_ep); + } + } +} + +/** + * cdns3_request_handled - check whether request has been handled by DMA + * + * @priv_ep: extended endpoint object. + * @priv_req: request object for checking + * + * Endpoint must be selected before invoking this function. + * + * Returns false if request has not been handled by DMA, else returns true. + * + * SR - start ring + * ER - end ring + * DQ = priv_ep->dequeue - dequeue position + * EQ = priv_ep->enqueue - enqueue position + * ST = priv_req->start_trb - index of first TRB in transfer ring + * ET = priv_req->end_trb - index of last TRB in transfer ring + * CI = current_index - index of processed TRB by DMA. + * + * As first step, function checks if cycle bit for priv_req->start_trb is + * correct. + * + * some rules: + * 1. priv_ep->dequeue never exceed current_index. + * 2 priv_ep->enqueue never exceed priv_ep->dequeue + * 3. exception: priv_ep->enqueue == priv_ep->dequeue + * and priv_ep->free_trbs is zero. + * This case indicate that TR is full. + * + * Then We can split recognition into two parts: + * Case 1 - priv_ep->dequeue < current_index + * SR ... EQ ... DQ ... CI ... ER + * SR ... DQ ... CI ... EQ ... ER + * + * Request has been handled by DMA if ST and ET is between DQ and CI. + * + * Case 2 - priv_ep->dequeue > current_index + * This situation take place when CI go through the LINK TRB at the end of + * transfer ring. + * SR ... CI ... EQ ... DQ ... ER + * + * Request has been handled by DMA if ET is less then CI or + * ET is greater or equal DQ. 
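+ *
+ * A worked example with an 8-TRB ring (indices 0..7, slot 7 is the link TRB),
+ * illustration only:
+ *
+ *   Case 1: DQ = 1, CI = 5, request with ST = 2, ET = 3
+ *           DQ <= ET < CI, so the request has been handled.
+ *
+ *   Case 2: DQ = 5, CI = 2 (DMA already wrapped through the link TRB),
+ *           request with ST = 6, ET = 0
+ *           ET < CI, so the request has been handled; a request with ET = 3
+ *           would not be (3 >= CI and 3 < DQ).
+ *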
+ */ +static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_trb *trb = priv_req->trb; + int current_index = 0; + int handled = 0; + int doorbell; + + current_index = cdns3_get_dma_pos(priv_dev, priv_ep); + doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); + + trb = &priv_ep->trb_pool[priv_req->start_trb]; + + if ((trb->control & TRB_CYCLE) != priv_ep->ccs) + goto finish; + + if (doorbell == 1 && current_index == priv_ep->dequeue) + goto finish; + + /* The corner case for TRBS_PER_SEGMENT equal 2). */ + if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { + handled = 1; + goto finish; + } + + if (priv_ep->enqueue == priv_ep->dequeue && + priv_ep->free_trbs == 0) { + handled = 1; + } else if (priv_ep->dequeue < current_index) { + if ((current_index == (priv_ep->num_trbs - 1)) && + !priv_ep->dequeue) + goto finish; + + if (priv_req->end_trb >= priv_ep->dequeue && + priv_req->end_trb < current_index) + handled = 1; + } else if (priv_ep->dequeue > current_index) { + if (priv_req->end_trb < current_index || + priv_req->end_trb >= priv_ep->dequeue) + handled = 1; + } + +finish: + trace_cdns3_request_handled(priv_req, current_index, handled); + + return handled; +} + +static void cdns3_transfer_completed(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + struct cdns3_request *priv_req; + struct usb_request *request; + struct cdns3_trb *trb; + + while (!list_empty(&priv_ep->pending_req_list)) { + request = cdns3_next_request(&priv_ep->pending_req_list); + priv_req = to_cdns3_request(request); + + /* Re-select endpoint. It could be changed by other CPU during + * handling usb_gadget_giveback_request. + */ + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + if (!cdns3_request_handled(priv_ep, priv_req)) + goto prepare_next_td; + + trb = priv_ep->trb_pool + priv_ep->dequeue; + trace_cdns3_complete_trb(priv_ep, trb); + + if (trb != priv_req->trb) + dev_warn(priv_dev->dev, + "request_trb=0x%p, queue_trb=0x%p\n", + priv_req->trb, trb); + + request->actual = TRB_LEN(le32_to_cpu(trb->length)); + cdns3_move_deq_to_next_trb(priv_req); + cdns3_gadget_giveback(priv_ep, priv_req, 0); + + if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && + TRBS_PER_SEGMENT == 2) + break; + } + priv_ep->flags &= ~EP_PENDING_REQUEST; + +prepare_next_td: + if (!(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) + cdns3_start_all_request(priv_dev, priv_ep); +} + +void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + cdns3_wa1_restore_cycle_bit(priv_ep); + + if (rearm) { + trace_cdns3_ring(priv_ep); + + /* Cycle Bit must be updated before arming DMA. 
*/ + wmb(); + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); + + __cdns3_gadget_wakeup(priv_dev); + + trace_cdns3_doorbell_epx(priv_ep->name, + readl(&priv_dev->regs->ep_traddr)); + } +} + +/** + * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint + * @priv_ep: endpoint object + * + * Returns 0 + */ +static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + u32 ep_sts_reg; + + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + trace_cdns3_epx_irq(priv_dev, priv_ep); + + ep_sts_reg = readl(&priv_dev->regs->ep_sts); + writel(ep_sts_reg, &priv_dev->regs->ep_sts); + + if (ep_sts_reg & EP_STS_TRBERR) { + if (priv_ep->flags & EP_STALL_PENDING && + !(ep_sts_reg & EP_STS_DESCMIS && + priv_dev->dev_ver < DEV_VER_V2)) { + cdns3_ep_stall_flush(priv_ep); + } + + /* + * For isochronous transfer driver completes request on + * IOC or on TRBERR. IOC appears only when device receive + * OUT data packet. If host disable stream or lost some packet + * then the only way to finish all queued transfer is to do it + * on TRBERR event. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && + !priv_ep->wa1_set) { + if (!priv_ep->dir) { + u32 ep_cfg = readl(&priv_dev->regs->ep_cfg); + + ep_cfg &= ~EP_CFG_ENABLE; + writel(ep_cfg, &priv_dev->regs->ep_cfg); + priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; + } + cdns3_transfer_completed(priv_dev, priv_ep); + } else if (!(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) { + if (priv_ep->flags & EP_DEFERRED_DRDY) { + priv_ep->flags &= ~EP_DEFERRED_DRDY; + cdns3_start_all_request(priv_dev, priv_ep); + } else { + cdns3_rearm_transfer(priv_ep, + priv_ep->wa1_set); + } + } + } + + if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) { + if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { + if (ep_sts_reg & EP_STS_ISP) + priv_ep->flags |= EP_QUIRK_END_TRANSFER; + else + priv_ep->flags &= ~EP_QUIRK_END_TRANSFER; + } + + cdns3_transfer_completed(priv_dev, priv_ep); + } + + /* + * WA2: this condition should only be meet when + * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or + * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN. + * In other cases this interrupt will be disabled/ + */ + if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 && + !(priv_ep->flags & EP_STALLED)) + cdns3_wa2_descmissing_packet(priv_ep); + + return 0; +} + +static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev) +{ + if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) { + spin_unlock(&priv_dev->lock); + priv_dev->gadget_driver->disconnect(&priv_dev->gadget); + spin_lock(&priv_dev->lock); + } +} + +/** + * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device + * @priv_dev: extended gadget object + * @usb_ists: bitmap representation of device's reported interrupts + * (usb_ists register value) + */ +static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, + u32 usb_ists) +{ + int speed = 0; + + trace_cdns3_usb_irq(priv_dev, usb_ists); + if (usb_ists & USB_ISTS_L1ENTI) { + /* + * WORKAROUND: CDNS3 controller has issue with hardware resuming + * from L1. To fix it, if any DMA transfer is pending driver + * must starts driving resume signal immediately. 
+ */ + if (readl(&priv_dev->regs->drbl)) + __cdns3_gadget_wakeup(priv_dev); + } + + /* Connection detected */ + if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) { + speed = cdns3_get_speed(priv_dev); + priv_dev->gadget.speed = speed; + usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED); + cdns3_ep0_config(priv_dev); + } + + /* Disconnection detected */ + if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) { + cdns3_disconnect_gadget(priv_dev); + priv_dev->gadget.speed = USB_SPEED_UNKNOWN; + usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); + cdns3_hw_reset_eps_config(priv_dev); + } + + if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) { + if (priv_dev->gadget_driver && + priv_dev->gadget_driver->suspend) { + spin_unlock(&priv_dev->lock); + priv_dev->gadget_driver->suspend(&priv_dev->gadget); + spin_lock(&priv_dev->lock); + } + } + + if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) { + if (priv_dev->gadget_driver && + priv_dev->gadget_driver->resume) { + spin_unlock(&priv_dev->lock); + priv_dev->gadget_driver->resume(&priv_dev->gadget); + spin_lock(&priv_dev->lock); + } + } + + /* reset*/ + if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) { + if (priv_dev->gadget_driver) { + spin_unlock(&priv_dev->lock); + usb_gadget_udc_reset(&priv_dev->gadget, + priv_dev->gadget_driver); + spin_lock(&priv_dev->lock); + + /*read again to check the actual speed*/ + speed = cdns3_get_speed(priv_dev); + priv_dev->gadget.speed = speed; + cdns3_hw_reset_eps_config(priv_dev); + cdns3_ep0_config(priv_dev); + } + } +} + +/** + * cdns3_device_irq_handler- interrupt handler for device part of controller + * + * @irq: irq number for cdns3 core device + * @data: structure of cdns3 + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns3_device_irq_handler(int irq, void *data) +{ + struct cdns3_device *priv_dev; + struct cdns3 *cdns = data; + irqreturn_t ret = IRQ_NONE; + u32 reg; + + priv_dev = cdns->gadget_dev; + + /* check USB device interrupt */ + reg = readl(&priv_dev->regs->usb_ists); + if (reg) { + /* After masking interrupts the new interrupts won't be + * reported in usb_ists/ep_ists. In order to not lose some + * of them driver disables only detected interrupts. + * They will be enabled ASAP after clearing source of + * interrupt. This an unusual behavior only applies to + * usb_ists register. + */ + reg = ~reg & readl(&priv_dev->regs->usb_ien); + /* mask deferred interrupt. 
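+ *
+ * e.g. with usb_ists = 0x0005 and usb_ien = 0x00ff the handler writes
+ * ~0x0005 & 0x00ff = 0x00fa, leaving only the two just-reported sources
+ * masked until the threaded handler clears and re-enables them.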
*/ + writel(reg, &priv_dev->regs->usb_ien); + ret = IRQ_WAKE_THREAD; + } + + /* check endpoint interrupt */ + reg = readl(&priv_dev->regs->ep_ists); + if (reg) { + writel(0, &priv_dev->regs->ep_ien); + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +/** + * cdns3_device_thread_irq_handler- interrupt handler for device part + * of controller + * + * @irq: irq number for cdns3 core device + * @data: structure of cdns3 + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) +{ + struct cdns3_device *priv_dev; + struct cdns3 *cdns = data; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + int bit; + u32 reg; + + priv_dev = cdns->gadget_dev; + spin_lock_irqsave(&priv_dev->lock, flags); + + reg = readl(&priv_dev->regs->usb_ists); + if (reg) { + writel(reg, &priv_dev->regs->usb_ists); + writel(USB_IEN_INIT, &priv_dev->regs->usb_ien); + cdns3_check_usb_interrupt_proceed(priv_dev, reg); + ret = IRQ_HANDLED; + } + + reg = readl(&priv_dev->regs->ep_ists); + + /* handle default endpoint OUT */ + if (reg & EP_ISTS_EP_OUT0) { + cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT); + ret = IRQ_HANDLED; + } + + /* handle default endpoint IN */ + if (reg & EP_ISTS_EP_IN0) { + cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN); + ret = IRQ_HANDLED; + } + + /* check if interrupt from non default endpoint, if no exit */ + reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0); + if (!reg) + goto irqend; + + for_each_set_bit(bit, (unsigned long *)®, + sizeof(u32) * BITS_PER_BYTE) { + cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]); + ret = IRQ_HANDLED; + } + +irqend: + writel(~0, &priv_dev->regs->ep_ien); + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP + * + * The real reservation will occur during write to EP_CFG register, + * this function is used to check if the 'size' reservation is allowed. 
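+ *
+ * A worked example with illustrative numbers: with onchip_buffers = 18 KB,
+ * 2 KB permanently reserved for EP0 and 4 KB already accounted, 12 KB remain.
+ * An IN endpoint asking for 4 KB is granted and the used size grows to 8 KB.
+ * OUT endpoints share one region: if 3 KB are already set aside for OUT and
+ * an OUT endpoint asks for 5 KB, only the missing 2 KB are charged against
+ * the remaining space.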
+ * + * @priv_dev: extended gadget object + * @size: the size (KB) for EP would like to allocate + * @is_in: endpoint direction + * + * Return 0 if the required size can met or negative value on failure + */ +static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, + int size, int is_in) +{ + int remained; + + /* 2KB are reserved for EP0*/ + remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2; + + if (is_in) { + if (remained < size) + return -EPERM; + + priv_dev->onchip_used_size += size; + } else { + int required; + + /** + * ALL OUT EPs are shared the same chunk onchip memory, so + * driver checks if it already has assigned enough buffers + */ + if (priv_dev->out_mem_is_allocated >= size) + return 0; + + required = size - priv_dev->out_mem_is_allocated; + + if (required > remained) + return -EPERM; + + priv_dev->out_mem_is_allocated += required; + priv_dev->onchip_used_size += required; + } + + return 0; +} + +void cdns3_configure_dmult(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + struct cdns3_usb_regs __iomem *regs = priv_dev->regs; + + /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */ + if (priv_dev->dev_ver <= DEV_VER_V2) + writel(USB_CONF_DMULT, ®s->usb_conf); + + if (priv_dev->dev_ver == DEV_VER_V2) + writel(USB_CONF2_EN_TDL_TRB, ®s->usb_conf2); + + if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) { + u32 mask; + + if (priv_ep->dir) + mask = BIT(priv_ep->num + 16); + else + mask = BIT(priv_ep->num); + + if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) { + cdns3_set_register_bit(®s->tdl_from_trb, mask); + cdns3_set_register_bit(®s->tdl_beh, mask); + cdns3_set_register_bit(®s->tdl_beh2, mask); + cdns3_set_register_bit(®s->dma_adv_td, mask); + } + + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) + cdns3_set_register_bit(®s->tdl_from_trb, mask); + + cdns3_set_register_bit(®s->dtrans, mask); + } +} + +/** + * cdns3_ep_config Configure hardware endpoint + * @priv_ep: extended endpoint object + */ +void cdns3_ep_config(struct cdns3_endpoint *priv_ep) +{ + bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + u32 bEndpointAddress = priv_ep->num | priv_ep->dir; + u32 max_packet_size = 0; + u8 maxburst = 0; + u32 ep_cfg = 0; + u8 buffering; + u8 mult = 0; + int ret; + + buffering = CDNS3_EP_BUF_SIZE - 1; + + cdns3_configure_dmult(priv_dev, priv_ep); + + switch (priv_ep->type) { + case USB_ENDPOINT_XFER_INT: + ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT); + + if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) || + priv_dev->dev_ver > DEV_VER_V2) + ep_cfg |= EP_CFG_TDL_CHK; + break; + case USB_ENDPOINT_XFER_BULK: + ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK); + + if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) || + priv_dev->dev_ver > DEV_VER_V2) + ep_cfg |= EP_CFG_TDL_CHK; + break; + default: + ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC); + mult = CDNS3_EP_ISO_HS_MULT - 1; + buffering = mult + 1; + } + + switch (priv_dev->gadget.speed) { + case USB_SPEED_FULL: + max_packet_size = is_iso_ep ? 1023 : 64; + break; + case USB_SPEED_HIGH: + max_packet_size = is_iso_ep ? 1024 : 512; + break; + case USB_SPEED_SUPER: + /* It's limitation that driver assumes in driver. 
*/ + mult = 0; + max_packet_size = 1024; + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { + maxburst = CDNS3_EP_ISO_SS_BURST - 1; + buffering = (mult + 1) * + (maxburst + 1); + + if (priv_ep->interval > 1) + buffering++; + } else { + maxburst = CDNS3_EP_BUF_SIZE - 1; + } + break; + default: + /* all other speed are not supported */ + return; + } + + if (max_packet_size == 1024) + priv_ep->trb_burst_size = 128; + else if (max_packet_size >= 512) + priv_ep->trb_burst_size = 64; + else + priv_ep->trb_burst_size = 16; + + ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1, + !!priv_ep->dir); + if (ret) { + dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n"); + return; + } + + ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) | + EP_CFG_MULT(mult) | + EP_CFG_BUFFERING(buffering) | + EP_CFG_MAXBURST(maxburst); + + cdns3_select_ep(priv_dev, bEndpointAddress); + writel(ep_cfg, &priv_dev->regs->ep_cfg); + + dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n", + priv_ep->name, ep_cfg); +} + +/* Find correct direction for HW endpoint according to description */ +static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc, + struct cdns3_endpoint *priv_ep) +{ + return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) || + (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc)); +} + +static struct +cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev, + struct usb_endpoint_descriptor *desc) +{ + struct usb_ep *ep; + struct cdns3_endpoint *priv_ep; + + list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { + unsigned long num; + int ret; + /* ep name pattern likes epXin or epXout */ + char c[2] = {ep->name[2], '\0'}; + + ret = kstrtoul(c, 10, &num); + if (ret) + return ERR_PTR(ret); + + priv_ep = ep_to_cdns3_ep(ep); + if (cdns3_ep_dir_is_correct(desc, priv_ep)) { + if (!(priv_ep->flags & EP_CLAIMED)) { + priv_ep->num = num; + return priv_ep; + } + } + } + + return ERR_PTR(-ENOENT); +} + +/* + * Cadence IP has one limitation that all endpoints must be configured + * (Type & MaxPacketSize) before setting configuration through hardware + * register, it means we can't change endpoints configuration after + * set_configuration. + * + * This function set EP_CLAIMED flag which is added when the gadget driver + * uses usb_ep_autoconfig to configure specific endpoint; + * When the udc driver receives set_configurion request, + * it goes through all claimed endpoints, and configure all endpoints + * accordingly. + * + * At usb_ep_ops.enable/disable, we only enable and disable endpoint through + * ep_cfg register which can be changed after set_configuration, and do + * some software operation accordingly. + */ +static struct +usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget, + struct usb_endpoint_descriptor *desc, + struct usb_ss_ep_comp_descriptor *comp_desc) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + struct cdns3_endpoint *priv_ep; + unsigned long flags; + + priv_ep = cdns3_find_available_ep(priv_dev, desc); + if (IS_ERR(priv_ep)) { + dev_err(priv_dev->dev, "no available ep\n"); + return NULL; + } + + dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name); + + spin_lock_irqsave(&priv_dev->lock, flags); + priv_ep->endpoint.desc = desc; + priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT; + priv_ep->type = usb_endpoint_type(desc); + priv_ep->flags |= EP_CLAIMED; + priv_ep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; + + spin_unlock_irqrestore(&priv_dev->lock, flags); + return &priv_ep->endpoint; +} + +/** + * cdns3_gadget_ep_alloc_request Allocates request + * @ep: endpoint object associated with request + * @gfp_flags: gfp flags + * + * Returns allocated request address, NULL on allocation error + */ +struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_request *priv_req; + + priv_req = kzalloc(sizeof(*priv_req), gfp_flags); + if (!priv_req) + return NULL; + + priv_req->priv_ep = priv_ep; + + trace_cdns3_alloc_request(priv_req); + return &priv_req->request; +} + +/** + * cdns3_gadget_ep_free_request Free memory occupied by request + * @ep: endpoint object associated with request + * @request: request to free memory + */ +void cdns3_gadget_ep_free_request(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdns3_request *priv_req = to_cdns3_request(request); + + if (priv_req->aligned_buf) + priv_req->aligned_buf->in_use = 0; + + trace_cdns3_free_request(priv_req); + kfree(priv_req); +} + +/** + * cdns3_gadget_ep_enable Enable endpoint + * @ep: endpoint object + * @desc: endpoint descriptor + * + * Returns 0 on success, error code elsewhere + */ +static int cdns3_gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct cdns3_endpoint *priv_ep; + struct cdns3_device *priv_dev; + u32 reg = EP_STS_EN_TRBERREN; + u32 bEndpointAddress; + unsigned long flags; + int enable = 1; + int ret; + int val; + + priv_ep = ep_to_cdns3_ep(ep); + priv_dev = priv_ep->cdns3_dev; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { + dev_dbg(priv_dev->dev, "usbss: invalid parameters\n"); + return -EINVAL; + } + + if (!desc->wMaxPacketSize) { + dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n"); + return -EINVAL; + } + + if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED, + "%s is already enabled\n", priv_ep->name)) + return 0; + + spin_lock_irqsave(&priv_dev->lock, flags); + + priv_ep->endpoint.desc = desc; + priv_ep->type = usb_endpoint_type(desc); + priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; + + if (priv_ep->interval > ISO_MAX_INTERVAL && + priv_ep->type == USB_ENDPOINT_XFER_ISOC) { + dev_err(priv_dev->dev, "Driver is limited to %d period\n", + ISO_MAX_INTERVAL); + + ret = -EINVAL; + goto exit; + } + + ret = cdns3_allocate_trb_pool(priv_ep); + + if (ret) + goto exit; + + bEndpointAddress = priv_ep->num | priv_ep->dir; + cdns3_select_ep(priv_dev, bEndpointAddress); + + trace_cdns3_gadget_ep_enable(priv_ep); + + writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + + ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), + 1, 1000); + + if (unlikely(ret)) { + cdns3_free_trb_pool(priv_ep); + ret = -EINVAL; + goto exit; + } + + /* enable interrupt for selected endpoint */ + cdns3_set_register_bit(&priv_dev->regs->ep_ien, + BIT(cdns3_ep_addr_to_index(bEndpointAddress))); + + if (priv_dev->dev_ver < DEV_VER_V2) + cdns3_wa2_enable_detection(priv_dev, priv_ep, reg); + + writel(reg, &priv_dev->regs->ep_sts_en); + + /* + * For some versions of controller at some point during ISO OUT traffic + * DMA reads Transfer Ring for the EP which has never got doorbell. + * This issue was detected only on simulation, but to avoid this issue + * driver add protection against it. To fix it driver enable ISO OUT + * endpoint before setting DRBL. 
This special treatment of ISO OUT + * endpoints are recommended by controller specification. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) + enable = 0; + + if (enable) + cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE); + + ep->desc = desc; + priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | + EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); + priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; + priv_ep->wa1_set = 0; + priv_ep->enqueue = 0; + priv_ep->dequeue = 0; + reg = readl(&priv_dev->regs->ep_sts); + priv_ep->pcs = !!EP_STS_CCS(reg); + priv_ep->ccs = !!EP_STS_CCS(reg); + /* one TRB is reserved for link TRB used in DMULT mode*/ + priv_ep->free_trbs = priv_ep->num_trbs - 1; +exit: + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_gadget_ep_disable Disable endpoint + * @ep: endpoint object + * + * Returns 0 on success, error code elsewhere + */ +static int cdns3_gadget_ep_disable(struct usb_ep *ep) +{ + struct cdns3_endpoint *priv_ep; + struct cdns3_request *priv_req; + struct cdns3_device *priv_dev; + struct usb_request *request; + unsigned long flags; + int ret = 0; + u32 ep_cfg; + int val; + + if (!ep) { + pr_err("usbss: invalid parameters\n"); + return -EINVAL; + } + + priv_ep = ep_to_cdns3_ep(ep); + priv_dev = priv_ep->cdns3_dev; + + if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED), + "%s is already disabled\n", priv_ep->name)) + return 0; + + spin_lock_irqsave(&priv_dev->lock, flags); + + trace_cdns3_gadget_ep_disable(priv_ep); + + cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); + + ep_cfg = readl(&priv_dev->regs->ep_cfg); + ep_cfg &= ~EP_CFG_ENABLE; + writel(ep_cfg, &priv_dev->regs->ep_cfg); + + /** + * Driver needs some time before resetting endpoint. + * It need waits for clearing DBUSY bit or for timeout expired. + * 10us is enough time for controller to stop transfer. 
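+ *
+ * The readl_poll_timeout_atomic() call below amounts to an open-coded
+ * busy-wait of roughly this shape (sketch only, kernel context assumed,
+ * ep_sts_reg standing in for &priv_dev->regs->ep_sts):
+ *
+ *   static int wait_dbusy_clear(void __iomem *ep_sts_reg)
+ *   {
+ *           unsigned int waited_us = 0;
+ *
+ *           while (readl(ep_sts_reg) & EP_STS_DBUSY) {
+ *                   if (waited_us >= 10)
+ *                           return -ETIMEDOUT;   // give up after ~10 us
+ *                   udelay(1);                   // poll interval: 1 us
+ *                   waited_us++;
+ *           }
+ *           return 0;
+ *   }
+ *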
+ */ + readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val, + !(val & EP_STS_DBUSY), 1, 10); + writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + + readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), + 1, 1000); + if (unlikely(ret)) + dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n", + priv_ep->name); + + while (!list_empty(&priv_ep->pending_req_list)) { + request = cdns3_next_request(&priv_ep->pending_req_list); + + cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), + -ESHUTDOWN); + } + + while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { + priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); + + kfree(priv_req->request.buf); + cdns3_gadget_ep_free_request(&priv_ep->endpoint, + &priv_req->request); + list_del_init(&priv_req->list); + --priv_ep->wa2_counter; + } + + while (!list_empty(&priv_ep->deferred_req_list)) { + request = cdns3_next_request(&priv_ep->deferred_req_list); + + cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), + -ESHUTDOWN); + } + + priv_ep->descmis_req = NULL; + + ep->desc = NULL; + priv_ep->flags &= ~EP_ENABLED; + + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +/** + * cdns3_gadget_ep_queue Transfer data on endpoint + * @ep: endpoint object + * @request: request object + * @gfp_flags: gfp flags + * + * Returns 0 on success, error code elsewhere + */ +static int __cdns3_gadget_ep_queue(struct usb_ep *ep, + struct usb_request *request, + gfp_t gfp_flags) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_request *priv_req; + int ret = 0; + + request->actual = 0; + request->status = -EINPROGRESS; + priv_req = to_cdns3_request(request); + trace_cdns3_ep_queue(priv_req); + + if (priv_dev->dev_ver < DEV_VER_V2) { + ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, + priv_req); + + if (ret == EINPROGRESS) + return 0; + } + + ret = cdns3_prepare_aligned_request_buf(priv_req); + if (ret < 0) + return ret; + + ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request, + usb_endpoint_dir_in(ep->desc)); + if (ret) + return ret; + + list_add_tail(&request->list, &priv_ep->deferred_req_list); + + /* + * If hardware endpoint configuration has not been set yet then + * just queue request in deferred list. Transfer will be started in + * cdns3_set_hw_configuration. 
+ */ + if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) + cdns3_start_all_request(priv_dev, priv_ep); + + return 0; +} + +static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, + gfp_t gfp_flags) +{ + struct usb_request *zlp_request; + struct cdns3_endpoint *priv_ep; + struct cdns3_device *priv_dev; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + priv_ep = ep_to_cdns3_ep(ep); + priv_dev = priv_ep->cdns3_dev; + + spin_lock_irqsave(&priv_dev->lock, flags); + + ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); + + if (ret == 0 && request->zero && request->length && + (request->length % ep->maxpacket == 0)) { + struct cdns3_request *priv_req; + + zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); + zlp_request->buf = priv_dev->zlp_buf; + zlp_request->length = 0; + + priv_req = to_cdns3_request(zlp_request); + priv_req->flags |= REQUEST_ZLP; + + dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", + priv_ep->name); + ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); + } + + spin_unlock_irqrestore(&priv_dev->lock, flags); + return ret; +} + +/** + * cdns3_gadget_ep_dequeue Remove request from transfer queue + * @ep: endpoint object associated with request + * @request: request object + * + * Returns 0 on success, error code elsewhere + */ +int cdns3_gadget_ep_dequeue(struct usb_ep *ep, + struct usb_request *request) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct usb_request *req, *req_temp; + struct cdns3_request *priv_req; + struct cdns3_trb *link_trb; + unsigned long flags; + int ret = 0; + + if (!ep || !request || !ep->desc) + return -EINVAL; + + spin_lock_irqsave(&priv_dev->lock, flags); + + priv_req = to_cdns3_request(request); + + trace_cdns3_ep_dequeue(priv_req); + + cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); + + list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, + list) { + if (request == req) + goto found; + } + + list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, + list) { + if (request == req) + goto found; + } + + goto not_found; + +found: + + if (priv_ep->wa1_trb == priv_req->trb) + cdns3_wa1_restore_cycle_bit(priv_ep); + + link_trb = priv_req->trb; + cdns3_move_deq_to_next_trb(priv_req); + cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); + + /* Update ring */ + request = cdns3_next_request(&priv_ep->deferred_req_list); + if (request) { + priv_req = to_cdns3_request(request); + + link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + + (priv_req->start_trb * TRB_SIZE)); + link_trb->control = (link_trb->control & TRB_CYCLE) | + TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE; + } else { + priv_ep->flags |= EP_UPDATE_EP_TRBADDR; + } + +not_found: + spin_unlock_irqrestore(&priv_dev->lock, flags); + return ret; +} + +/** + * __cdns3_gadget_ep_set_halt Sets stall on selected endpoint + * Should be called after acquiring spin_lock and selecting ep + * @ep: endpoint object to set stall on. 
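+ *
+ * For reference, a function driver reaches this path through the standard
+ * endpoint API (usage sketch, not part of this patch):
+ *
+ *   // respond to a protocol error on a bulk endpoint
+ *   usb_ep_set_halt(ep);    // ends up in cdns3_gadget_ep_set_halt(ep, 1)
+ *
+ *   // mass-storage style stall that only the host's explicit clear removes
+ *   usb_ep_set_wedge(ep);
+ *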
+ */ +void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + trace_cdns3_halt(priv_ep, 1, 0); + + if (!(priv_ep->flags & EP_STALLED)) { + u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); + + if (!(ep_sts_reg & EP_STS_DBUSY)) + cdns3_ep_stall_flush(priv_ep); + else + priv_ep->flags |= EP_STALL_PENDING; + } +} + +/** + * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint + * Should be called after acquiring spin_lock and selecting ep + * @ep: endpoint object to clear stall on + */ +int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct usb_request *request; + int ret; + int val; + + trace_cdns3_halt(priv_ep, 0, 0); + + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + + /* wait for EPRST cleared */ + ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & EP_CMD_EPRST), 1, 100); + if (ret) + return -EINVAL; + + priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); + + request = cdns3_next_request(&priv_ep->pending_req_list); + + if (request) + cdns3_rearm_transfer(priv_ep, 1); + + cdns3_start_all_request(priv_dev, priv_ep); + return ret; +} + +/** + * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint + * @ep: endpoint object to set/clear stall on + * @value: 1 for set stall, 0 for clear stall + * + * Returns 0 on success, error code elsewhere + */ +int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + unsigned long flags; + int ret = 0; + + if (!(priv_ep->flags & EP_ENABLED)) + return -EPERM; + + spin_lock_irqsave(&priv_dev->lock, flags); + + cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); + + if (!value) { + priv_ep->flags &= ~EP_WEDGE; + ret = __cdns3_gadget_ep_clear_halt(priv_ep); + } else { + __cdns3_gadget_ep_set_halt(priv_ep); + } + + spin_unlock_irqrestore(&priv_dev->lock, flags); + + return ret; +} + +extern const struct usb_ep_ops cdns3_gadget_ep0_ops; + +static const struct usb_ep_ops cdns3_gadget_ep_ops = { + .enable = cdns3_gadget_ep_enable, + .disable = cdns3_gadget_ep_disable, + .alloc_request = cdns3_gadget_ep_alloc_request, + .free_request = cdns3_gadget_ep_free_request, + .queue = cdns3_gadget_ep_queue, + .dequeue = cdns3_gadget_ep_dequeue, + .set_halt = cdns3_gadget_ep_set_halt, + .set_wedge = cdns3_gadget_ep_set_wedge, +}; + +/** + * cdns3_gadget_get_frame Returns number of actual ITP frame + * @gadget: gadget object + * + * Returns number of actual ITP frame + */ +static int cdns3_gadget_get_frame(struct usb_gadget *gadget) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + + return readl(&priv_dev->regs->usb_itpn); +} + +int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev) +{ + enum usb_device_speed speed; + + speed = cdns3_get_speed(priv_dev); + + if (speed >= USB_SPEED_SUPER) + return 0; + + /* Start driving resume signaling to indicate remote wakeup. 
*/ + writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf); + + return 0; +} + +static int cdns3_gadget_wakeup(struct usb_gadget *gadget) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv_dev->lock, flags); + ret = __cdns3_gadget_wakeup(priv_dev); + spin_unlock_irqrestore(&priv_dev->lock, flags); + return ret; +} + +static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget, + int is_selfpowered) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + unsigned long flags; + + spin_lock_irqsave(&priv_dev->lock, flags); + priv_dev->is_selfpowered = !!is_selfpowered; + spin_unlock_irqrestore(&priv_dev->lock, flags); + return 0; +} + +static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + + if (is_on) + writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); + else + writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); + + return 0; +} + +static void cdns3_gadget_config(struct cdns3_device *priv_dev) +{ + struct cdns3_usb_regs __iomem *regs = priv_dev->regs; + u32 reg; + + cdns3_ep0_config(priv_dev); + + /* enable interrupts for endpoint 0 (in and out) */ + writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, ®s->ep_ien); + + /* + * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1 + * revision of controller. + */ + if (priv_dev->dev_ver == DEV_VER_TI_V1) { + reg = readl(®s->dbg_link1); + + reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK; + reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) | + DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET; + writel(reg, ®s->dbg_link1); + } + + /* + * By default some platforms has set protected access to memory. + * This cause problem with cache, so driver restore non-secure + * access to memory. 
+ */ + reg = readl(®s->dma_axi_ctrl); + reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) | + DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE); + writel(reg, ®s->dma_axi_ctrl); + + /* enable generic interrupt*/ + writel(USB_IEN_INIT, ®s->usb_ien); + writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); + + cdns3_configure_dmult(priv_dev, NULL); + + cdns3_gadget_pullup(&priv_dev->gadget, 1); +} + +/** + * cdns3_gadget_udc_start Gadget start + * @gadget: gadget object + * @driver: driver which operates on this gadget + * + * Returns 0 on success, error code elsewhere + */ +static int cdns3_gadget_udc_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + unsigned long flags; + + spin_lock_irqsave(&priv_dev->lock, flags); + priv_dev->gadget_driver = driver; + cdns3_gadget_config(priv_dev); + spin_unlock_irqrestore(&priv_dev->lock, flags); + return 0; +} + +/** + * cdns3_gadget_udc_stop Stops gadget + * @gadget: gadget object + * + * Returns 0 + */ +static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + struct cdns3_endpoint *priv_ep; + u32 bEndpointAddress; + struct usb_ep *ep; + int ret = 0; + int val; + + priv_dev->gadget_driver = NULL; + + priv_dev->onchip_used_size = 0; + priv_dev->out_mem_is_allocated = 0; + priv_dev->gadget.speed = USB_SPEED_UNKNOWN; + + list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { + priv_ep = ep_to_cdns3_ep(ep); + bEndpointAddress = priv_ep->num | priv_ep->dir; + cdns3_select_ep(priv_dev, bEndpointAddress); + writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); + readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, + !(val & EP_CMD_EPRST), 1, 100); + } + + /* disable interrupt for device */ + writel(0, &priv_dev->regs->usb_ien); + writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); + + return ret; +} + +static const struct usb_gadget_ops cdns3_gadget_ops = { + .get_frame = cdns3_gadget_get_frame, + .wakeup = cdns3_gadget_wakeup, + .set_selfpowered = cdns3_gadget_set_selfpowered, + .pullup = cdns3_gadget_pullup, + .udc_start = cdns3_gadget_udc_start, + .udc_stop = cdns3_gadget_udc_stop, + .match_ep = cdns3_gadget_match_ep, +}; + +static void cdns3_free_all_eps(struct cdns3_device *priv_dev) +{ + int i; + + /* ep0 OUT point to ep0 IN. 
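+ *
+ * (eps[] is indexed with OUT endpoints in slots 0..15 and IN endpoints in
+ * slots 16..31, so ep0 IN lives in slot 16 and shares the object stored in
+ * slot 0.)  A standalone sketch of the address-to-index mapping used
+ * throughout the driver, assuming USB_DIR_IN is 0x80 as in the USB spec:
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   #define DIR_IN 0x80u
+ *
+ *   static uint8_t ep_addr_to_index(uint8_t ep_addr)
+ *   {
+ *           return (ep_addr & 0x7f) + ((ep_addr & DIR_IN) ? 16 : 0);
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *           printf("%u %u %u\n",
+ *                  ep_addr_to_index(0x00),    // ep0 out -> 0
+ *                  ep_addr_to_index(0x80),    // ep0 in  -> 16
+ *                  ep_addr_to_index(0x81));   // ep1 in  -> 17
+ *           return 0;
+ *   }
+ *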
*/
+ priv_dev->eps[16] = NULL;
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+ if (priv_dev->eps[i]) {
+ cdns3_free_trb_pool(priv_dev->eps[i]);
+ devm_kfree(priv_dev->dev, priv_dev->eps[i]);
+ }
+}
+
+/**
+ * cdns3_init_eps - Initializes software endpoints of gadget
+ * @priv_dev: extended gadget object
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_init_eps(struct cdns3_device *priv_dev)
+{
+ u32 ep_enabled_reg, iso_ep_reg;
+ struct cdns3_endpoint *priv_ep;
+ int ep_dir, ep_number;
+ u32 ep_mask;
+ int ret = 0;
+ int i;
+
+ /* Read it from USB_CAP3 to USB_CAP5 */
+ ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
+ iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
+
+ dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ ep_dir = i >> 4; /* i div 16 */
+ ep_number = i & 0xF; /* i % 16 */
+ ep_mask = BIT(i);
+
+ if (!(ep_enabled_reg & ep_mask))
+ continue;
+
+ if (ep_dir && !ep_number) {
+ priv_dev->eps[i] = priv_dev->eps[0];
+ continue;
+ }
+
+ priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
+ GFP_KERNEL);
+ if (!priv_ep)
+ goto err;
+
+ /* set parent of endpoint object */
+ priv_ep->cdns3_dev = priv_dev;
+ priv_dev->eps[i] = priv_ep;
+ priv_ep->num = ep_number;
+ priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
+
+ if (!ep_number) {
+ ret = cdns3_init_ep0(priv_dev, priv_ep);
+ if (ret) {
+ dev_err(priv_dev->dev, "Failed to init ep0\n");
+ goto err;
+ }
+ } else {
+ snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
+ ep_number, !!ep_dir ? "in" : "out");
+ priv_ep->endpoint.name = priv_ep->name;
+
+ usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
+ CDNS3_EP_MAX_PACKET_LIMIT);
+ priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
+ priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
+ if (ep_dir)
+ priv_ep->endpoint.caps.dir_in = 1;
+ else
+ priv_ep->endpoint.caps.dir_out = 1;
+
+ if (iso_ep_reg & ep_mask)
+ priv_ep->endpoint.caps.type_iso = 1;
+
+ priv_ep->endpoint.caps.type_bulk = 1;
+ priv_ep->endpoint.caps.type_int = 1;
+
+ list_add_tail(&priv_ep->endpoint.ep_list,
+ &priv_dev->gadget.ep_list);
+ }
+
+ priv_ep->flags = 0;
+
+ dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
+ priv_ep->name,
+ priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
+ priv_ep->endpoint.caps.type_iso ?
"ISO" : ""); + + INIT_LIST_HEAD(&priv_ep->pending_req_list); + INIT_LIST_HEAD(&priv_ep->deferred_req_list); + INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); + } + + return 0; +err: + cdns3_free_all_eps(priv_dev); + return -ENOMEM; +} + +void cdns3_gadget_exit(struct cdns3 *cdns) +{ + struct cdns3_device *priv_dev; + + priv_dev = cdns->gadget_dev; + + devm_free_irq(cdns->dev, cdns->dev_irq, cdns); + + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); + + usb_del_gadget_udc(&priv_dev->gadget); + + cdns3_free_all_eps(priv_dev); + + while (!list_empty(&priv_dev->aligned_buf_list)) { + struct cdns3_aligned_buf *buf; + + buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); + dma_free_coherent(priv_dev->sysdev, buf->size, + buf->buf, + buf->dma); + + list_del(&buf->list); + kfree(buf); + } + + dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, + priv_dev->setup_dma); + + kfree(priv_dev->zlp_buf); + kfree(priv_dev); + cdns->gadget_dev = NULL; + cdns3_drd_switch_gadget(cdns, 0); +} + +static int cdns3_gadget_start(struct cdns3 *cdns) +{ + struct cdns3_device *priv_dev; + u32 max_speed; + int ret; + + priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); + if (!priv_dev) + return -ENOMEM; + + cdns->gadget_dev = priv_dev; + priv_dev->sysdev = cdns->dev; + priv_dev->dev = cdns->dev; + priv_dev->regs = cdns->dev_regs; + + device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", + &priv_dev->onchip_buffers); + + if (priv_dev->onchip_buffers <= 0) { + u32 reg = readl(&priv_dev->regs->usb_cap2); + + priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); + } + + if (!priv_dev->onchip_buffers) + priv_dev->onchip_buffers = 256; + + max_speed = usb_get_maximum_speed(cdns->dev); + + /* Check the maximum_speed parameter */ + switch (max_speed) { + case USB_SPEED_FULL: + writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); + break; + case USB_SPEED_HIGH: + writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); + break; + case USB_SPEED_SUPER: + break; + default: + dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", + max_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + max_speed = USB_SPEED_SUPER; + break; + } + + /* fill gadget fields */ + priv_dev->gadget.max_speed = max_speed; + priv_dev->gadget.speed = USB_SPEED_UNKNOWN; + priv_dev->gadget.ops = &cdns3_gadget_ops; + priv_dev->gadget.name = "usb-ss-gadget"; + priv_dev->gadget.sg_supported = 1; + priv_dev->gadget.quirk_avoids_skb_reserve = 1; + + spin_lock_init(&priv_dev->lock); + INIT_WORK(&priv_dev->pending_status_wq, + cdns3_pending_setup_status_handler); + + INIT_WORK(&priv_dev->aligned_buf_wq, + cdns3_free_aligned_request_buf); + + /* initialize endpoint container */ + INIT_LIST_HEAD(&priv_dev->gadget.ep_list); + INIT_LIST_HEAD(&priv_dev->aligned_buf_list); + + ret = cdns3_init_eps(priv_dev); + if (ret) { + dev_err(priv_dev->dev, "Failed to create endpoints\n"); + goto err1; + } + + /* allocate memory for setup packet buffer */ + priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8, + &priv_dev->setup_dma, GFP_DMA); + if (!priv_dev->setup_buf) { + ret = -ENOMEM; + goto err2; + } + + priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); + + dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", + readl(&priv_dev->regs->usb_cap6)); + dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n", + readl(&priv_dev->regs->usb_cap1)); + dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n", + readl(&priv_dev->regs->usb_cap2)); + + priv_dev->dev_ver = 
GET_DEV_BASE_VERSION(priv_dev->dev_ver); + + priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); + if (!priv_dev->zlp_buf) { + ret = -ENOMEM; + goto err3; + } + + /* add USB gadget device */ + ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget); + if (ret < 0) { + dev_err(priv_dev->dev, + "Failed to register USB device controller\n"); + goto err4; + } + + return 0; +err4: + kfree(priv_dev->zlp_buf); +err3: + dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, + priv_dev->setup_dma); +err2: + cdns3_free_all_eps(priv_dev); +err1: + cdns->gadget_dev = NULL; + return ret; +} + +static int __cdns3_gadget_init(struct cdns3 *cdns) +{ + int ret = 0; + + cdns3_drd_switch_gadget(cdns, 1); + pm_runtime_get_sync(cdns->dev); + + ret = cdns3_gadget_start(cdns); + if (ret) + return ret; + + /* + * Because interrupt line can be shared with other components in + * driver it can't use IRQF_ONESHOT flag here. + */ + ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, + cdns3_device_irq_handler, + cdns3_device_thread_irq_handler, + IRQF_SHARED, dev_name(cdns->dev), cdns); + + if (ret) + goto err0; + + return 0; +err0: + cdns3_gadget_exit(cdns); + return ret; +} + +static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup) +{ + struct cdns3_device *priv_dev = cdns->gadget_dev; + + cdns3_disconnect_gadget(priv_dev); + + priv_dev->gadget.speed = USB_SPEED_UNKNOWN; + usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); + cdns3_hw_reset_eps_config(priv_dev); + + /* disable interrupt for device */ + writel(0, &priv_dev->regs->usb_ien); + + cdns3_gadget_pullup(&priv_dev->gadget, 0); + + return 0; +} + +static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated) +{ + struct cdns3_device *priv_dev = cdns->gadget_dev; + + if (!priv_dev->gadget_driver) + return 0; + + cdns3_gadget_config(priv_dev); + + return 0; +} + +/** + * cdns3_gadget_init - initialize device structure + * + * cdns: cdns3 instance + * + * This function initializes the gadget. + */ +int cdns3_gadget_init(struct cdns3 *cdns) +{ + struct cdns3_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = __cdns3_gadget_init; + rdrv->stop = cdns3_gadget_exit; + rdrv->suspend = cdns3_gadget_suspend; + rdrv->resume = cdns3_gadget_resume; + rdrv->state = CDNS3_ROLE_STATE_INACTIVE; + rdrv->name = "gadget"; + cdns->roles[USB_ROLE_DEVICE] = rdrv; + + return 0; +} diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h new file mode 100644 index 000000000000..bc4024041ef2 --- /dev/null +++ b/drivers/usb/cdns3/gadget.h @@ -0,0 +1,1338 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * USBSS device controller driver header file + * + * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2017-2018 NXP + * + * Author: Pawel Laszczak <pawell@cadence.com> + * Pawel Jez <pjez@cadence.com> + * Peter Chen <peter.chen@nxp.com> + */ +#ifndef __LINUX_CDNS3_GADGET +#define __LINUX_CDNS3_GADGET +#include <linux/usb/gadget.h> + +/* + * USBSS-DEV register interface. + * This corresponds to the USBSS Device Controller Interface + */ + +/** + * struct cdns3_usb_regs - device controller registers. + * @usb_conf: Global Configuration. + * @usb_sts: Global Status. + * @usb_cmd: Global Command. + * @usb_itpn: ITP/SOF number. + * @usb_lpm: Global Command. + * @usb_ien: USB Interrupt Enable. + * @usb_ists: USB Interrupt Status. + * @ep_sel: Endpoint Select. + * @ep_traddr: Endpoint Transfer Ring Address. + * @ep_cfg: Endpoint Configuration. 
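+ * (Note: ep_cfg, together with ep_traddr, ep_cmd and ep_sts below,
+ * appears to be banked per endpoint; accesses apply to whichever
+ * endpoint was last selected through @ep_sel.)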
+ * @ep_cmd: Endpoint Command. + * @ep_sts: Endpoint Status. + * @ep_sts_sid: Endpoint Status. + * @ep_sts_en: Endpoint Status Enable. + * @drbl: Doorbell. + * @ep_ien: EP Interrupt Enable. + * @ep_ists: EP Interrupt Status. + * @usb_pwr: Global Power Configuration. + * @usb_conf2: Global Configuration 2. + * @usb_cap1: Capability 1. + * @usb_cap2: Capability 2. + * @usb_cap3: Capability 3. + * @usb_cap4: Capability 4. + * @usb_cap5: Capability 5. + * @usb_cap6: Capability 6. + * @usb_cpkt1: Custom Packet 1. + * @usb_cpkt2: Custom Packet 2. + * @usb_cpkt3: Custom Packet 3. + * @ep_dma_ext_addr: Upper address for DMA operations. + * @buf_addr: Address for On-chip Buffer operations. + * @buf_data: Data for On-chip Buffer operations. + * @buf_ctrl: On-chip Buffer Access Control. + * @dtrans: DMA Transfer Mode. + * @tdl_from_trb: Source of TD Configuration. + * @tdl_beh: TDL Behavior Configuration. + * @ep_tdl: Endpoint TDL. + * @tdl_beh2: TDL Behavior 2 Configuration. + * @dma_adv_td: DMA Advance TD Configuration. + * @reserved1: Reserved. + * @cfg_regs: Configuration. + * @reserved2: Reserved. + * @dma_axi_ctrl: AXI Control. + * @dma_axi_id: AXI ID register. + * @dma_axi_cap: AXI Capability. + * @dma_axi_ctrl0: AXI Control 0. + * @dma_axi_ctrl1: AXI Control 1. + */ +struct cdns3_usb_regs { + __le32 usb_conf; + __le32 usb_sts; + __le32 usb_cmd; + __le32 usb_itpn; + __le32 usb_lpm; + __le32 usb_ien; + __le32 usb_ists; + __le32 ep_sel; + __le32 ep_traddr; + __le32 ep_cfg; + __le32 ep_cmd; + __le32 ep_sts; + __le32 ep_sts_sid; + __le32 ep_sts_en; + __le32 drbl; + __le32 ep_ien; + __le32 ep_ists; + __le32 usb_pwr; + __le32 usb_conf2; + __le32 usb_cap1; + __le32 usb_cap2; + __le32 usb_cap3; + __le32 usb_cap4; + __le32 usb_cap5; + __le32 usb_cap6; + __le32 usb_cpkt1; + __le32 usb_cpkt2; + __le32 usb_cpkt3; + __le32 ep_dma_ext_addr; + __le32 buf_addr; + __le32 buf_data; + __le32 buf_ctrl; + __le32 dtrans; + __le32 tdl_from_trb; + __le32 tdl_beh; + __le32 ep_tdl; + __le32 tdl_beh2; + __le32 dma_adv_td; + __le32 reserved1[26]; + __le32 cfg_reg1; + __le32 dbg_link1; + __le32 dbg_link2; + __le32 cfg_regs[74]; + __le32 reserved2[51]; + __le32 dma_axi_ctrl; + __le32 dma_axi_id; + __le32 dma_axi_cap; + __le32 dma_axi_ctrl0; + __le32 dma_axi_ctrl1; +}; + +/* USB_CONF - bitmasks */ +/* Reset USB device configuration. */ +#define USB_CONF_CFGRST BIT(0) +/* Set Configuration. */ +#define USB_CONF_CFGSET BIT(1) +/* Disconnect USB device in SuperSpeed. */ +#define USB_CONF_USB3DIS BIT(3) +/* Disconnect USB device in HS/FS */ +#define USB_CONF_USB2DIS BIT(4) +/* Little Endian access - default */ +#define USB_CONF_LENDIAN BIT(5) +/* + * Big Endian access. Driver assume that byte order for + * SFRs access always is as Little Endian so this bit + * is not used. + */ +#define USB_CONF_BENDIAN BIT(6) +/* Device software reset. */ +#define USB_CONF_SWRST BIT(7) +/* Singular DMA transfer mode. Only for VER < DEV_VER_V3*/ +#define USB_CONF_DSING BIT(8) +/* Multiple DMA transfers mode. Only for VER < DEV_VER_V3 */ +#define USB_CONF_DMULT BIT(9) +/* DMA clock turn-off enable. */ +#define USB_CONF_DMAOFFEN BIT(10) +/* DMA clock turn-off disable. */ +#define USB_CONF_DMAOFFDS BIT(11) +/* Clear Force Full Speed. */ +#define USB_CONF_CFORCE_FS BIT(12) +/* Set Force Full Speed. */ +#define USB_CONF_SFORCE_FS BIT(13) +/* Device enable. */ +#define USB_CONF_DEVEN BIT(14) +/* Device disable. */ +#define USB_CONF_DEVDS BIT(15) +/* L1 LPM state entry enable (used in HS/FS mode). 
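+ * L1 here is the USB 2.0 Link Power Management "Sleep" state requested
+ * by the host with an LPM token; L2 is the conventional suspend state.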
*/ +#define USB_CONF_L1EN BIT(16) +/* L1 LPM state entry disable (used in HS/FS mode). */ +#define USB_CONF_L1DS BIT(17) +/* USB 2.0 clock gate disable. */ +#define USB_CONF_CLK2OFFEN BIT(18) +/* USB 2.0 clock gate enable. */ +#define USB_CONF_CLK2OFFDS BIT(19) +/* L0 LPM state entry request (used in HS/FS mode). */ +#define USB_CONF_LGO_L0 BIT(20) +/* USB 3.0 clock gate disable. */ +#define USB_CONF_CLK3OFFEN BIT(21) +/* USB 3.0 clock gate enable. */ +#define USB_CONF_CLK3OFFDS BIT(22) +/* Bit 23 is reserved*/ +/* U1 state entry enable (used in SS mode). */ +#define USB_CONF_U1EN BIT(24) +/* U1 state entry disable (used in SS mode). */ +#define USB_CONF_U1DS BIT(25) +/* U2 state entry enable (used in SS mode). */ +#define USB_CONF_U2EN BIT(26) +/* U2 state entry disable (used in SS mode). */ +#define USB_CONF_U2DS BIT(27) +/* U0 state entry request (used in SS mode). */ +#define USB_CONF_LGO_U0 BIT(28) +/* U1 state entry request (used in SS mode). */ +#define USB_CONF_LGO_U1 BIT(29) +/* U2 state entry request (used in SS mode). */ +#define USB_CONF_LGO_U2 BIT(30) +/* SS.Inactive state entry request (used in SS mode) */ +#define USB_CONF_LGO_SSINACT BIT(31) + +/* USB_STS - bitmasks */ +/* + * Configuration status. + * 1 - device is in the configured state. + * 0 - device is not configured. + */ +#define USB_STS_CFGSTS_MASK BIT(0) +#define USB_STS_CFGSTS(p) ((p) & USB_STS_CFGSTS_MASK) +/* + * On-chip memory overflow. + * 0 - On-chip memory status OK. + * 1 - On-chip memory overflow. + */ +#define USB_STS_OV_MASK BIT(1) +#define USB_STS_OV(p) ((p) & USB_STS_OV_MASK) +/* + * SuperSpeed connection status. + * 0 - USB in SuperSpeed mode disconnected. + * 1 - USB in SuperSpeed mode connected. + */ +#define USB_STS_USB3CONS_MASK BIT(2) +#define USB_STS_USB3CONS(p) ((p) & USB_STS_USB3CONS_MASK) +/* + * DMA transfer configuration status. + * 0 - single request. + * 1 - multiple TRB chain + * Supported only for controller version < DEV_VER_V3 + */ +#define USB_STS_DTRANS_MASK BIT(3) +#define USB_STS_DTRANS(p) ((p) & USB_STS_DTRANS_MASK) +/* + * Device speed. + * 0 - Undefined (value after reset). + * 1 - Low speed + * 2 - Full speed + * 3 - High speed + * 4 - Super speed + */ +#define USB_STS_USBSPEED_MASK GENMASK(6, 4) +#define USB_STS_USBSPEED(p) (((p) & USB_STS_USBSPEED_MASK) >> 4) +#define USB_STS_LS (0x1 << 4) +#define USB_STS_FS (0x2 << 4) +#define USB_STS_HS (0x3 << 4) +#define USB_STS_SS (0x4 << 4) +#define DEV_UNDEFSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == (0x0 << 4)) +#define DEV_LOWSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_LS) +#define DEV_FULLSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_FS) +#define DEV_HIGHSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_HS) +#define DEV_SUPERSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_SS) +/* + * Endianness for SFR access. + * 0 - Little Endian order (default after hardware reset). + * 1 - Big Endian order + */ +#define USB_STS_ENDIAN_MASK BIT(7) +#define USB_STS_ENDIAN(p) ((p) & USB_STS_ENDIAN_MASK) +/* + * HS/FS clock turn-off status. + * 0 - hsfs clock is always on. + * 1 - hsfs clock turn-off in L2 (HS/FS mode) is enabled + * (default after hardware reset). + */ +#define USB_STS_CLK2OFF_MASK BIT(8) +#define USB_STS_CLK2OFF(p) ((p) & USB_STS_CLK2OFF_MASK) +/* + * PCLK clock turn-off status. + * 0 - pclk clock is always on. + * 1 - pclk clock turn-off in U3 (SS mode) is enabled + * (default after hardware reset). 
+ */ +#define USB_STS_CLK3OFF_MASK BIT(9) +#define USB_STS_CLK3OFF(p) ((p) & USB_STS_CLK3OFF_MASK) +/* + * Controller in reset state. + * 0 - Internal reset is active. + * 1 - Internal reset is not active and controller is fully operational. + */ +#define USB_STS_IN_RST_MASK BIT(10) +#define USB_STS_IN_RST(p) ((p) & USB_STS_IN_RST_MASK) +/* + * Status of the "TDL calculation basing on TRB" feature. + * 0 - disabled + * 1 - enabled + * Supported only for DEV_VER_V2 controller version. + */ +#define USB_STS_TDL_TRB_ENABLED BIT(11) +/* + * Device enable Status. + * 0 - USB device is disabled (VBUS input is disconnected from internal logic). + * 1 - USB device is enabled (VBUS input is connected to the internal logic). + */ +#define USB_STS_DEVS_MASK BIT(14) +#define USB_STS_DEVS(p) ((p) & USB_STS_DEVS_MASK) +/* + * Address status. + * 0 - USB device is default state. + * 1 - USB device is at least in address state. + */ +#define USB_STS_ADDRESSED_MASK BIT(15) +#define USB_STS_ADDRESSED(p) ((p) & USB_STS_ADDRESSED_MASK) +/* + * L1 LPM state enable status (used in HS/FS mode). + * 0 - Entering to L1 LPM state disabled. + * 1 - Entering to L1 LPM state enabled. + */ +#define USB_STS_L1ENS_MASK BIT(16) +#define USB_STS_L1ENS(p) ((p) & USB_STS_L1ENS_MASK) +/* + * Internal VBUS connection status (used both in HS/FS and SS mode). + * 0 - internal VBUS is not detected. + * 1 - internal VBUS is detected. + */ +#define USB_STS_VBUSS_MASK BIT(17) +#define USB_STS_VBUSS(p) ((p) & USB_STS_VBUSS_MASK) +/* + * HS/FS LPM state (used in FS/HS mode). + * 0 - L0 State + * 1 - L1 State + * 2 - L2 State + * 3 - L3 State + */ +#define USB_STS_LPMST_MASK GENMASK(19, 18) +#define DEV_L0_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x0 << 18)) +#define DEV_L1_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x1 << 18)) +#define DEV_L2_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x2 << 18)) +#define DEV_L3_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x3 << 18)) +/* + * Disable HS status (used in FS/HS mode). + * 0 - the disconnect bit for HS/FS mode is set . + * 1 - the disconnect bit for HS/FS mode is not set. + */ +#define USB_STS_USB2CONS_MASK BIT(20) +#define USB_STS_USB2CONS(p) ((p) & USB_STS_USB2CONS_MASK) +/* + * HS/FS mode connection status (used in FS/HS mode). + * 0 - High Speed operations in USB2.0 (FS/HS) mode not disabled. + * 1 - High Speed operations in USB2.0 (FS/HS). + */ +#define USB_STS_DISABLE_HS_MASK BIT(21) +#define USB_STS_DISABLE_HS(p) ((p) & USB_STS_DISABLE_HS_MASK) +/* + * U1 state enable status (used in SS mode). + * 0 - Entering to U1 state disabled. + * 1 - Entering to U1 state enabled. + */ +#define USB_STS_U1ENS_MASK BIT(24) +#define USB_STS_U1ENS(p) ((p) & USB_STS_U1ENS_MASK) +/* + * U2 state enable status (used in SS mode). + * 0 - Entering to U2 state disabled. + * 1 - Entering to U2 state enabled. + */ +#define USB_STS_U2ENS_MASK BIT(25) +#define USB_STS_U2ENS(p) ((p) & USB_STS_U2ENS_MASK) +/* + * SuperSpeed Link LTSSM state. 
This field reflects USBSS-DEV current + * SuperSpeed link state + */ +#define USB_STS_LST_MASK GENMASK(29, 26) +#define DEV_LST_U0 (((p) & USB_STS_LST_MASK) == (0x0 << 26)) +#define DEV_LST_U1 (((p) & USB_STS_LST_MASK) == (0x1 << 26)) +#define DEV_LST_U2 (((p) & USB_STS_LST_MASK) == (0x2 << 26)) +#define DEV_LST_U3 (((p) & USB_STS_LST_MASK) == (0x3 << 26)) +#define DEV_LST_DISABLED (((p) & USB_STS_LST_MASK) == (0x4 << 26)) +#define DEV_LST_RXDETECT (((p) & USB_STS_LST_MASK) == (0x5 << 26)) +#define DEV_LST_INACTIVE (((p) & USB_STS_LST_MASK) == (0x6 << 26)) +#define DEV_LST_POLLING (((p) & USB_STS_LST_MASK) == (0x7 << 26)) +#define DEV_LST_RECOVERY (((p) & USB_STS_LST_MASK) == (0x8 << 26)) +#define DEV_LST_HOT_RESET (((p) & USB_STS_LST_MASK) == (0x9 << 26)) +#define DEV_LST_COMP_MODE (((p) & USB_STS_LST_MASK) == (0xa << 26)) +#define DEV_LST_LB_STATE (((p) & USB_STS_LST_MASK) == (0xb << 26)) +/* + * DMA clock turn-off status. + * 0 - DMA clock is always on (default after hardware reset). + * 1 - DMA clock turn-off in U1, U2 and U3 (SS mode) is enabled. + */ +#define USB_STS_DMAOFF_MASK BIT(30) +#define USB_STS_DMAOFF(p) ((p) & USB_STS_DMAOFF_MASK) +/* + * SFR Endian status. + * 0 - Little Endian order (default after hardware reset). + * 1 - Big Endian order. + */ +#define USB_STS_ENDIAN2_MASK BIT(31) +#define USB_STS_ENDIAN2(p) ((p) & USB_STS_ENDIAN2_MASK) + +/* USB_CMD - bitmasks */ +/* Set Function Address */ +#define USB_CMD_SET_ADDR BIT(0) +/* + * Function Address This field is saved to the device only when the field + * SET_ADDR is set '1 ' during write to USB_CMD register. + * Software is responsible for entering the address of the device during + * SET_ADDRESS request service. This field should be set immediately after + * the SETUP packet is decoded, and prior to confirmation of the status phase + */ +#define USB_CMD_FADDR_MASK GENMASK(7, 1) +#define USB_CMD_FADDR(p) (((p) << 1) & USB_CMD_FADDR_MASK) +/* Send Function Wake Device Notification TP (used only in SS mode). */ +#define USB_CMD_SDNFW BIT(8) +/* Set Test Mode (used only in HS/FS mode). */ +#define USB_CMD_STMODE BIT(9) +/* Test mode selector (used only in HS/FS mode) */ +#define USB_STS_TMODE_SEL_MASK GENMASK(11, 10) +#define USB_STS_TMODE_SEL(p) (((p) << 10) & USB_STS_TMODE_SEL_MASK) +/* + * Send Latency Tolerance Message Device Notification TP (used only + * in SS mode). + */ +#define USB_CMD_SDNLTM BIT(12) +/* Send Custom Transaction Packet (used only in SS mode) */ +#define USB_CMD_SPKT BIT(13) +/*Device Notification 'Function Wake' - Interface value (only in SS mode. */ +#define USB_CMD_DNFW_INT_MASK GENMASK(23, 16) +#define USB_STS_DNFW_INT(p) (((p) << 16) & USB_CMD_DNFW_INT_MASK) +/* + * Device Notification 'Latency Tolerance Message' -373 BELT value [7:0] + * (used only in SS mode). + */ +#define USB_CMD_DNLTM_BELT_MASK GENMASK(27, 16) +#define USB_STS_DNLTM_BELT(p) (((p) << 16) & USB_CMD_DNLTM_BELT_MASK) + +/* USB_ITPN - bitmasks */ +/* + * ITP(SS) / SOF (HS/FS) number + * In SS mode this field represent number of last ITP received from host. + * In HS/FS mode this field represent number of last SOF received from host. + */ +#define USB_ITPN_MASK GENMASK(13, 0) +#define USB_ITPN(p) ((p) & USB_ITPN_MASK) + +/* USB_LPM - bitmasks */ +/* Host Initiated Resume Duration. */ +#define USB_LPM_HIRD_MASK GENMASK(3, 0) +#define USB_LPM_HIRD(p) ((p) & USB_LPM_HIRD_MASK) +/* Remote Wakeup Enable (bRemoteWake). 
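+ * This presumably latches the bRemoteWake bit carried in the most
+ * recent LPM token, alongside the HIRD field above.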
*/
+#define USB_LPM_BRW BIT(4)
+
+/* USB_IEN - bitmasks */
+/* SS connection interrupt enable. */
+#define USB_IEN_CONIEN BIT(0)
+/* SS disconnection interrupt enable. */
+#define USB_IEN_DISIEN BIT(1)
+/* USB SS warm reset interrupt enable. */
+#define USB_IEN_UWRESIEN BIT(2)
+/* USB SS hot reset interrupt enable. */
+#define USB_IEN_UHRESIEN BIT(3)
+/* SS link U3 state enter interrupt enable (suspend). */
+#define USB_IEN_U3ENTIEN BIT(4)
+/* SS link U3 state exit interrupt enable (wakeup). */
+#define USB_IEN_U3EXTIEN BIT(5)
+/* SS link U2 state enter interrupt enable. */
+#define USB_IEN_U2ENTIEN BIT(6)
+/* SS link U2 state exit interrupt enable. */
+#define USB_IEN_U2EXTIEN BIT(7)
+/* SS link U1 state enter interrupt enable. */
+#define USB_IEN_U1ENTIEN BIT(8)
+/* SS link U1 state exit interrupt enable. */
+#define USB_IEN_U1EXTIEN BIT(9)
+/* ITP/SOF packet detected interrupt enable. */
+#define USB_IEN_ITPIEN BIT(10)
+/* Wakeup interrupt enable. */
+#define USB_IEN_WAKEIEN BIT(11)
+/* Send Custom Packet interrupt enable. */
+#define USB_IEN_SPKTIEN BIT(12)
+/* HS/FS mode connection interrupt enable. */
+#define USB_IEN_CON2IEN BIT(16)
+/* HS/FS mode disconnection interrupt enable. */
+#define USB_IEN_DIS2IEN BIT(17)
+/* USB reset (HS/FS mode) interrupt enable. */
+#define USB_IEN_U2RESIEN BIT(18)
+/* LPM L2 state enter interrupt enable. */
+#define USB_IEN_L2ENTIEN BIT(20)
+/* LPM L2 state exit interrupt enable. */
+#define USB_IEN_L2EXTIEN BIT(21)
+/* LPM L1 state enter interrupt enable. */
+#define USB_IEN_L1ENTIEN BIT(24)
+/* LPM L1 state exit interrupt enable. */
+#define USB_IEN_L1EXTIEN BIT(25)
+/* Configuration reset interrupt enable. */
+#define USB_IEN_CFGRESIEN BIT(26)
+/* Start of the USB SS warm reset interrupt enable. */
+#define USB_IEN_UWRESSIEN BIT(28)
+/* End of the USB SS warm reset interrupt enable. */
+#define USB_IEN_UWRESEIEN BIT(29)
+
+#define USB_IEN_INIT (USB_IEN_U2RESIEN | USB_ISTS_DIS2I | USB_IEN_CON2IEN \
+ | USB_IEN_UHRESIEN | USB_IEN_UWRESIEN | USB_IEN_DISIEN \
+ | USB_IEN_CONIEN | USB_IEN_U3EXTIEN | USB_IEN_L2ENTIEN \
+ | USB_IEN_L2EXTIEN | USB_IEN_L1ENTIEN | USB_IEN_U3ENTIEN)
+
+/* USB_ISTS - bitmasks */
+/* SS Connection detected. */
+#define USB_ISTS_CONI BIT(0)
+/* SS Disconnection detected. */
+#define USB_ISTS_DISI BIT(1)
+/* USB warm reset detected. */
+#define USB_ISTS_UWRESI BIT(2)
+/* USB hot reset detected. */
+#define USB_ISTS_UHRESI BIT(3)
+/* U3 link state enter detected (suspend). */
+#define USB_ISTS_U3ENTI BIT(4)
+/* U3 link state exit detected (wakeup).
*/ +#define USB_ISTS_U3EXTI BIT(5) +/* U2 link state enter detected.*/ +#define USB_ISTS_U2ENTI BIT(6) +/* U2 link state exit detected.*/ +#define USB_ISTS_U2EXTI BIT(7) +/* U1 link state enter detected.*/ +#define USB_ISTS_U1ENTI BIT(8) +/* U1 link state exit detected.*/ +#define USB_ISTS_U1EXTI BIT(9) +/* ITP/SOF packet detected.*/ +#define USB_ISTS_ITPI BIT(10) +/* Wakeup detected.*/ +#define USB_ISTS_WAKEI BIT(11) +/* Send Custom Packet detected.*/ +#define USB_ISTS_SPKTI BIT(12) +/* HS/FS mode connection detected.*/ +#define USB_ISTS_CON2I BIT(16) +/* HS/FS mode disconnection detected.*/ +#define USB_ISTS_DIS2I BIT(17) +/* USB reset (HS/FS mode) detected.*/ +#define USB_ISTS_U2RESI BIT(18) +/* LPM L2 state enter detected.*/ +#define USB_ISTS_L2ENTI BIT(20) +/* LPM L2 state exit detected.*/ +#define USB_ISTS_L2EXTI BIT(21) +/* LPM L1 state enter detected.*/ +#define USB_ISTS_L1ENTI BIT(24) +/* LPM L1 state exit detected.*/ +#define USB_ISTS_L1EXTI BIT(25) +/* USB configuration reset detected.*/ +#define USB_ISTS_CFGRESI BIT(26) +/* Start of the USB warm reset detected.*/ +#define USB_ISTS_UWRESSI BIT(28) +/* End of the USB warm reset detected.*/ +#define USB_ISTS_UWRESEI BIT(29) + +/* USB_SEL - bitmasks */ +#define EP_SEL_EPNO_MASK GENMASK(3, 0) +/* Endpoint number. */ +#define EP_SEL_EPNO(p) ((p) & EP_SEL_EPNO_MASK) +/* Endpoint direction bit - 0 - OUT, 1 - IN. */ +#define EP_SEL_DIR BIT(7) + +#define select_ep_in(nr) (EP_SEL_EPNO(p) | EP_SEL_DIR) +#define select_ep_out (EP_SEL_EPNO(p)) + +/* EP_TRADDR - bitmasks */ +/* Transfer Ring address. */ +#define EP_TRADDR_TRADDR(p) ((p)) + +/* EP_CFG - bitmasks */ +/* Endpoint enable */ +#define EP_CFG_ENABLE BIT(0) +/* + * Endpoint type. + * 1 - isochronous + * 2 - bulk + * 3 - interrupt + */ +#define EP_CFG_EPTYPE_MASK GENMASK(2, 1) +#define EP_CFG_EPTYPE(p) (((p) << 1) & EP_CFG_EPTYPE_MASK) +/* Stream support enable (only in SS mode). */ +#define EP_CFG_STREAM_EN BIT(3) +/* TDL check (only in SS mode for BULK EP). */ +#define EP_CFG_TDL_CHK BIT(4) +/* SID check (only in SS mode for BULK OUT EP). */ +#define EP_CFG_SID_CHK BIT(5) +/* DMA transfer endianness. */ +#define EP_CFG_EPENDIAN BIT(7) +/* Max burst size (used only in SS mode). */ +#define EP_CFG_MAXBURST_MASK GENMASK(11, 8) +#define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK) +/* ISO max burst. */ +#define EP_CFG_MULT_MASK GENMASK(15, 14) +#define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK) +/* ISO max burst. */ +#define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16) +#define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK) +/* Max number of buffered packets. */ +#define EP_CFG_BUFFERING_MASK GENMASK(31, 27) +#define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK) + +/* EP_CMD - bitmasks */ +/* Endpoint reset. */ +#define EP_CMD_EPRST BIT(0) +/* Endpoint STALL set. */ +#define EP_CMD_SSTALL BIT(1) +/* Endpoint STALL clear. */ +#define EP_CMD_CSTALL BIT(2) +/* Send ERDY TP. */ +#define EP_CMD_ERDY BIT(3) +/* Request complete. */ +#define EP_CMD_REQ_CMPL BIT(5) +/* Transfer descriptor ready. */ +#define EP_CMD_DRDY BIT(6) +/* Data flush. */ +#define EP_CMD_DFLUSH BIT(7) +/* + * Transfer Descriptor Length write (used only for Bulk Stream capable + * endpoints in SS mode). + * Bit Removed from DEV_VER_V3 controller version. + */ +#define EP_CMD_STDL BIT(8) +/* + * Transfer Descriptor Length (used only in SS mode for bulk endpoints). + * Bits Removed from DEV_VER_V3 controller version. 
+ */ +#define EP_CMD_TDL_MASK GENMASK(15, 9) +#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK) +#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9) + +/* ERDY Stream ID value (used in SS mode). */ +#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16) +#define EP_CMD_ERDY_SID(p) (((p) << 16) & EP_CMD_ERDY_SID_MASK) + +/* EP_STS - bitmasks */ +/* Setup transfer complete. */ +#define EP_STS_SETUP BIT(0) +/* Endpoint STALL status. */ +#define EP_STS_STALL(p) ((p) & BIT(1)) +/* Interrupt On Complete. */ +#define EP_STS_IOC BIT(2) +/* Interrupt on Short Packet. */ +#define EP_STS_ISP BIT(3) +/* Transfer descriptor missing. */ +#define EP_STS_DESCMIS BIT(4) +/* Stream Rejected (used only in SS mode) */ +#define EP_STS_STREAMR BIT(5) +/* EXIT from MOVE DATA State (used only for stream transfers in SS mode). */ +#define EP_STS_MD_EXIT BIT(6) +/* TRB error. */ +#define EP_STS_TRBERR BIT(7) +/* Not ready (used only in SS mode). */ +#define EP_STS_NRDY BIT(8) +/* DMA busy bit. */ +#define EP_STS_DBUSY BIT(9) +/* Endpoint Buffer Empty */ +#define EP_STS_BUFFEMPTY(p) ((p) & BIT(10)) +/* Current Cycle Status */ +#define EP_STS_CCS(p) ((p) & BIT(11)) +/* Prime (used only in SS mode. */ +#define EP_STS_PRIME BIT(12) +/* Stream error (used only in SS mode). */ +#define EP_STS_SIDERR BIT(13) +/* OUT size mismatch. */ +#define EP_STS_OUTSMM BIT(14) +/* ISO transmission error. */ +#define EP_STS_ISOERR BIT(15) +/* Host Packet Pending (only for SS mode). */ +#define EP_STS_HOSTPP(p) ((p) & BIT(16)) +/* Stream Protocol State Machine State (only for Bulk stream endpoints). */ +#define EP_STS_SPSMST_MASK GENMASK(18, 17) +#define EP_STS_SPSMST_DISABLED(p) (((p) & EP_STS_SPSMST_MASK) >> 17) +#define EP_STS_SPSMST_IDLE(p) (((p) & EP_STS_SPSMST_MASK) >> 17) +#define EP_STS_SPSMST_START_STREAM(p) (((p) & EP_STS_SPSMST_MASK) >> 17) +#define EP_STS_SPSMST_MOVE_DATA(p) (((p) & EP_STS_SPSMST_MASK) >> 17) +/* Interrupt On Transfer complete. */ +#define EP_STS_IOT BIT(19) +/* OUT queue endpoint number. */ +#define EP_STS_OUTQ_NO_MASK GENMASK(27, 24) +#define EP_STS_OUTQ_NO(p) (((p) & EP_STS_OUTQ_NO_MASK) >> 24) +/* OUT queue valid flag. */ +#define EP_STS_OUTQ_VAL_MASK BIT(28) +#define EP_STS_OUTQ_VAL(p) ((p) & EP_STS_OUTQ_VAL_MASK) +/* SETUP WAIT. */ +#define EP_STS_STPWAIT BIT(31) + +/* EP_STS_SID - bitmasks */ +/* Stream ID (used only in SS mode). */ +#define EP_STS_SID_MASK GENMASK(15, 0) +#define EP_STS_SID(p) ((p) & EP_STS_SID_MASK) + +/* EP_STS_EN - bitmasks */ +/* SETUP interrupt enable. */ +#define EP_STS_EN_SETUPEN BIT(0) +/* OUT transfer missing descriptor enable. */ +#define EP_STS_EN_DESCMISEN BIT(4) +/* Stream Rejected enable. */ +#define EP_STS_EN_STREAMREN BIT(5) +/* Move Data Exit enable.*/ +#define EP_STS_EN_MD_EXITEN BIT(6) +/* TRB enable. */ +#define EP_STS_EN_TRBERREN BIT(7) +/* NRDY enable. */ +#define EP_STS_EN_NRDYEN BIT(8) +/* Prime enable. */ +#define EP_STS_EN_PRIMEEEN BIT(12) +/* Stream error enable. */ +#define EP_STS_EN_SIDERREN BIT(13) +/* OUT size mismatch enable. */ +#define EP_STS_EN_OUTSMMEN BIT(14) +/* ISO transmission error enable. */ +#define EP_STS_EN_ISOERREN BIT(15) +/* Interrupt on Transmission complete enable. */ +#define EP_STS_EN_IOTEN BIT(19) +/* Setup Wait interrupt enable. 
*/ +#define EP_STS_EN_STPWAITEN BIT(31) + +/* DRBL- bitmasks */ +#define DB_VALUE_BY_INDEX(index) (1 << (index)) +#define DB_VALUE_EP0_OUT BIT(0) +#define DB_VALUE_EP0_IN BIT(16) + +/* EP_IEN - bitmasks */ +#define EP_IEN(index) (1 << (index)) +#define EP_IEN_EP_OUT0 BIT(0) +#define EP_IEN_EP_IN0 BIT(16) + +/* EP_ISTS - bitmasks */ +#define EP_ISTS(index) (1 << (index)) +#define EP_ISTS_EP_OUT0 BIT(0) +#define EP_ISTS_EP_IN0 BIT(16) + +/* USB_PWR- bitmasks */ +/*Power Shut Off capability enable*/ +#define PUSB_PWR_PSO_EN BIT(0) +/*Power Shut Off capability disable*/ +#define PUSB_PWR_PSO_DS BIT(1) +/* + * Enables turning-off Reference Clock. + * This bit is optional and implemented only when support for OTG is + * implemented (indicated by OTG_READY bit set to '1'). + */ +#define PUSB_PWR_STB_CLK_SWITCH_EN BIT(8) +/* + * Status bit indicating that operation required by STB_CLK_SWITCH_EN write + * is completed + */ +#define PUSB_PWR_STB_CLK_SWITCH_DONE BIT(9) +/* This bit informs if Fast Registers Access is enabled. */ +#define PUSB_PWR_FST_REG_ACCESS_STAT BIT(30) +/* Fast Registers Access Enable. */ +#define PUSB_PWR_FST_REG_ACCESS BIT(31) + +/* USB_CONF2- bitmasks */ +/* + * Writing 1 disables TDL calculation basing on TRB feature in controller + * for DMULT mode. + * Bit supported only for DEV_VER_V2 version. + */ +#define USB_CONF2_DIS_TDL_TRB BIT(1) +/* + * Writing 1 enables TDL calculation basing on TRB feature in controller + * for DMULT mode. + * Bit supported only for DEV_VER_V2 version. + */ +#define USB_CONF2_EN_TDL_TRB BIT(2) + +/* USB_CAP1- bitmasks */ +/* + * SFR Interface type + * These field reflects type of SFR interface implemented: + * 0x0 - OCP + * 0x1 - AHB, + * 0x2 - PLB + * 0x3 - AXI + * 0x4-0xF - reserved + */ +#define USB_CAP1_SFR_TYPE_MASK GENMASK(3, 0) +#define DEV_SFR_TYPE_OCP(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x0) +#define DEV_SFR_TYPE_AHB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x1) +#define DEV_SFR_TYPE_PLB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x2) +#define DEV_SFR_TYPE_AXI(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x3) +/* + * SFR Interface width + * These field reflects width of SFR interface implemented: + * 0x0 - 8 bit interface, + * 0x1 - 16 bit interface, + * 0x2 - 32 bit interface + * 0x3 - 64 bit interface + * 0x4-0xF - reserved + */ +#define USB_CAP1_SFR_WIDTH_MASK GENMASK(7, 4) +#define DEV_SFR_WIDTH_8(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x0 << 4)) +#define DEV_SFR_WIDTH_16(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x1 << 4)) +#define DEV_SFR_WIDTH_32(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x2 << 4)) +#define DEV_SFR_WIDTH_64(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x3 << 4)) +/* + * DMA Interface type + * These field reflects type of DMA interface implemented: + * 0x0 - OCP + * 0x1 - AHB, + * 0x2 - PLB + * 0x3 - AXI + * 0x4-0xF - reserved + */ +#define USB_CAP1_DMA_TYPE_MASK GENMASK(11, 8) +#define DEV_DMA_TYPE_OCP(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x0 << 8)) +#define DEV_DMA_TYPE_AHB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x1 << 8)) +#define DEV_DMA_TYPE_PLB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x2 << 8)) +#define DEV_DMA_TYPE_AXI(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x3 << 8)) +/* + * DMA Interface width + * These field reflects width of DMA interface implemented: + * 0x0 - reserved, + * 0x1 - reserved, + * 0x2 - 32 bit interface + * 0x3 - 64 bit interface + * 0x4-0xF - reserved + */ +#define USB_CAP1_DMA_WIDTH_MASK GENMASK(15, 12) +#define DEV_DMA_WIDTH_32(p) (((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x2 << 12)) +#define DEV_DMA_WIDTH_64(p) 
(((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x3 << 12)) +/* + * USB3 PHY Interface type + * These field reflects type of USB3 PHY interface implemented: + * 0x0 - USB PIPE, + * 0x1 - RMMI, + * 0x2-0xF - reserved + */ +#define USB_CAP1_U3PHY_TYPE_MASK GENMASK(19, 16) +#define DEV_U3PHY_PIPE(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x0 << 16)) +#define DEV_U3PHY_RMMI(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x1 << 16)) +/* + * USB3 PHY Interface width + * These field reflects width of USB3 PHY interface implemented: + * 0x0 - 8 bit PIPE interface, + * 0x1 - 16 bit PIPE interface, + * 0x2 - 32 bit PIPE interface, + * 0x3 - 64 bit PIPE interface + * 0x4-0xF - reserved + * Note: When SSIC interface is implemented this field shows the width of + * internal PIPE interface. The RMMI interface is always 20bit wide. + */ +#define USB_CAP1_U3PHY_WIDTH_MASK GENMASK(23, 20) +#define DEV_U3PHY_WIDTH_8(p) \ + (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x0 << 20)) +#define DEV_U3PHY_WIDTH_16(p) \ + (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x1 << 16)) +#define DEV_U3PHY_WIDTH_32(p) \ + (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x2 << 20)) +#define DEV_U3PHY_WIDTH_64(p) \ + (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x3 << 16)) + +/* + * USB2 PHY Interface enable + * These field informs if USB2 PHY interface is implemented: + * 0x0 - interface NOT implemented, + * 0x1 - interface implemented + */ +#define USB_CAP1_U2PHY_EN(p) ((p) & BIT(24)) +/* + * USB2 PHY Interface type + * These field reflects type of USB2 PHY interface implemented: + * 0x0 - UTMI, + * 0x1 - ULPI + */ +#define DEV_U2PHY_ULPI(p) ((p) & BIT(25)) +/* + * USB2 PHY Interface width + * These field reflects width of USB2 PHY interface implemented: + * 0x0 - 8 bit interface, + * 0x1 - 16 bit interface, + * Note: The ULPI interface is always 8bit wide. + */ +#define DEV_U2PHY_WIDTH_16(p) ((p) & BIT(26)) +/* + * OTG Ready + * 0x0 - pure device mode + * 0x1 - some features and ports for CDNS USB OTG controller are implemented. + */ +#define USB_CAP1_OTG_READY(p) ((p) & BIT(27)) + +/* + * When set, indicates that controller supports automatic internal TDL + * calculation basing on the size provided in TRB (TRB[22:17]) for DMULT mode + * Supported only for DEV_VER_V2 controller version. + */ +#define USB_CAP1_TDL_FROM_TRB(p) ((p) & BIT(28)) + +/* USB_CAP2- bitmasks */ +/* + * The actual size of the connected On-chip RAM memory in kB: + * - 0 means 256 kB (max supported mem size) + * - value other than 0 reflects the mem size in kB + */ +#define USB_CAP2_ACTUAL_MEM_SIZE(p) ((p) & GENMASK(7, 0)) +/* + * Max supported mem size + * These field reflects width of on-chip RAM address bus width, + * which determines max supported mem size: + * 0x0-0x7 - reserved, + * 0x8 - support for 4kB mem, + * 0x9 - support for 8kB mem, + * 0xA - support for 16kB mem, + * 0xB - support for 32kB mem, + * 0xC - support for 64kB mem, + * 0xD - support for 128kB mem, + * 0xE - support for 256kB mem, + * 0xF - reserved + */ +#define USB_CAP2_MAX_MEM_SIZE(p) ((p) & GENMASK(11, 8)) + +/* USB_CAP3- bitmasks */ +#define EP_IS_IMPLEMENTED(reg, index) ((reg) & (1 << (index))) + +/* USB_CAP4- bitmasks */ +#define EP_SUPPORT_ISO(reg, index) ((reg) & (1 << (index))) + +/* USB_CAP5- bitmasks */ +#define EP_SUPPORT_STREAM(reg, index) ((reg) & (1 << (index))) + +/* USB_CAP6- bitmasks */ +/* The USBSS-DEV Controller Internal build number. */ +#define GET_DEV_BASE_VERSION(p) ((p) & GENMASK(23, 0)) +/* The USBSS-DEV Controller version number. 
*/ +#define GET_DEV_CUSTOM_VERSION(p) ((p) & GENMASK(31, 24)) + +#define DEV_VER_NXP_V1 0x00024502 +#define DEV_VER_TI_V1 0x00024509 +#define DEV_VER_V2 0x0002450C +#define DEV_VER_V3 0x0002450d + +/* DBG_LINK1- bitmasks */ +/* + * LFPS_MIN_DET_U1_EXIT value This parameter configures the minimum + * time required for decoding the received LFPS as an LFPS.U1_Exit. + */ +#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT(p) ((p) & GENMASK(7, 0)) +/* + * LFPS_MIN_GEN_U1_EXIT value This parameter configures the minimum time for + * phytxelecidle deassertion when LFPS.U1_Exit + */ +#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK GENMASK(15, 8) +#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(p) (((p) << 8) & GENMASK(15, 8)) +/* + * RXDET_BREAK_DIS value This parameter configures terminating the Far-end + * Receiver termination detection sequence: + * 0: it is possible that USBSS_DEV will terminate Farend receiver + * termination detection sequence + * 1: USBSS_DEV will not terminate Far-end receiver termination + * detection sequence + */ +#define DBG_LINK1_RXDET_BREAK_DIS BIT(16) +/* LFPS_GEN_PING value This parameter configures the LFPS.Ping generation */ +#define DBG_LINK1_LFPS_GEN_PING(p) (((p) << 17) & GENMASK(21, 17)) +/* + * Set the LFPS_MIN_DET_U1_EXIT value Writing '1' to this bit writes the + * LFPS_MIN_DET_U1_EXIT field value to the device. This bit is automatically + * cleared. Writing '0' has no effect + */ +#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT_SET BIT(24) +/* + * Set the LFPS_MIN_GEN_U1_EXIT value. Writing '1' to this bit writes the + * LFPS_MIN_GEN_U1_EXIT field value to the device. This bit is automatically + * cleared. Writing '0' has no effect + */ +#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET BIT(25) +/* + * Set the RXDET_BREAK_DIS value Writing '1' to this bit writes + * the RXDET_BREAK_DIS field value to the device. This bit is automatically + * cleared. Writing '0' has no effect + */ +#define DBG_LINK1_RXDET_BREAK_DIS_SET BIT(26) +/* + * Set the LFPS_GEN_PING_SET value Writing '1' to this bit writes + * the LFPS_GEN_PING field value to the device. This bit is automatically + * cleared. Writing '0' has no effect." + */ +#define DBG_LINK1_LFPS_GEN_PING_SET BIT(27) + +/* DMA_AXI_CTRL- bitmasks */ +/* The mawprot pin configuration. */ +#define DMA_AXI_CTRL_MARPROT(p) ((p) & GENMASK(2, 0)) +/* The marprot pin configuration. */ +#define DMA_AXI_CTRL_MAWPROT(p) (((p) & GENMASK(2, 0)) << 16) +#define DMA_AXI_CTRL_NON_SECURE 0x02 + +#define gadget_to_cdns3_device(g) (container_of(g, struct cdns3_device, gadget)) + +#define ep_to_cdns3_ep(ep) (container_of(ep, struct cdns3_endpoint, endpoint)) + +/*-------------------------------------------------------------------------*/ +/* + * USBSS-DEV DMA interface. + */ +#define TRBS_PER_SEGMENT 40 + +#define ISO_MAX_INTERVAL 10 + +#if TRBS_PER_SEGMENT < 2 +#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2." +#endif + +/* + *Only for ISOC endpoints - maximum number of TRBs is calculated as + * pow(2, bInterval-1) * number of usb requests. It is limitation made by + * driver to save memory. Controller must prepare TRB for each ITP even + * if bInterval > 1. It's the reason why driver needs so many TRBs for + * isochronous endpoints. + */ +#define TRBS_PER_ISOC_SEGMENT (ISO_MAX_INTERVAL * 8) + +#define GET_TRBS_PER_SEGMENT(ep_type) ((ep_type) == USB_ENDPOINT_XFER_ISOC ? \ + TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT) +/** + * struct cdns3_trb - represent Transfer Descriptor block. 
+ * @buffer: pointer to buffer data + * @length: length of data + * @control: control flags. + * + * This structure describes transfer block serviced by DMA module. + */ +struct cdns3_trb { + __le32 buffer; + __le32 length; + __le32 control; +}; + +#define TRB_SIZE (sizeof(struct cdns3_trb)) +#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT) +#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT) +#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2) + +/* TRB bit mask */ +#define TRB_TYPE_BITMASK GENMASK(15, 10) +#define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) + +/* TRB type IDs */ +/* bulk, interrupt, isoc , and control data stage */ +#define TRB_NORMAL 1 +/* TRB for linking ring segments */ +#define TRB_LINK 6 + +/* Cycle bit - indicates TRB ownership by driver or hw*/ +#define TRB_CYCLE BIT(0) +/* + * When set to '1', the device will toggle its interpretation of the Cycle bit + */ +#define TRB_TOGGLE BIT(1) + +/* + * Short Packet (SP). OUT EPs at DMULT=1 only. Indicates if the TRB was + * processed while USB short packet was received. No more buffers defined by + * the TD will be used. DMA will automatically advance to next TD. + * - Shall be set to 0 by Software when putting TRB on the Transfer Ring + * - Shall be set to 1 by Controller when Short Packet condition for this TRB + * is detected independent if ISP is set or not. + */ +#define TRB_SP BIT(1) + +/* Interrupt on short packet*/ +#define TRB_ISP BIT(2) +/*Setting this bit enables FIFO DMA operation mode*/ +#define TRB_FIFO_MODE BIT(3) +/* Set PCIe no snoop attribute */ +#define TRB_CHAIN BIT(4) +/* Interrupt on completion */ +#define TRB_IOC BIT(5) + +/* stream ID bitmasks. */ +#define TRB_STREAM_ID_BITMASK GENMASK(31, 16) +#define TRB_STREAM_ID(p) ((p) << 16) +#define TRB_FIELD_TO_STREAMID(p) (((p) & TRB_STREAM_ID_BITMASK) >> 16) + +/* Size of TD expressed in USB packets for HS/FS mode. */ +#define TRB_TDL_HS_SIZE(p) (((p) << 16) & GENMASK(31, 16)) +#define TRB_TDL_HS_SIZE_GET(p) (((p) & GENMASK(31, 16)) >> 16) + +/* transfer_len bitmasks. */ +#define TRB_LEN(p) ((p) & GENMASK(16, 0)) + +/* Size of TD expressed in USB packets for SS mode. */ +#define TRB_TDL_SS_SIZE(p) (((p) << 17) & GENMASK(23, 17)) +#define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17) + +/* transfer_len bitmasks - bits 31:24 */ +#define TRB_BURST_LEN(p) (((p) << 24) & GENMASK(31, 24)) +#define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24) + +/* Data buffer pointer bitmasks*/ +#define TRB_BUFFER(p) ((p) & GENMASK(31, 0)) + +/*-------------------------------------------------------------------------*/ +/* Driver numeric constants */ + +/* Such declaration should be added to ch9.h */ +#define USB_DEVICE_MAX_ADDRESS 127 + +/* Endpoint init values */ +#define CDNS3_EP_MAX_PACKET_LIMIT 1024 +#define CDNS3_EP_MAX_STREAMS 15 +#define CDNS3_EP0_MAX_PACKET_LIMIT 512 + +/* All endpoints including EP0 */ +#define CDNS3_ENDPOINTS_MAX_COUNT 32 +#define CDNS3_EP_ZLP_BUF_SIZE 1024 + +#define CDNS3_EP_BUF_SIZE 2 /* KB */ +#define CDNS3_EP_ISO_HS_MULT 3 +#define CDNS3_EP_ISO_SS_BURST 3 +#define CDNS3_MAX_NUM_DESCMISS_BUF 32 +#define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */ +#define CDNS3_WA2_NUM_BUFFERS 128 +/*-------------------------------------------------------------------------*/ +/* Used structs */ + +struct cdns3_device; + +/** + * struct cdns3_endpoint - extended device side representation of USB endpoint. + * @endpoint: usb endpoint + * @pending_req_list: list of requests queuing on transfer ring. 
+ * @deferred_req_list: list of requests waiting for queuing on transfer ring.
+ * @wa2_descmiss_req_list: list of requests internally allocated by driver.
+ * @trb_pool: transfer ring - array of transaction buffers
+ * @trb_pool_dma: dma address of transfer ring
+ * @cdns3_dev: device associated with this endpoint
+ * @name: a human-readable name, e.g. ep1out
+ * @flags: specifies the current state of the endpoint
+ * @descmis_req: internal transfer object used for getting data from on-chip
+ * buffer. It is used only if the function driver doesn't queue a
+ * usb_request object on time.
+ * @dir: endpoint direction
+ * @num: endpoint number (1 - 15)
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+ * @interval: interval between packets used for ISOC endpoint.
+ * @free_trbs: number of free TRBs in transfer ring
+ * @num_trbs: number of all TRBs in transfer ring
+ * @pcs: producer cycle state
+ * @ccs: consumer cycle state
+ * @enqueue: enqueue index in transfer ring
+ * @dequeue: dequeue index in transfer ring
+ * @trb_burst_size: burst size used in TRBs.
+ */
+struct cdns3_endpoint {
+ struct usb_ep endpoint;
+ struct list_head pending_req_list;
+ struct list_head deferred_req_list;
+ struct list_head wa2_descmiss_req_list;
+ int wa2_counter;
+
+ struct cdns3_trb *trb_pool;
+ dma_addr_t trb_pool_dma;
+
+ struct cdns3_device *cdns3_dev;
+ char name[20];
+
+#define EP_ENABLED BIT(0)
+#define EP_STALLED BIT(1)
+#define EP_STALL_PENDING BIT(2)
+#define EP_WEDGE BIT(3)
+#define EP_TRANSFER_STARTED BIT(4)
+#define EP_UPDATE_EP_TRBADDR BIT(5)
+#define EP_PENDING_REQUEST BIT(6)
+#define EP_RING_FULL BIT(7)
+#define EP_CLAIMED BIT(8)
+#define EP_DEFERRED_DRDY BIT(9)
+#define EP_QUIRK_ISO_OUT_EN BIT(10)
+#define EP_QUIRK_END_TRANSFER BIT(11)
+#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
+#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+ u32 flags;
+
+ struct cdns3_request *descmis_req;
+
+ u8 dir;
+ u8 num;
+ u8 type;
+ int interval;
+
+ int free_trbs;
+ int num_trbs;
+ u8 pcs;
+ u8 ccs;
+ int enqueue;
+ int dequeue;
+ u8 trb_burst_size;
+
+ unsigned int wa1_set:1;
+ struct cdns3_trb *wa1_trb;
+ unsigned int wa1_trb_index;
+ unsigned int wa1_cycle_bit:1;
+};
+
+/**
+ * struct cdns3_aligned_buf - represent aligned buffer used for DMA transfer
+ * @buf: aligned to 8 bytes data buffer. Buffer address used in
+ * TRB shall be aligned to 8.
+ * @dma: dma address
+ * @size: size of buffer
+ * @in_use: inform if this buffer is associated with usb_request
+ * @list: used for adding an instance of this object to a list
+ */
+struct cdns3_aligned_buf {
+ void *buf;
+ dma_addr_t dma;
+ u32 size;
+ int in_use:1;
+ struct list_head list;
+};
+
+/**
+ * struct cdns3_request - extended device side representation of usb_request
+ * object.
+ * @request: generic usb_request object describing single I/O request.
+ * @priv_ep: extended representation of usb_ep object
+ * @trb: the first TRB associated with this request
+ * @start_trb: number of the first TRB in transfer ring
+ * @end_trb: number of the last TRB in transfer ring
+ * @aligned_buf: object holds information about aligned buffer associated with
+ * this endpoint
+ * @flags: flag specifying special usage of request
+ * @list: used by internally allocated requests when adding them to
+ * wa2_descmiss_req_list.
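+ *
+ * The REQUEST_* bits defined inside the structure below are the values
+ * carried in @flags.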
+ */
+struct cdns3_request {
+ struct usb_request request;
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_trb *trb;
+ int start_trb;
+ int end_trb;
+ struct cdns3_aligned_buf *aligned_buf;
+#define REQUEST_PENDING BIT(0)
+#define REQUEST_INTERNAL BIT(1)
+#define REQUEST_INTERNAL_CH BIT(2)
+#define REQUEST_ZLP BIT(3)
+#define REQUEST_UNALIGNED BIT(4)
+ u32 flags;
+ struct list_head list;
+};
+
+#define to_cdns3_request(r) (container_of(r, struct cdns3_request, request))
+
+/* Stages used during enumeration process. */
+#define CDNS3_SETUP_STAGE 0x0
+#define CDNS3_DATA_STAGE 0x1
+#define CDNS3_STATUS_STAGE 0x2
+
+/**
+ * struct cdns3_device - represent USB device.
+ * @dev: pointer to device structure associated with this controller
+ * @sysdev: pointer to the DMA capable device
+ * @gadget: device side representation of the peripheral controller
+ * @gadget_driver: pointer to the gadget driver
+ * @dev_ver: device controller version.
+ * @lock: spinlock for synchronizing
+ * @regs: base address for device side registers
+ * @setup_buf: used while processing usb control requests
+ * @setup_dma: dma address for setup_buf
+ * @zlp_buf: zero-length packet buffer
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @ep0_data_dir: direction for control transfer
+ * @eps: array of pointers to all endpoints, excluding ep0
+ * @aligned_buf_list: list of aligned buffers internally allocated by driver
+ * @aligned_buf_wq: workqueue freeing no-longer-used aligned buffers.
+ * @selected_ep: currently selected endpoint. It's used only to improve
+ * performance.
+ * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP.
+ * @u1_allowed: allow device transition to u1 state
+ * @u2_allowed: allow device transition to u2 state
+ * @is_selfpowered: device is self powered
+ * @setup_pending: setup packet is being processed by the gadget driver
+ * @hw_configured_flag: hardware endpoint configuration was set.
+ * @wake_up_flag: allow device to remote wakeup the host
+ * @status_completion_no_call: indicates that the driver is waiting for status
+ * stage completion. It's used in deferred SET_CONFIGURATION requests.
+ * @onchip_buffers: number of available on-chip buffers.
+ * @onchip_used_size: actual size of on-chip memory assigned to endpoints.
+ * @pending_status_wq: workqueue handling status stage for deferred requests.
+ * @pending_status_request: request for which status stage was deferred + */ +struct cdns3_device { + struct device *dev; + struct device *sysdev; + + struct usb_gadget gadget; + struct usb_gadget_driver *gadget_driver; + +#define CDNS_REVISION_V0 0x00024501 +#define CDNS_REVISION_V1 0x00024509 + u32 dev_ver; + + /* generic spin-lock for drivers */ + spinlock_t lock; + + struct cdns3_usb_regs __iomem *regs; + + struct usb_ctrlrequest *setup_buf; + dma_addr_t setup_dma; + void *zlp_buf; + + u8 ep0_stage; + int ep0_data_dir; + + struct cdns3_endpoint *eps[CDNS3_ENDPOINTS_MAX_COUNT]; + + struct list_head aligned_buf_list; + struct work_struct aligned_buf_wq; + + u32 selected_ep; + u16 isoch_delay; + + unsigned wait_for_setup:1; + unsigned u1_allowed:1; + unsigned u2_allowed:1; + unsigned is_selfpowered:1; + unsigned setup_pending:1; + int hw_configured_flag:1; + int wake_up_flag:1; + unsigned status_completion_no_call:1; + int out_mem_is_allocated; + + struct work_struct pending_status_wq; + struct usb_request *pending_status_request; + + /*in KB */ + u16 onchip_buffers; + u16 onchip_used_size; +}; + +void cdns3_set_register_bit(void __iomem *ptr, u32 mask); +dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, + struct cdns3_trb *trb); +enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev); +void cdns3_pending_setup_status_handler(struct work_struct *work); +void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev); +void cdns3_set_hw_configuration(struct cdns3_device *priv_dev); +void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep); +void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable); +struct usb_request *cdns3_next_request(struct list_head *list); +int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request); +void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm); +int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep); +u8 cdns3_ep_addr_to_index(u8 ep_addr); +int cdns3_gadget_ep_set_wedge(struct usb_ep *ep); +int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value); +void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep); +int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep); +struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags); +void cdns3_gadget_ep_free_request(struct usb_ep *ep, + struct usb_request *request); +int cdns3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request); +void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, + struct cdns3_request *priv_req, + int status); + +int cdns3_init_ep0(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep); +void cdns3_ep0_config(struct cdns3_device *priv_dev); +void cdns3_ep_config(struct cdns3_endpoint *priv_ep); +void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir); +int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev); + +#endif /* __LINUX_CDNS3_GADGET */ diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h new file mode 100644 index 000000000000..b498a170b7e8 --- /dev/null +++ b/drivers/usb/cdns3/host-export.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence USBSS DRD Driver - Host Export APIs + * + * Copyright (C) 2017-2018 NXP + * + * Authors: Peter Chen <peter.chen@nxp.com> + */ +#ifndef __LINUX_CDNS3_HOST_EXPORT +#define __LINUX_CDNS3_HOST_EXPORT + +#ifdef CONFIG_USB_CDNS3_HOST + +int cdns3_host_init(struct cdns3 *cdns); +void cdns3_host_exit(struct cdns3 *cdns); + 
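+/*
+ * Inert stubs are provided below when host support is compiled out, so
+ * that the rest of the driver still builds without CONFIG_USB_CDNS3_HOST.
+ */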
+#else + +static inline int cdns3_host_init(struct cdns3 *cdns) +{ + return -ENXIO; +} + +static inline void cdns3_host_exit(struct cdns3 *cdns) { } + +#endif /* CONFIG_USB_CDNS3_HOST */ + +#endif /* __LINUX_CDNS3_HOST_EXPORT */ diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c new file mode 100644 index 000000000000..2733a8f71fcd --- /dev/null +++ b/drivers/usb/cdns3/host.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence USBSS DRD Driver - host side + * + * Copyright (C) 2018-2019 Cadence Design Systems. + * Copyright (C) 2017-2018 NXP + * + * Authors: Peter Chen <peter.chen@nxp.com> + * Pawel Laszczak <pawell@cadence.com> + */ + +#include <linux/platform_device.h> +#include "core.h" +#include "drd.h" + +static int __cdns3_host_init(struct cdns3 *cdns) +{ + struct platform_device *xhci; + int ret; + + cdns3_drd_switch_host(cdns, 1); + + xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO); + if (!xhci) { + dev_err(cdns->dev, "couldn't allocate xHCI device\n"); + return -ENOMEM; + } + + xhci->dev.parent = cdns->dev; + cdns->host_dev = xhci; + + ret = platform_device_add_resources(xhci, cdns->xhci_res, + CDNS3_XHCI_RESOURCES_NUM); + if (ret) { + dev_err(cdns->dev, "couldn't add resources to xHCI device\n"); + goto err1; + } + + ret = platform_device_add(xhci); + if (ret) { + dev_err(cdns->dev, "failed to register xHCI device\n"); + goto err1; + } + + return 0; +err1: + platform_device_put(xhci); + return ret; +} + +static void cdns3_host_exit(struct cdns3 *cdns) +{ + platform_device_unregister(cdns->host_dev); + cdns->host_dev = NULL; + cdns3_drd_switch_host(cdns, 0); +} + +int cdns3_host_init(struct cdns3 *cdns) +{ + struct cdns3_role_driver *rdrv; + + rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = __cdns3_host_init; + rdrv->stop = cdns3_host_exit; + rdrv->state = CDNS3_ROLE_STATE_INACTIVE; + rdrv->name = "host"; + + cdns->roles[USB_ROLE_HOST] = rdrv; + + return 0; +} diff --git a/drivers/usb/cdns3/trace.c b/drivers/usb/cdns3/trace.c new file mode 100644 index 000000000000..459fa72d9c74 --- /dev/null +++ b/drivers/usb/cdns3/trace.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USBSS device controller driver Trace Support + * + * Copyright (C) 2018-2019 Cadence. + * + * Author: Pawel Laszczak <pawell@cadence.com> + */ + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h new file mode 100644 index 000000000000..e92348c9b4d7 --- /dev/null +++ b/drivers/usb/cdns3/trace.h @@ -0,0 +1,493 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * USBSS device controller driver. + * Trace support header file. + * + * Copyright (C) 2018-2019 Cadence. 
+ * + * Author: Pawel Laszczak <pawell@cadence.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cdns3 + +#if !defined(__LINUX_CDNS3_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __LINUX_CDNS3_TRACE + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <asm/byteorder.h> +#include <linux/usb/ch9.h> +#include "core.h" +#include "gadget.h" +#include "debug.h" + +#define CDNS3_MSG_MAX 500 + +TRACE_EVENT(cdns3_halt, + TP_PROTO(struct cdns3_endpoint *ep_priv, u8 halt, u8 flush), + TP_ARGS(ep_priv, halt, flush), + TP_STRUCT__entry( + __string(name, ep_priv->name) + __field(u8, halt) + __field(u8, flush) + ), + TP_fast_assign( + __assign_str(name, ep_priv->name); + __entry->halt = halt; + __entry->flush = flush; + ), + TP_printk("Halt %s for %s: %s", __entry->flush ? " and flush" : "", + __get_str(name), __entry->halt ? "set" : "cleared") +); + +TRACE_EVENT(cdns3_wa1, + TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), + TP_ARGS(ep_priv, msg), + TP_STRUCT__entry( + __string(ep_name, ep_priv->name) + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(ep_name, ep_priv->name); + __assign_str(msg, msg); + ), + TP_printk("WA1: %s %s", __get_str(ep_name), __get_str(msg)) +); + +TRACE_EVENT(cdns3_wa2, + TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), + TP_ARGS(ep_priv, msg), + TP_STRUCT__entry( + __string(ep_name, ep_priv->name) + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(ep_name, ep_priv->name); + __assign_str(msg, msg); + ), + TP_printk("WA2: %s %s", __get_str(ep_name), __get_str(msg)) +); + +DECLARE_EVENT_CLASS(cdns3_log_doorbell, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr), + TP_STRUCT__entry( + __string(name, ep_name) + __field(u32, ep_trbaddr) + ), + TP_fast_assign( + __assign_str(name, ep_name); + __entry->ep_trbaddr = ep_trbaddr; + ), + TP_printk("%s, ep_trbaddr %08x", __get_str(name), + __entry->ep_trbaddr) +); + +DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_ep0, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr) +); + +DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_epx, + TP_PROTO(const char *ep_name, u32 ep_trbaddr), + TP_ARGS(ep_name, ep_trbaddr) +); + +DECLARE_EVENT_CLASS(cdns3_log_usb_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), + TP_ARGS(priv_dev, usb_ists), + TP_STRUCT__entry( + __field(enum usb_device_speed, speed) + __field(u32, usb_ists) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->speed = cdns3_get_speed(priv_dev); + __entry->usb_ists = usb_ists; + ), + TP_printk("%s", cdns3_decode_usb_irq(__get_str(str), __entry->speed, + __entry->usb_ists)) +); + +DEFINE_EVENT(cdns3_log_usb_irq, cdns3_usb_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), + TP_ARGS(priv_dev, usb_ists) +); + +DECLARE_EVENT_CLASS(cdns3_log_epx_irq, + TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_dev, priv_ep), + TP_STRUCT__entry( + __string(ep_name, priv_ep->name) + __field(u32, ep_sts) + __field(u32, ep_traddr) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __assign_str(ep_name, priv_ep->name); + __entry->ep_sts = readl(&priv_dev->regs->ep_sts); + __entry->ep_traddr = readl(&priv_dev->regs->ep_traddr); + ), + TP_printk("%s, ep_traddr: %08x", + cdns3_decode_epx_irq(__get_str(str), + __get_str(ep_name), + __entry->ep_sts), + __entry->ep_traddr) +); + +DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq, + TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), + 
TP_ARGS(priv_dev, priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), + TP_ARGS(priv_dev, ep_sts), + TP_STRUCT__entry( + __field(int, ep_dir) + __field(u32, ep_sts) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->ep_dir = priv_dev->ep0_data_dir; + __entry->ep_sts = ep_sts; + ), + TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str), + __entry->ep_dir, + __entry->ep_sts)) +); + +DEFINE_EVENT(cdns3_log_ep0_irq, cdns3_ep0_irq, + TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), + TP_ARGS(priv_dev, ep_sts) +); + +DECLARE_EVENT_CLASS(cdns3_log_ctrl, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl), + TP_STRUCT__entry( + __field(u8, bRequestType) + __field(u8, bRequest) + __field(u16, wValue) + __field(u16, wIndex) + __field(u16, wLength) + __dynamic_array(char, str, CDNS3_MSG_MAX) + ), + TP_fast_assign( + __entry->bRequestType = ctrl->bRequestType; + __entry->bRequest = ctrl->bRequest; + __entry->wValue = le16_to_cpu(ctrl->wValue); + __entry->wIndex = le16_to_cpu(ctrl->wIndex); + __entry->wLength = le16_to_cpu(ctrl->wLength); + ), + TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNS3_MSG_MAX, + __entry->bRequestType, + __entry->bRequest, __entry->wValue, + __entry->wIndex, __entry->wLength) + ) +); + +DEFINE_EVENT(cdns3_log_ctrl, cdns3_ctrl_req, + TP_PROTO(struct usb_ctrlrequest *ctrl), + TP_ARGS(ctrl) +); + +DECLARE_EVENT_CLASS(cdns3_log_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->priv_ep->name) + __field(struct cdns3_request *, req) + __field(void *, buf) + __field(unsigned int, actual) + __field(unsigned int, length) + __field(int, status) + __field(int, zero) + __field(int, short_not_ok) + __field(int, no_interrupt) + __field(int, start_trb) + __field(int, end_trb) + __field(struct cdns3_trb *, start_trb_addr) + __field(int, flags) + ), + TP_fast_assign( + __assign_str(name, req->priv_ep->name); + __entry->req = req; + __entry->buf = req->request.buf; + __entry->actual = req->request.actual; + __entry->length = req->request.length; + __entry->status = req->request.status; + __entry->zero = req->request.zero; + __entry->short_not_ok = req->request.short_not_ok; + __entry->no_interrupt = req->request.no_interrupt; + __entry->start_trb = req->start_trb; + __entry->end_trb = req->end_trb; + __entry->start_trb_addr = req->trb; + __entry->flags = req->flags; + ), + TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d," + " trb: [start:%d, end:%d: virt addr %pa], flags:%x ", + __get_str(name), __entry->req, __entry->buf, __entry->actual, + __entry->length, + __entry->zero ? "Z" : "z", + __entry->short_not_ok ? "S" : "s", + __entry->no_interrupt ? 
"I" : "i", + __entry->status, + __entry->start_trb, + __entry->end_trb, + __entry->start_trb_addr, + __entry->flags + ) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_alloc_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_free_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_ep_queue, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_ep_dequeue, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_request, cdns3_gadget_giveback, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +TRACE_EVENT(cdns3_ep0_queue, + TP_PROTO(struct cdns3_device *dev_priv, struct usb_request *request), + TP_ARGS(dev_priv, request), + TP_STRUCT__entry( + __field(int, dir) + __field(int, length) + ), + TP_fast_assign( + __entry->dir = dev_priv->ep0_data_dir; + __entry->length = request->length; + ), + TP_printk("Queue to ep0%s length: %u", __entry->dir ? "in" : "out", + __entry->length) +); + +DECLARE_EVENT_CLASS(cdns3_log_aligned_request, + TP_PROTO(struct cdns3_request *priv_req), + TP_ARGS(priv_req), + TP_STRUCT__entry( + __string(name, priv_req->priv_ep->name) + __field(struct usb_request *, req) + __field(void *, buf) + __field(dma_addr_t, dma) + __field(void *, aligned_buf) + __field(dma_addr_t, aligned_dma) + __field(u32, aligned_buf_size) + ), + TP_fast_assign( + __assign_str(name, priv_req->priv_ep->name); + __entry->req = &priv_req->request; + __entry->buf = priv_req->request.buf; + __entry->dma = priv_req->request.dma; + __entry->aligned_buf = priv_req->aligned_buf->buf; + __entry->aligned_dma = priv_req->aligned_buf->dma; + __entry->aligned_buf_size = priv_req->aligned_buf->size; + ), + TP_printk("%s: req: %p, req buf %p, dma %pad a_buf %p a_dma %pad, size %d", + __get_str(name), __entry->req, __entry->buf, &__entry->dma, + __entry->aligned_buf, &__entry->aligned_dma, + __entry->aligned_buf_size + ) +); + +DEFINE_EVENT(cdns3_log_aligned_request, cdns3_free_aligned_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DECLARE_EVENT_CLASS(cdns3_log_trb, + TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), + TP_ARGS(priv_ep, trb), + TP_STRUCT__entry( + __string(name, priv_ep->name) + __field(struct cdns3_trb *, trb) + __field(u32, buffer) + __field(u32, length) + __field(u32, control) + __field(u32, type) + ), + TP_fast_assign( + __assign_str(name, priv_ep->name); + __entry->trb = trb; + __entry->buffer = trb->buffer; + __entry->length = trb->length; + __entry->control = trb->control; + __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); + ), + TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)", + __get_str(name), __entry->trb, __entry->buffer, + TRB_LEN(__entry->length), + (u8)TRB_BURST_LEN_GET(__entry->length), + __entry->control, + __entry->control & TRB_CYCLE ? "C=1, " : "C=0, ", + __entry->control & TRB_TOGGLE ? "T=1, " : "T=0, ", + __entry->control & TRB_ISP ? "ISP, " : "", + __entry->control & TRB_FIFO_MODE ? "FIFO, " : "", + __entry->control & TRB_CHAIN ? "CHAIN, " : "", + __entry->control & TRB_IOC ? "IOC, " : "", + TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? 
"Normal" : "LINK" + ) +); + +DEFINE_EVENT(cdns3_log_trb, cdns3_prepare_trb, + TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), + TP_ARGS(priv_ep, trb) +); + +DEFINE_EVENT(cdns3_log_trb, cdns3_complete_trb, + TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), + TP_ARGS(priv_ep, trb) +); + +DECLARE_EVENT_CLASS(cdns3_log_ring, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep), + TP_STRUCT__entry( + __dynamic_array(u8, ring, TRB_RING_SIZE) + __dynamic_array(u8, priv_ep, sizeof(struct cdns3_endpoint)) + __dynamic_array(char, buffer, + (TRBS_PER_SEGMENT * 65) + CDNS3_MSG_MAX) + ), + TP_fast_assign( + memcpy(__get_dynamic_array(priv_ep), priv_ep, + sizeof(struct cdns3_endpoint)); + memcpy(__get_dynamic_array(ring), priv_ep->trb_pool, + TRB_RING_SIZE); + ), + + TP_printk("%s", + cdns3_dbg_ring((struct cdns3_endpoint *)__get_str(priv_ep), + (struct cdns3_trb *)__get_str(ring), + __get_str(buffer))) +); + +DEFINE_EVENT(cdns3_log_ring, cdns3_ring, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_ep, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep), + TP_STRUCT__entry( + __string(name, priv_ep->name) + __field(unsigned int, maxpacket) + __field(unsigned int, maxpacket_limit) + __field(unsigned int, max_streams) + __field(unsigned int, maxburst) + __field(unsigned int, flags) + __field(unsigned int, dir) + __field(u8, enqueue) + __field(u8, dequeue) + ), + TP_fast_assign( + __assign_str(name, priv_ep->name); + __entry->maxpacket = priv_ep->endpoint.maxpacket; + __entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit; + __entry->max_streams = priv_ep->endpoint.max_streams; + __entry->maxburst = priv_ep->endpoint.maxburst; + __entry->flags = priv_ep->flags; + __entry->dir = priv_ep->dir; + __entry->enqueue = priv_ep->enqueue; + __entry->dequeue = priv_ep->dequeue; + ), + TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, " + "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s", + __get_str(name), __entry->maxpacket, + __entry->maxpacket_limit, __entry->max_streams, + __entry->maxburst, __entry->enqueue, + __entry->dequeue, + __entry->flags & EP_ENABLED ? "EN | " : "", + __entry->flags & EP_STALLED ? "STALLED | " : "", + __entry->flags & EP_WEDGE ? "WEDGE | " : "", + __entry->flags & EP_TRANSFER_STARTED ? "STARTED | " : "", + __entry->flags & EP_UPDATE_EP_TRBADDR ? "UPD TRB | " : "", + __entry->flags & EP_PENDING_REQUEST ? "REQ PEN | " : "", + __entry->flags & EP_RING_FULL ? "RING FULL |" : "", + __entry->flags & EP_CLAIMED ? "CLAIMED " : "", + __entry->dir ? 
"IN" : "OUT" + ) +); + +DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_enable, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_disable, + TP_PROTO(struct cdns3_endpoint *priv_ep), + TP_ARGS(priv_ep) +); + +DECLARE_EVENT_CLASS(cdns3_log_request_handled, + TP_PROTO(struct cdns3_request *priv_req, int current_index, + int handled), + TP_ARGS(priv_req, current_index, handled), + TP_STRUCT__entry( + __field(struct cdns3_request *, priv_req) + __field(unsigned int, dma_position) + __field(unsigned int, handled) + __field(unsigned int, dequeue_idx) + __field(unsigned int, enqueue_idx) + __field(unsigned int, start_trb) + __field(unsigned int, end_trb) + ), + TP_fast_assign( + __entry->priv_req = priv_req; + __entry->dma_position = current_index; + __entry->handled = handled; + __entry->dequeue_idx = priv_req->priv_ep->dequeue; + __entry->enqueue_idx = priv_req->priv_ep->enqueue; + __entry->start_trb = priv_req->start_trb; + __entry->end_trb = priv_req->end_trb; + ), + TP_printk("Req: %p %s, DMA pos: %d, ep deq: %d, ep enq: %d," + " start trb: %d, end trb: %d", + __entry->priv_req, + __entry->handled ? "handled" : "not handled", + __entry->dma_position, __entry->dequeue_idx, + __entry->enqueue_idx, __entry->start_trb, + __entry->end_trb + ) +); + +DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled, + TP_PROTO(struct cdns3_request *priv_req, int current_index, + int handled), + TP_ARGS(priv_req, current_index, handled) +); +#endif /* __LINUX_CDNS3_TRACE */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 053432d79bf7..8f18e7b6cadf 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); unsigned long flags; - spin_lock_irqsave(&ci->lock, flags); - ci->gadget.speed = USB_SPEED_UNKNOWN; - ci->remote_wakeup = 0; - ci->suspended = 0; - spin_unlock_irqrestore(&ci->lock, flags); - /* flush all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_fifo_flush(ep); @@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) ci->status = NULL; } + spin_lock_irqsave(&ci->lock, flags); + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->remote_wakeup = 0; + ci->suspended = 0; + spin_unlock_irqrestore(&ci->lock, flags); + return 0; } @@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep) return -EBUSY; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } /* only internal SW should disable ctrl endpts */ @@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req, return -EINVAL; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } retval = _ep_queue(ep, req, gfp_flags); spin_unlock_irqrestore(hwep->lock, flags); return retval; @@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) return -EINVAL; spin_lock_irqsave(hwep->lock, flags); - - hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN) + hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { dma_pool_free(hwep->td_pool, node->ptr, node->dma); @@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep) } spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return; + } hw_ep_flush(hwep->ci, hwep->num, hwep->dir); @@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget) int ret = 0; spin_lock_irqsave(&ci->lock, flags); + if (ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(&ci->lock, flags); + return 0; + } if (!ci->remote_wakeup) { ret = -EOPNOTSUPP; goto out; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index a7824a51f86d..70afb2ca1eab 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id) { struct wdm_device *desc = file->private_data; - wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); + wait_event(desc->wait, + /* + * needs both flags. We cannot do with one + * because resetting it would cause a race + * with write() yet we need to signal + * a disconnect + */ + !test_bit(WDM_IN_USE, &desc->flags) || + test_bit(WDM_DISCONNECTING, &desc->flags)); /* cannot dereference desc->intf if WDM_DISCONNECTING */ - if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + return -ENODEV; + if (desc->werr < 0) dev_err(&desc->intf->dev, "Error in flush path: %d\n", desc->werr); @@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf) spin_lock_irqsave(&desc->iuspin, flags); set_bit(WDM_DISCONNECTING, &desc->flags); set_bit(WDM_READ, &desc->flags); - /* to terminate pending flushes */ - clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); wake_up_all(&desc->wait); mutex_lock(&desc->rlock); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 7ff831f2fd21..dcd7066ffba2 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -2359,8 +2359,11 @@ static int usbtmc_probe(struct usb_interface *intf, goto err_put; } + retcode = -EINVAL; data->bulk_in = bulk_in->bEndpointAddress; data->wMaxPacketSize = usb_endpoint_maxp(bulk_in); + if (!data->wMaxPacketSize) + goto err_put; dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in); data->bulk_out = bulk_out->bEndpointAddress; diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig new file mode 100644 index 000000000000..d611477aae41 --- /dev/null +++ b/drivers/usb/common/Kconfig @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: GPL-2.0 + +config USB_COMMON + tristate + + +config USB_LED_TRIG + bool "USB LED Triggers" + depends on LEDS_CLASS && LEDS_TRIGGERS + select USB_COMMON + help + This option adds LED triggers for USB host and/or gadget activity. + + Say Y here if you are working on a system with led-class supported + LEDs and you want to use them as activity indicators for USB host or + gadget. + +config USB_ULPI_BUS + tristate "USB ULPI PHY interface support" + select USB_COMMON + help + UTMI+ Low Pin Interface (ULPI) is specification for a commonly used + USB 2.0 PHY interface. The ULPI specification defines a standard set + of registers that can be used to detect the vendor and product which + allows ULPI to be handled as a bus. This module is the driver for that + bus. 
+ + The ULPI interfaces (the buses) are registered by the drivers for USB + controllers which support ULPI register access and have ULPI PHY + attached to them. The ULPI PHY drivers themselves are normal PHY + drivers. + + ULPI PHYs provide often functions such as ADP sensing/probing (OTG + protocol) and USB charger detection. + + To compile this driver as a module, choose M here: the module will + be called ulpi. + +config USB_CONN_GPIO + tristate "USB GPIO Based Connection Detection Driver" + depends on GPIOLIB + select USB_ROLE_SWITCH + help + The driver supports USB role switch between host and device via GPIO + based USB cable detection, used typically if an input GPIO is used + to detect USB ID pin, and another input GPIO may be also used to detect + Vbus pin at the same time, it also can be used to enable/disable + device if an input GPIO is only used to detect Vbus pin. + + To compile the driver as a module, choose M here: the module will + be called usb-conn-gpio.ko diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile index 0a7c45e85481..8ac4d21ef5c8 100644 --- a/drivers/usb/common/Makefile +++ b/drivers/usb/common/Makefile @@ -5,7 +5,9 @@ obj-$(CONFIG_USB_COMMON) += usb-common.o usb-common-y += common.o +usb-common-$(CONFIG_TRACING) += debug.o usb-common-$(CONFIG_USB_LED_TRIG) += led.o +obj-$(CONFIG_USB_CONN_GPIO) += usb-conn-gpio.o obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c new file mode 100644 index 000000000000..92a986aeaa5d --- /dev/null +++ b/drivers/usb/common/debug.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Common USB debugging functions + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Felipe Balbi <balbi@ti.com>, + * Sebastian Andrzej Siewior <bigeasy@linutronix.de> + */ + +#include <linux/usb/ch9.h> + +static void usb_decode_get_status(__u8 bRequestType, __u16 wIndex, + __u16 wLength, char *str, size_t size) +{ + switch (bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + snprintf(str, size, "Get Device Status(Length = %d)", wLength); + break; + case USB_RECIP_INTERFACE: + snprintf(str, size, + "Get Interface Status(Intf = %d, Length = %d)", + wIndex, wLength); + break; + case USB_RECIP_ENDPOINT: + snprintf(str, size, "Get Endpoint Status(ep%d%s)", + wIndex & ~USB_DIR_IN, + wIndex & USB_DIR_IN ? 
"in" : "out"); + break; + } +} + +static const char *usb_decode_device_feature(u16 wValue) +{ + switch (wValue) { + case USB_DEVICE_SELF_POWERED: + return "Self Powered"; + case USB_DEVICE_REMOTE_WAKEUP: + return "Remote Wakeup"; + case USB_DEVICE_TEST_MODE: + return "Test Mode"; + case USB_DEVICE_U1_ENABLE: + return "U1 Enable"; + case USB_DEVICE_U2_ENABLE: + return "U2 Enable"; + case USB_DEVICE_LTM_ENABLE: + return "LTM Enable"; + default: + return "UNKNOWN"; + } +} + +static const char *usb_decode_test_mode(u16 wIndex) +{ + switch (wIndex) { + case TEST_J: + return ": TEST_J"; + case TEST_K: + return ": TEST_K"; + case TEST_SE0_NAK: + return ": TEST_SE0_NAK"; + case TEST_PACKET: + return ": TEST_PACKET"; + case TEST_FORCE_EN: + return ": TEST_FORCE_EN"; + default: + return ": UNKNOWN"; + } +} + +static void usb_decode_set_clear_feature(__u8 bRequestType, + __u8 bRequest, __u16 wValue, + __u16 wIndex, char *str, size_t size) +{ + switch (bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + snprintf(str, size, "%s Device Feature(%s%s)", + bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", + usb_decode_device_feature(wValue), + wValue == USB_DEVICE_TEST_MODE ? + usb_decode_test_mode(wIndex) : ""); + break; + case USB_RECIP_INTERFACE: + snprintf(str, size, "%s Interface Feature(%s)", + bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", + wValue == USB_INTRF_FUNC_SUSPEND ? + "Function Suspend" : "UNKNOWN"); + break; + case USB_RECIP_ENDPOINT: + snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)", + bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", + wValue == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN", + wIndex & ~USB_DIR_IN, + wIndex & USB_DIR_IN ? "in" : "out"); + break; + } +} + +static void usb_decode_set_address(__u16 wValue, char *str, size_t size) +{ + snprintf(str, size, "Set Address(Addr = %02x)", wValue); +} + +static void usb_decode_get_set_descriptor(__u8 bRequestType, __u8 bRequest, + __u16 wValue, __u16 wIndex, + __u16 wLength, char *str, size_t size) +{ + char *s; + + switch (wValue >> 8) { + case USB_DT_DEVICE: + s = "Device"; + break; + case USB_DT_CONFIG: + s = "Configuration"; + break; + case USB_DT_STRING: + s = "String"; + break; + case USB_DT_INTERFACE: + s = "Interface"; + break; + case USB_DT_ENDPOINT: + s = "Endpoint"; + break; + case USB_DT_DEVICE_QUALIFIER: + s = "Device Qualifier"; + break; + case USB_DT_OTHER_SPEED_CONFIG: + s = "Other Speed Config"; + break; + case USB_DT_INTERFACE_POWER: + s = "Interface Power"; + break; + case USB_DT_OTG: + s = "OTG"; + break; + case USB_DT_DEBUG: + s = "Debug"; + break; + case USB_DT_INTERFACE_ASSOCIATION: + s = "Interface Association"; + break; + case USB_DT_BOS: + s = "BOS"; + break; + case USB_DT_DEVICE_CAPABILITY: + s = "Device Capability"; + break; + case USB_DT_PIPE_USAGE: + s = "Pipe Usage"; + break; + case USB_DT_SS_ENDPOINT_COMP: + s = "SS Endpoint Companion"; + break; + case USB_DT_SSP_ISOC_ENDPOINT_COMP: + s = "SSP Isochronous Endpoint Companion"; + break; + default: + s = "UNKNOWN"; + break; + } + + snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)", + bRequest == USB_REQ_GET_DESCRIPTOR ? 
"Get" : "Set", + s, wValue & 0xff, wLength); +} + +static void usb_decode_get_configuration(__u16 wLength, char *str, size_t size) +{ + snprintf(str, size, "Get Configuration(Length = %d)", wLength); +} + +static void usb_decode_set_configuration(__u8 wValue, char *str, size_t size) +{ + snprintf(str, size, "Set Configuration(Config = %d)", wValue); +} + +static void usb_decode_get_intf(__u16 wIndex, __u16 wLength, char *str, + size_t size) +{ + snprintf(str, size, "Get Interface(Intf = %d, Length = %d)", + wIndex, wLength); +} + +static void usb_decode_set_intf(__u8 wValue, __u16 wIndex, char *str, + size_t size) +{ + snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)", + wIndex, wValue); +} + +static void usb_decode_synch_frame(__u16 wIndex, __u16 wLength, + char *str, size_t size) +{ + snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)", + wIndex, wLength); +} + +static void usb_decode_set_sel(__u16 wLength, char *str, size_t size) +{ + snprintf(str, size, "Set SEL(Length = %d)", wLength); +} + +static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size) +{ + snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue); +} + +/** + * usb_decode_ctrl - returns a string representation of ctrl request + */ +const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) +{ + switch (bRequest) { + case USB_REQ_GET_STATUS: + usb_decode_get_status(bRequestType, wIndex, wLength, str, size); + break; + case USB_REQ_CLEAR_FEATURE: + case USB_REQ_SET_FEATURE: + usb_decode_set_clear_feature(bRequestType, bRequest, wValue, + wIndex, str, size); + break; + case USB_REQ_SET_ADDRESS: + usb_decode_set_address(wValue, str, size); + break; + case USB_REQ_GET_DESCRIPTOR: + case USB_REQ_SET_DESCRIPTOR: + usb_decode_get_set_descriptor(bRequestType, bRequest, wValue, + wIndex, wLength, str, size); + break; + case USB_REQ_GET_CONFIGURATION: + usb_decode_get_configuration(wLength, str, size); + break; + case USB_REQ_SET_CONFIGURATION: + usb_decode_set_configuration(wValue, str, size); + break; + case USB_REQ_GET_INTERFACE: + usb_decode_get_intf(wIndex, wLength, str, size); + break; + case USB_REQ_SET_INTERFACE: + usb_decode_set_intf(wValue, wIndex, str, size); + break; + case USB_REQ_SYNCH_FRAME: + usb_decode_synch_frame(wIndex, wLength, str, size); + break; + case USB_REQ_SET_SEL: + usb_decode_set_sel(wLength, str, size); + break; + case USB_REQ_SET_ISOCH_DELAY: + usb_decode_set_isoch_delay(wValue, str, size); + break; + default: + snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x", + bRequestType, bRequest, + (u8)(cpu_to_le16(wValue) & 0xff), + (u8)(cpu_to_le16(wValue) >> 8), + (u8)(cpu_to_le16(wIndex) & 0xff), + (u8)(cpu_to_le16(wIndex) >> 8), + (u8)(cpu_to_le16(wLength) & 0xff), + (u8)(cpu_to_le16(wLength) >> 8)); + } + + return str; +} +EXPORT_SYMBOL_GPL(usb_decode_ctrl); diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c new file mode 100644 index 000000000000..87338f9eb5be --- /dev/null +++ b/drivers/usb/common/usb-conn-gpio.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB GPIO Based Connection Detection Driver + * + * Copyright (C) 2019 MediaTek Inc. 
+ * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + * + * Some code borrowed from drivers/extcon/extcon-usb-gpio.c + */ + +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/usb/role.h> + +#define USB_GPIO_DEB_MS 20 /* ms */ +#define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */ + +#define USB_CONN_IRQF \ + (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT) + +struct usb_conn_info { + struct device *dev; + struct usb_role_switch *role_sw; + enum usb_role last_role; + struct regulator *vbus; + struct delayed_work dw_det; + unsigned long debounce_jiffies; + + struct gpio_desc *id_gpiod; + struct gpio_desc *vbus_gpiod; + int id_irq; + int vbus_irq; +}; + +/** + * "DEVICE" = VBUS and "HOST" = !ID, so we have: + * Both "DEVICE" and "HOST" can't be set as active at the same time + * so if "HOST" is active (i.e. ID is 0) we keep "DEVICE" inactive + * even if VBUS is on. + * + * Role | ID | VBUS + * ------------------------------------ + * [1] DEVICE | H | H + * [2] NONE | H | L + * [3] HOST | L | H + * [4] HOST | L | L + * + * In case we have only one of these signals: + * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1 + * - ID only - we want to distinguish between [1] and [4], so VBUS = ID + */ +static void usb_conn_detect_cable(struct work_struct *work) +{ + struct usb_conn_info *info; + enum usb_role role; + int id, vbus, ret; + + info = container_of(to_delayed_work(work), + struct usb_conn_info, dw_det); + + /* check ID and VBUS */ + id = info->id_gpiod ? + gpiod_get_value_cansleep(info->id_gpiod) : 1; + vbus = info->vbus_gpiod ? + gpiod_get_value_cansleep(info->vbus_gpiod) : id; + + if (!id) + role = USB_ROLE_HOST; + else if (vbus) + role = USB_ROLE_DEVICE; + else + role = USB_ROLE_NONE; + + dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n", + info->last_role, role, id, vbus); + + if (info->last_role == role) { + dev_warn(info->dev, "repeated role: %d\n", role); + return; + } + + if (info->last_role == USB_ROLE_HOST) + regulator_disable(info->vbus); + + ret = usb_role_switch_set_role(info->role_sw, role); + if (ret) + dev_err(info->dev, "failed to set role: %d\n", ret); + + if (role == USB_ROLE_HOST) { + ret = regulator_enable(info->vbus); + if (ret) + dev_err(info->dev, "enable vbus regulator failed\n"); + } + + info->last_role = role; + + dev_dbg(info->dev, "vbus regulator is %s\n", + regulator_is_enabled(info->vbus) ? 
"enabled" : "disabled"); +} + +static void usb_conn_queue_dwork(struct usb_conn_info *info, + unsigned long delay) +{ + queue_delayed_work(system_power_efficient_wq, &info->dw_det, delay); +} + +static irqreturn_t usb_conn_isr(int irq, void *dev_id) +{ + struct usb_conn_info *info = dev_id; + + usb_conn_queue_dwork(info, info->debounce_jiffies); + + return IRQ_HANDLED; +} + +static int usb_conn_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct usb_conn_info *info; + int ret = 0; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->dev = dev; + info->id_gpiod = devm_gpiod_get_optional(dev, "id", GPIOD_IN); + if (IS_ERR(info->id_gpiod)) + return PTR_ERR(info->id_gpiod); + + info->vbus_gpiod = devm_gpiod_get_optional(dev, "vbus", GPIOD_IN); + if (IS_ERR(info->vbus_gpiod)) + return PTR_ERR(info->vbus_gpiod); + + if (!info->id_gpiod && !info->vbus_gpiod) { + dev_err(dev, "failed to get gpios\n"); + return -ENODEV; + } + + if (info->id_gpiod) + ret = gpiod_set_debounce(info->id_gpiod, USB_GPIO_DEB_US); + if (!ret && info->vbus_gpiod) + ret = gpiod_set_debounce(info->vbus_gpiod, USB_GPIO_DEB_US); + if (ret < 0) + info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEB_MS); + + INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable); + + info->vbus = devm_regulator_get(dev, "vbus"); + if (IS_ERR(info->vbus)) { + dev_err(dev, "failed to get vbus\n"); + return PTR_ERR(info->vbus); + } + + info->role_sw = usb_role_switch_get(dev); + if (IS_ERR(info->role_sw)) { + if (PTR_ERR(info->role_sw) != -EPROBE_DEFER) + dev_err(dev, "failed to get role switch\n"); + + return PTR_ERR(info->role_sw); + } + + if (info->id_gpiod) { + info->id_irq = gpiod_to_irq(info->id_gpiod); + if (info->id_irq < 0) { + dev_err(dev, "failed to get ID IRQ\n"); + ret = info->id_irq; + goto put_role_sw; + } + + ret = devm_request_threaded_irq(dev, info->id_irq, NULL, + usb_conn_isr, USB_CONN_IRQF, + pdev->name, info); + if (ret < 0) { + dev_err(dev, "failed to request ID IRQ\n"); + goto put_role_sw; + } + } + + if (info->vbus_gpiod) { + info->vbus_irq = gpiod_to_irq(info->vbus_gpiod); + if (info->vbus_irq < 0) { + dev_err(dev, "failed to get VBUS IRQ\n"); + ret = info->vbus_irq; + goto put_role_sw; + } + + ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL, + usb_conn_isr, USB_CONN_IRQF, + pdev->name, info); + if (ret < 0) { + dev_err(dev, "failed to request VBUS IRQ\n"); + goto put_role_sw; + } + } + + platform_set_drvdata(pdev, info); + + /* Perform initial detection */ + usb_conn_queue_dwork(info, 0); + + return 0; + +put_role_sw: + usb_role_switch_put(info->role_sw); + return ret; +} + +static int usb_conn_remove(struct platform_device *pdev) +{ + struct usb_conn_info *info = platform_get_drvdata(pdev); + + cancel_delayed_work_sync(&info->dw_det); + + if (info->last_role == USB_ROLE_HOST) + regulator_disable(info->vbus); + + usb_role_switch_put(info->role_sw); + + return 0; +} + +static int __maybe_unused usb_conn_suspend(struct device *dev) +{ + struct usb_conn_info *info = dev_get_drvdata(dev); + + if (info->id_gpiod) + disable_irq(info->id_irq); + if (info->vbus_gpiod) + disable_irq(info->vbus_irq); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int __maybe_unused usb_conn_resume(struct device *dev) +{ + struct usb_conn_info *info = dev_get_drvdata(dev); + + pinctrl_pm_select_default_state(dev); + + if (info->id_gpiod) + enable_irq(info->id_irq); + if (info->vbus_gpiod) + enable_irq(info->vbus_irq); + + 
usb_conn_queue_dwork(info, 0); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(usb_conn_pm_ops, + usb_conn_suspend, usb_conn_resume); + +static const struct of_device_id usb_conn_dt_match[] = { + { .compatible = "gpio-usb-b-connector", }, + { } +}; +MODULE_DEVICE_TABLE(of, usb_conn_dt_match); + +static struct platform_driver usb_conn_driver = { + .probe = usb_conn_probe, + .remove = usb_conn_remove, + .driver = { + .name = "usb-conn-gpio", + .pm = &usb_conn_pm_ops, + .of_match_table = usb_conn_dt_match, + }, +}; + +module_platform_driver(usb_conn_driver); + +MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>"); +MODULE_DESCRIPTION("USB GPIO based connection detection driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 9d6cb709ca7b..151a74a54386 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; struct usb_ssp_cap_descriptor *ssp_cap; - unsigned char *buffer; + unsigned char *buffer, *buffer0; int length, total_len, num, i, ssac; __u8 cap_type; int ret; @@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) ret = -ENOMSG; goto err; } + + buffer0 = buffer; total_len -= length; + buffer += length; for (i = 0; i < num; i++) { - buffer += length; cap = (struct usb_dev_cap_header *)buffer; if (total_len < sizeof(*cap) || total_len < cap->bLength) { @@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) break; } - total_len -= length; - if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_warn(ddev, "descriptor type invalid, skip\n"); continue; @@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) default: break; } + + total_len -= length; + buffer += length; } + dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); return 0; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 63f23f567680..3f899552f6e3 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -972,17 +972,11 @@ error: return ret; } -static int match_devt(struct device *dev, const void *data) -{ - return dev->devt == (dev_t)(unsigned long)(void *)data; -} - static struct usb_device *usbdev_lookup_by_devt(dev_t devt) { struct device *dev; - dev = bus_find_device(&usb_bus_type, NULL, - (void *) (unsigned long) devt, match_devt); + dev = bus_find_device_by_devt(&usb_bus_type, devt); if (!dev) return NULL; return to_usb_device(dev); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 03bee698d7eb..9e26b0143a59 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* EHCI, OHCI */ hcd->rsrc_start = pci_resource_start(dev, 0); hcd->rsrc_len = pci_resource_len(dev, 0); - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) { + if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) { dev_dbg(&dev->dev, "controller already in use\n"); retval = -EBUSY; goto put_hcd; } - hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); retval = -EFAULT; - goto release_mem_region; + goto put_hcd; } } else { @@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct 
pci_device_id *id) hcd->rsrc_start = pci_resource_start(dev, region); hcd->rsrc_len = pci_resource_len(dev, region); - if (request_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) + if (devm_request_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { @@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (retval != 0) - goto unmap_registers; + goto put_hcd; device_wakeup_enable(hcd->self.controller); if (pci_dev_run_wake(dev)) pm_runtime_put_noidle(&dev->dev); return retval; -unmap_registers: - if (driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); -release_mem_region: - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else - release_region(hcd->rsrc_start, hcd->rsrc_len); put_hcd: usb_put_hcd(hcd); disable_pci: @@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev) dev_set_drvdata(&dev->dev, NULL); up_read(&companions_rwsem); } - - if (hcd->driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else { - release_region(hcd->rsrc_start, hcd->rsrc_len); - } - usb_put_hcd(hcd); pci_disable_device(dev); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index add2af4af766..f225eaa98ff8 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1249,9 +1249,6 @@ EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep); * To support host controllers with limited dma capabilities we provide dma * bounce buffers. This feature can be enabled by initializing * hcd->localmem_pool using usb_hcd_setup_local_mem(). - * For this to work properly the host controller code must first use the - * function dma_declare_coherent_memory() to point out which memory area - * that should be used for dma allocations. * * The initialized hcd->localmem_pool then tells the usb code to allocate all * data for dma using the genalloc API. 
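(Editorial aside, not part of the patch above.) The hcd.c comment just touched describes the localmem_pool bounce-buffer mechanism: a host controller with device-local SRAM initializes hcd->localmem_pool via usb_hcd_setup_local_mem(), and the USB core then carves transfer buffers out of that gen_pool instead of going through the DMA API. A minimal sketch of how a probe path might do this — usb_hcd_setup_local_mem() is the helper named in the comment; the function name example_setup_local_mem and the "mem" resource describing the SRAM window are illustrative assumptions only:

	#include <linux/ioport.h>
	#include <linux/usb/hcd.h>

	/* Illustrative sketch: back this HCD's USB buffer allocations with local SRAM. */
	static int example_setup_local_mem(struct usb_hcd *hcd, struct resource *mem)
	{
		/*
		 * usb_hcd_setup_local_mem(hcd, phys_addr, dma_addr, size):
		 * phys_addr is the CPU-visible address of the SRAM, dma_addr is
		 * the address the controller uses for it (expressed here as an
		 * offset from the parent resource, as some in-tree users do),
		 * and size is the length of the window.
		 */
		return usb_hcd_setup_local_mem(hcd, mem->start,
					       mem->start - mem->parent->start,
					       resource_size(mem));
	}

Once the pool is set up, hcd_buffer_alloc() (and thus usb_alloc_coherent()) draws from the gen_pool for this controller, which is what the reworked comment refers to.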
@@ -2191,6 +2188,9 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) hcd->state = HC_STATE_RESUMING; status = hcd->driver->bus_resume(hcd); clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); + if (status == 0) + status = usb_phy_roothub_calibrate(hcd->phy_roothub); + if (status == 0) { struct usb_device *udev; int port1; @@ -2763,6 +2763,10 @@ int usb_add_hcd(struct usb_hcd *hcd, } hcd->rh_pollable = 1; + retval = usb_phy_roothub_calibrate(hcd->phy_roothub); + if (retval) + goto err_hcd_driver_setup; + /* NOTE: root hub and controller capabilities may not be the same */ if (device_can_wakeup(hcd->self.controller) && device_can_wakeup(&hcd->self.root_hub->dev)) diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c index 7580493b867a..fb1588e7c282 100644 --- a/drivers/usb/core/phy.c +++ b/drivers/usb/core/phy.c @@ -151,6 +151,27 @@ err_out: } EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode); +int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub) +{ + struct usb_phy_roothub *roothub_entry; + struct list_head *head; + int err; + + if (!phy_roothub) + return 0; + + head = &phy_roothub->list; + + list_for_each_entry(roothub_entry, head, list) { + err = phy_calibrate(roothub_entry->phy); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate); + int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h index dad564e2d2d4..20a267cd986b 100644 --- a/drivers/usb/core/phy.h +++ b/drivers/usb/core/phy.h @@ -18,6 +18,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub, enum phy_mode mode); +int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub); int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 0ab8738047da..f16c26dc079d 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -933,228 +933,6 @@ void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, } EXPORT_SYMBOL_GPL(usb_free_coherent); -/** - * usb_buffer_map - create DMA mapping(s) for an urb - * @urb: urb whose transfer_buffer/setup_packet will be mapped - * - * URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation - * succeeds. If the device is connected to this system through a non-DMA - * controller, this operation always succeeds. - * - * This call would normally be used for an urb which is reused, perhaps - * as the target of a large periodic transfer, with usb_buffer_dmasync() - * calls to synchronize memory and dma state. - * - * Reverse the effect of this call with usb_buffer_unmap(). - * - * Return: Either %NULL (indicating no buffer could be mapped), or @urb. - * - */ -#if 0 -struct urb *usb_buffer_map(struct urb *urb) -{ - struct usb_bus *bus; - struct device *controller; - - if (!urb - || !urb->dev - || !(bus = urb->dev->bus) - || !(controller = bus->sysdev)) - return NULL; - - if (controller->dma_mask) { - urb->transfer_dma = dma_map_single(controller, - urb->transfer_buffer, urb->transfer_buffer_length, - usb_pipein(urb->pipe) - ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); - /* FIXME generic api broken like pci, can't report errors */ - /* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */ - } else - urb->transfer_dma = ~0; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - return urb; -} -EXPORT_SYMBOL_GPL(usb_buffer_map); -#endif /* 0 */ - -/* XXX DISABLED, no users currently. If you wish to re-enable this - * XXX please determine whether the sync is to transfer ownership of - * XXX the buffer from device to cpu or vice verse, and thusly use the - * XXX appropriate _for_{cpu,device}() method. -DaveM - */ -#if 0 - -/** - * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s) - * @urb: urb whose transfer_buffer/setup_packet will be synchronized - */ -void usb_buffer_dmasync(struct urb *urb) -{ - struct usb_bus *bus; - struct device *controller; - - if (!urb - || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) - || !urb->dev - || !(bus = urb->dev->bus) - || !(controller = bus->sysdev)) - return; - - if (controller->dma_mask) { - dma_sync_single_for_cpu(controller, - urb->transfer_dma, urb->transfer_buffer_length, - usb_pipein(urb->pipe) - ? DMA_FROM_DEVICE : DMA_TO_DEVICE); - if (usb_pipecontrol(urb->pipe)) - dma_sync_single_for_cpu(controller, - urb->setup_dma, - sizeof(struct usb_ctrlrequest), - DMA_TO_DEVICE); - } -} -EXPORT_SYMBOL_GPL(usb_buffer_dmasync); -#endif - -/** - * usb_buffer_unmap - free DMA mapping(s) for an urb - * @urb: urb whose transfer_buffer will be unmapped - * - * Reverses the effect of usb_buffer_map(). - */ -#if 0 -void usb_buffer_unmap(struct urb *urb) -{ - struct usb_bus *bus; - struct device *controller; - - if (!urb - || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) - || !urb->dev - || !(bus = urb->dev->bus) - || !(controller = bus->sysdev)) - return; - - if (controller->dma_mask) { - dma_unmap_single(controller, - urb->transfer_dma, urb->transfer_buffer_length, - usb_pipein(urb->pipe) - ? DMA_FROM_DEVICE : DMA_TO_DEVICE); - } - urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; -} -EXPORT_SYMBOL_GPL(usb_buffer_unmap); -#endif /* 0 */ - -#if 0 -/** - * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint - * @dev: device to which the scatterlist will be mapped - * @is_in: mapping transfer direction - * @sg: the scatterlist to map - * @nents: the number of entries in the scatterlist - * - * Return: Either < 0 (indicating no buffers could be mapped), or the - * number of DMA mapping array entries in the scatterlist. - * - * Note: - * The caller is responsible for placing the resulting DMA addresses from - * the scatterlist into URB transfer buffer pointers, and for setting the - * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs. - * - * Top I/O rates come from queuing URBs, instead of waiting for each one - * to complete before starting the next I/O. This is particularly easy - * to do with scatterlists. Just allocate and submit one URB for each DMA - * mapping entry returned, stopping on the first error or when all succeed. - * Better yet, use the usb_sg_*() calls, which do that (and more) for you. - * - * This call would normally be used when translating scatterlist requests, - * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it - * may be able to coalesce mappings for improved I/O efficiency. - * - * Reverse the effect of this call with usb_buffer_unmap_sg(). 
- */ -int usb_buffer_map_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int nents) -{ - struct usb_bus *bus; - struct device *controller; - - if (!dev - || !(bus = dev->bus) - || !(controller = bus->sysdev) - || !controller->dma_mask) - return -EINVAL; - - /* FIXME generic api broken like pci, can't report errors */ - return dma_map_sg(controller, sg, nents, - is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM; -} -EXPORT_SYMBOL_GPL(usb_buffer_map_sg); -#endif - -/* XXX DISABLED, no users currently. If you wish to re-enable this - * XXX please determine whether the sync is to transfer ownership of - * XXX the buffer from device to cpu or vice verse, and thusly use the - * XXX appropriate _for_{cpu,device}() method. -DaveM - */ -#if 0 - -/** - * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s) - * @dev: device to which the scatterlist will be mapped - * @is_in: mapping transfer direction - * @sg: the scatterlist to synchronize - * @n_hw_ents: the positive return value from usb_buffer_map_sg - * - * Use this when you are re-using a scatterlist's data buffers for - * another USB request. - */ -void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents) -{ - struct usb_bus *bus; - struct device *controller; - - if (!dev - || !(bus = dev->bus) - || !(controller = bus->sysdev) - || !controller->dma_mask) - return; - - dma_sync_sg_for_cpu(controller, sg, n_hw_ents, - is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); -} -EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg); -#endif - -#if 0 -/** - * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist - * @dev: device to which the scatterlist will be mapped - * @is_in: mapping transfer direction - * @sg: the scatterlist to unmap - * @n_hw_ents: the positive return value from usb_buffer_map_sg - * - * Reverses the effect of usb_buffer_map_sg(). - */ -void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents) -{ - struct usb_bus *bus; - struct device *controller; - - if (!dev - || !(bus = dev->bus) - || !(controller = bus->sysdev) - || !controller->dma_mask) - return; - - dma_unmap_sg(controller, sg, n_hw_ents, - is_in ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); -} -EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg); -#endif - /* * Notifications of device and interface registration */ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index bff48a8a1984..6be10e496e10 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3224,14 +3224,15 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep, int result) { - struct dwc2_hsotg_req *req, *treq; unsigned int size; ep->req = NULL; - list_for_each_entry_safe(req, treq, &ep->queue, queue) - dwc2_hsotg_complete_request(hsotg, ep, req, - result); + while (!list_empty(&ep->queue)) { + struct dwc2_hsotg_req *req = get_ep_head(ep); + + dwc2_hsotg_complete_request(hsotg, ep, req, result); + } if (!hsotg->dedicated_fifos) return; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 98bce85c29d0..999ce5e84d3c 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -168,7 +168,6 @@ static void __dwc3_set_mode(struct work_struct *work) otg_set_vbus(dwc->usb2_phy->otg, true); phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); - phy_calibrate(dwc->usb2_generic_phy); } break; case DWC3_GCTL_PRTCAP_DEVICE: @@ -252,12 +251,25 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) reg |= DWC3_DCTL_CSFTRST; dwc3_writel(dwc->regs, DWC3_DCTL, reg); + /* + * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit + * is cleared only after all the clocks are synchronized. This can + * take a little more than 50ms. Set the polling rate at 20ms + * for 10 times instead. + */ + if (dwc3_is_usb31(dwc) && dwc->revision >= DWC3_USB31_REVISION_190A) + retries = 10; + do { reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (!(reg & DWC3_DCTL_CSFTRST)) goto done; - udelay(1); + if (dwc3_is_usb31(dwc) && + dwc->revision >= DWC3_USB31_REVISION_190A) + msleep(20); + else + udelay(1); } while (--retries); phy_exit(dwc->usb3_generic_phy); @@ -267,11 +279,11 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) done: /* - * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared, - * we must wait at least 50ms before accessing the PHY domain - * (synchronization delay). DWC_usb31 programming guide section 1.3.2. + * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit + * is cleared, we must wait at least 50ms before accessing the PHY + * domain (synchronization delay). 
*/ - if (dwc3_is_usb31(dwc)) + if (dwc3_is_usb31(dwc) && dwc->revision <= DWC3_USB31_REVISION_180A) msleep(50); return 0; @@ -686,8 +698,7 @@ static void dwc3_core_exit(struct dwc3 *dwc) usb_phy_set_suspend(dwc->usb3_phy, 1); phy_power_off(dwc->usb2_generic_phy); phy_power_off(dwc->usb3_generic_phy); - clk_bulk_disable(dwc->num_clks, dwc->clks); - clk_bulk_unprepare(dwc->num_clks, dwc->clks); + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); reset_control_assert(dwc->reset); } @@ -1165,7 +1176,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) dev_err(dev, "failed to initialize host\n"); return ret; } - phy_calibrate(dwc->usb2_generic_phy); break; case USB_DR_MODE_OTG: INIT_WORK(&dwc->drd_work, __dwc3_set_mode); @@ -1309,8 +1319,7 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; - dwc->hird_threshold = hird_threshold - | (dwc->is_utmi_l1_suspend << 4); + dwc->hird_threshold = hird_threshold; dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd; dwc->rx_max_burst_prd = rx_max_burst_prd; @@ -1435,7 +1444,7 @@ static int dwc3_probe(struct platform_device *pdev) if (dev->of_node) { dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); - ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); + ret = devm_clk_bulk_get(dev, dwc->num_clks, dwc->clks); if (ret == -EPROBE_DEFER) return ret; /* @@ -1448,16 +1457,12 @@ static int dwc3_probe(struct platform_device *pdev) ret = reset_control_deassert(dwc->reset); if (ret) - goto put_clks; + return ret; - ret = clk_bulk_prepare(dwc->num_clks, dwc->clks); + ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); if (ret) goto assert_reset; - ret = clk_bulk_enable(dwc->num_clks, dwc->clks); - if (ret) - goto unprepare_clks; - if (!dwc3_core_is_valid(dwc)) { dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); ret = -ENODEV; @@ -1530,13 +1535,9 @@ err1: pm_runtime_disable(&pdev->dev); disable_clks: - clk_bulk_disable(dwc->num_clks, dwc->clks); -unprepare_clks: - clk_bulk_unprepare(dwc->num_clks, dwc->clks); + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); assert_reset: reset_control_assert(dwc->reset); -put_clks: - clk_bulk_put(dwc->num_clks, dwc->clks); return ret; } @@ -1559,7 +1560,6 @@ static int dwc3_remove(struct platform_device *pdev) dwc3_free_event_buffers(dwc); dwc3_free_scratch_buffers(dwc); - clk_bulk_put(dwc->num_clks, dwc->clks); return 0; } @@ -1573,14 +1573,10 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc) if (ret) return ret; - ret = clk_bulk_prepare(dwc->num_clks, dwc->clks); + ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks); if (ret) goto assert_reset; - ret = clk_bulk_enable(dwc->num_clks, dwc->clks); - if (ret) - goto unprepare_clks; - ret = dwc3_core_init(dwc); if (ret) goto disable_clks; @@ -1588,9 +1584,7 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc) return 0; disable_clks: - clk_bulk_disable(dwc->num_clks, dwc->clks); -unprepare_clks: - clk_bulk_unprepare(dwc->num_clks, dwc->clks); + clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); assert_reset: reset_control_assert(dwc->reset); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 3dd783b889cb..1c8b349379af 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1137,6 +1137,8 @@ struct dwc3 { #define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31) #define DWC3_USB31_REVISION_160A (0x3136302a | DWC3_REVISION_IS_DWC31) #define DWC3_USB31_REVISION_170A (0x3137302a | DWC3_REVISION_IS_DWC31) +#define 
DWC3_USB31_REVISION_180A (0x3138302a | DWC3_REVISION_IS_DWC31) +#define DWC3_USB31_REVISION_190A (0x3139302a | DWC3_REVISION_IS_DWC31) u32 version_type; diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 068259fdfb0c..9baabed87d61 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -246,258 +246,6 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size, return str; } -static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str, - size_t size) -{ - switch (t & USB_RECIP_MASK) { - case USB_RECIP_DEVICE: - snprintf(str, size, "Get Device Status(Length = %d)", l); - break; - case USB_RECIP_INTERFACE: - snprintf(str, size, "Get Interface Status(Intf = %d, Length = %d)", - i, l); - break; - case USB_RECIP_ENDPOINT: - snprintf(str, size, "Get Endpoint Status(ep%d%s)", - i & ~USB_DIR_IN, - i & USB_DIR_IN ? "in" : "out"); - break; - } -} - -static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v, - __u16 i, char *str, size_t size) -{ - switch (t & USB_RECIP_MASK) { - case USB_RECIP_DEVICE: - snprintf(str, size, "%s Device Feature(%s%s)", - b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", - ({char *s; - switch (v) { - case USB_DEVICE_SELF_POWERED: - s = "Self Powered"; - break; - case USB_DEVICE_REMOTE_WAKEUP: - s = "Remote Wakeup"; - break; - case USB_DEVICE_TEST_MODE: - s = "Test Mode"; - break; - case USB_DEVICE_U1_ENABLE: - s = "U1 Enable"; - break; - case USB_DEVICE_U2_ENABLE: - s = "U2 Enable"; - break; - case USB_DEVICE_LTM_ENABLE: - s = "LTM Enable"; - break; - default: - s = "UNKNOWN"; - } s; }), - v == USB_DEVICE_TEST_MODE ? - ({ char *s; - switch (i) { - case TEST_J: - s = ": TEST_J"; - break; - case TEST_K: - s = ": TEST_K"; - break; - case TEST_SE0_NAK: - s = ": TEST_SE0_NAK"; - break; - case TEST_PACKET: - s = ": TEST_PACKET"; - break; - case TEST_FORCE_EN: - s = ": TEST_FORCE_EN"; - break; - default: - s = ": UNKNOWN"; - } s; }) : ""); - break; - case USB_RECIP_INTERFACE: - snprintf(str, size, "%s Interface Feature(%s)", - b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", - v == USB_INTRF_FUNC_SUSPEND ? - "Function Suspend" : "UNKNOWN"); - break; - case USB_RECIP_ENDPOINT: - snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)", - b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set", - v == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN", - i & ~USB_DIR_IN, - i & USB_DIR_IN ? "in" : "out"); - break; - } -} - -static inline void dwc3_decode_set_address(__u16 v, char *str, size_t size) -{ - snprintf(str, size, "Set Address(Addr = %02x)", v); -} - -static inline void dwc3_decode_get_set_descriptor(__u8 t, __u8 b, __u16 v, - __u16 i, __u16 l, char *str, size_t size) -{ - snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)", - b == USB_REQ_GET_DESCRIPTOR ? 
"Get" : "Set", - ({ char *s; - switch (v >> 8) { - case USB_DT_DEVICE: - s = "Device"; - break; - case USB_DT_CONFIG: - s = "Configuration"; - break; - case USB_DT_STRING: - s = "String"; - break; - case USB_DT_INTERFACE: - s = "Interface"; - break; - case USB_DT_ENDPOINT: - s = "Endpoint"; - break; - case USB_DT_DEVICE_QUALIFIER: - s = "Device Qualifier"; - break; - case USB_DT_OTHER_SPEED_CONFIG: - s = "Other Speed Config"; - break; - case USB_DT_INTERFACE_POWER: - s = "Interface Power"; - break; - case USB_DT_OTG: - s = "OTG"; - break; - case USB_DT_DEBUG: - s = "Debug"; - break; - case USB_DT_INTERFACE_ASSOCIATION: - s = "Interface Association"; - break; - case USB_DT_BOS: - s = "BOS"; - break; - case USB_DT_DEVICE_CAPABILITY: - s = "Device Capability"; - break; - case USB_DT_PIPE_USAGE: - s = "Pipe Usage"; - break; - case USB_DT_SS_ENDPOINT_COMP: - s = "SS Endpoint Companion"; - break; - case USB_DT_SSP_ISOC_ENDPOINT_COMP: - s = "SSP Isochronous Endpoint Companion"; - break; - default: - s = "UNKNOWN"; - break; - } s; }), v & 0xff, l); -} - - -static inline void dwc3_decode_get_configuration(__u16 l, char *str, - size_t size) -{ - snprintf(str, size, "Get Configuration(Length = %d)", l); -} - -static inline void dwc3_decode_set_configuration(__u8 v, char *str, size_t size) -{ - snprintf(str, size, "Set Configuration(Config = %d)", v); -} - -static inline void dwc3_decode_get_intf(__u16 i, __u16 l, char *str, - size_t size) -{ - snprintf(str, size, "Get Interface(Intf = %d, Length = %d)", i, l); -} - -static inline void dwc3_decode_set_intf(__u8 v, __u16 i, char *str, size_t size) -{ - snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)", i, v); -} - -static inline void dwc3_decode_synch_frame(__u16 i, __u16 l, char *str, - size_t size) -{ - snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)", i, l); -} - -static inline void dwc3_decode_set_sel(__u16 l, char *str, size_t size) -{ - snprintf(str, size, "Set SEL(Length = %d)", l); -} - -static inline void dwc3_decode_set_isoch_delay(__u8 v, char *str, size_t size) -{ - snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", v); -} - -/** - * dwc3_decode_ctrl - returns a string represetion of ctrl request - */ -static inline const char *dwc3_decode_ctrl(char *str, size_t size, - __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex, - __u16 wLength) -{ - switch (bRequest) { - case USB_REQ_GET_STATUS: - dwc3_decode_get_status(bRequestType, wIndex, wLength, str, - size); - break; - case USB_REQ_CLEAR_FEATURE: - case USB_REQ_SET_FEATURE: - dwc3_decode_set_clear_feature(bRequestType, bRequest, wValue, - wIndex, str, size); - break; - case USB_REQ_SET_ADDRESS: - dwc3_decode_set_address(wValue, str, size); - break; - case USB_REQ_GET_DESCRIPTOR: - case USB_REQ_SET_DESCRIPTOR: - dwc3_decode_get_set_descriptor(bRequestType, bRequest, wValue, - wIndex, wLength, str, size); - break; - case USB_REQ_GET_CONFIGURATION: - dwc3_decode_get_configuration(wLength, str, size); - break; - case USB_REQ_SET_CONFIGURATION: - dwc3_decode_set_configuration(wValue, str, size); - break; - case USB_REQ_GET_INTERFACE: - dwc3_decode_get_intf(wIndex, wLength, str, size); - break; - case USB_REQ_SET_INTERFACE: - dwc3_decode_set_intf(wValue, wIndex, str, size); - break; - case USB_REQ_SYNCH_FRAME: - dwc3_decode_synch_frame(wIndex, wLength, str, size); - break; - case USB_REQ_SET_SEL: - dwc3_decode_set_sel(wLength, str, size); - break; - case USB_REQ_SET_ISOCH_DELAY: - dwc3_decode_set_isoch_delay(wValue, str, size); - break; - default: - 
snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x", - bRequestType, bRequest, - cpu_to_le16(wValue) & 0xff, - cpu_to_le16(wValue) >> 8, - cpu_to_le16(wIndex) & 0xff, - cpu_to_le16(wIndex) >> 8, - cpu_to_le16(wLength) & 0xff, - cpu_to_le16(wLength) >> 8); - } - - return str; -} - /** * dwc3_ep_event_string - returns event name * @event: then event code diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index a69eb4a7b832..1e14a6f4884b 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -81,7 +81,6 @@ static int kdwc3_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; struct dwc3_keystone *kdwc; - struct resource *res; int error, irq; kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL); @@ -92,8 +91,7 @@ static int kdwc3_probe(struct platform_device *pdev) kdwc->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - kdwc->usbss = devm_ioremap_resource(dev, res); + kdwc->usbss = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(kdwc->usbss)) return PTR_ERR(kdwc->usbss); diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index d73ccd9e1366..8a3ec1a951fe 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -562,7 +562,13 @@ static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev) static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev) { struct dwc3_meson_g12a *priv = dev_get_drvdata(dev); - int i; + int i, ret; + + if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) { + ret = regulator_disable(priv->vbus); + if (ret) + return ret; + } for (i = 0 ; i < PHY_COUNT ; ++i) { phy_power_off(priv->phys[i]); @@ -597,6 +603,12 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev) return ret; } + if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) { + ret = regulator_enable(priv->vbus); + if (ret) + return ret; + } + return 0; } diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 6f711d58d82f..8c3de2d258bf 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -14,7 +14,6 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/platform_device.h> -#include <linux/platform_data/dwc3-omap.h> #include <linux/pm_runtime.h> #include <linux/dma-mapping.h> #include <linux/ioport.h> @@ -106,6 +105,12 @@ #define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2) #define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1) +enum dwc3_omap_utmi_mode { + DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, + DWC3_OMAP_UTMI_MODE_HW, + DWC3_OMAP_UTMI_MODE_SW, +}; + struct dwc3_omap { struct device *dev; @@ -446,7 +451,6 @@ static int dwc3_omap_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct dwc3_omap *omap; - struct resource *res; struct device *dev = &pdev->dev; struct regulator *vbus_reg = NULL; @@ -472,8 +476,7 @@ static int dwc3_omap_probe(struct platform_device *pdev) if (irq < 0) return irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 16081383c401..c682420f25ca 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -255,24 +255,26 @@ static int st_dwc3_probe(struct platform_device *pdev) if (!child) { dev_err(&pdev->dev, 
"failed to find dwc3 core node\n"); ret = -ENODEV; - goto undo_softreset; + goto err_node_put; } /* Allocate and initialize the core */ ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); - goto undo_softreset; + goto err_node_put; } child_pdev = of_find_device_by_node(child); if (!child_pdev) { dev_err(dev, "failed to find dwc3 core device\n"); ret = -ENODEV; - goto undo_softreset; + goto err_node_put; } dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev); + of_node_put(child); + of_dev_put(child_pdev); /* * Configure the USB port as device or host according to the static @@ -292,6 +294,8 @@ static int st_dwc3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dwc3_data); return 0; +err_node_put: + of_node_put(child); undo_softreset: reset_control_assert(dwc3_data->rstc_rst); undo_powerdown: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 173f5329d3d9..8adb59f8e4f1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2078,6 +2078,26 @@ static void dwc3_gadget_config_params(struct usb_gadget *g, { struct dwc3 *dwc = gadget_to_dwc(g); + params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED; + params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED; + + /* Recommended BESL */ + if (!dwc->dis_enblslpm_quirk) { + /* + * If the recommended BESL baseline is 0 or if the BESL deep is + * less than 2, Microsoft's Windows 10 host usb stack will issue + * a usb reset immediately after it receives the extended BOS + * descriptor and the enumeration will fail. To maintain + * compatibility with the Windows' usb stack, let's set the + * recommended BESL baseline to 1 and clamp the BESL deep to be + * within 2 to 15. + */ + params->besl_baseline = 1; + if (dwc->is_utmi_l1_suspend) + params->besl_deep = + clamp_t(u8, dwc->hird_threshold, 2, 15); + } + /* U1 Device exit Latency */ if (dwc->dis_u1_entry_quirk) params->bU1devExitLat = 0; @@ -2868,7 +2888,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); - reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold); + reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold | + (dwc->is_utmi_l1_suspend << 4)); /* * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and @@ -3318,7 +3339,6 @@ int dwc3_gadget_init(struct dwc3 *dwc) dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; - dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; dwc->gadget.lpm_capable = true; /* diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index f55947294f7c..8deea8c91e03 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -85,7 +85,7 @@ int dwc3_host_init(struct dwc3 *dwc) DWC3_XHCI_RESOURCES_NUM); if (ret) { dev_err(dwc->dev, "couldn't add resources to xHCI device\n"); - goto err1; + goto err; } memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); @@ -112,37 +112,23 @@ int dwc3_host_init(struct dwc3 *dwc) ret = platform_device_add_properties(xhci, props); if (ret) { dev_err(dwc->dev, "failed to add properties to xHCI\n"); - goto err1; + goto err; } } - phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(dwc->dev)); - phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(dwc->dev)); - ret = platform_device_add(xhci); if (ret) { dev_err(dwc->dev, "failed to register xHCI device\n"); - goto err2; + goto err; } return 0; -err2: - 
phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(dwc->dev)); - phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(dwc->dev)); -err1: +err: platform_device_put(xhci); return ret; } void dwc3_host_exit(struct dwc3 *dwc) { - phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(dwc->dev)); - phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(dwc->dev)); platform_device_unregister(dwc->xhci); } diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index 818a63da1a44..9edff17111f7 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl, __entry->wIndex = le16_to_cpu(ctrl->wIndex); __entry->wLength = le16_to_cpu(ctrl->wLength); ), - TP_printk("%s", dwc3_decode_ctrl(__get_str(str), DWC3_MSG_MAX, + TP_printk("%s", usb_decode_ctrl(__get_str(str), DWC3_MSG_MAX, __entry->bRequestType, __entry->bRequest, __entry->wValue, __entry->wIndex, __entry->wLength) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 76883ff4f5bb..d516e8d6cd7f 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -612,6 +612,7 @@ static int bos_desc(struct usb_composite_dev *cdev) struct usb_ext_cap_descriptor *usb_ext; struct usb_dcd_config_params dcd_config_params; struct usb_bos_descriptor *bos = cdev->req->buf; + unsigned int besl = 0; bos->bLength = USB_DT_BOS_SIZE; bos->bDescriptorType = USB_DT_BOS; @@ -619,6 +620,29 @@ static int bos_desc(struct usb_composite_dev *cdev) bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE); bos->bNumDeviceCaps = 0; + /* Get Controller configuration */ + if (cdev->gadget->ops->get_config_params) { + cdev->gadget->ops->get_config_params(cdev->gadget, + &dcd_config_params); + } else { + dcd_config_params.besl_baseline = + USB_DEFAULT_BESL_UNSPECIFIED; + dcd_config_params.besl_deep = + USB_DEFAULT_BESL_UNSPECIFIED; + dcd_config_params.bU1devExitLat = + USB_DEFAULT_U1_DEV_EXIT_LAT; + dcd_config_params.bU2DevExitLat = + cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); + } + + if (dcd_config_params.besl_baseline != USB_DEFAULT_BESL_UNSPECIFIED) + besl = USB_BESL_BASELINE_VALID | + USB_SET_BESL_BASELINE(dcd_config_params.besl_baseline); + + if (dcd_config_params.besl_deep != USB_DEFAULT_BESL_UNSPECIFIED) + besl |= USB_BESL_DEEP_VALID | + USB_SET_BESL_DEEP(dcd_config_params.besl_deep); + /* * A SuperSpeed device shall include the USB2.0 extension descriptor * and shall support LPM when operating in USB2.0 HS mode. 
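The composite.c changes in this and the following hunk rearrange bos_desc() so the controller's get_config_params() answer is known before the USB 2.0 extension descriptor is built, letting valid BESL values be ORed into bmAttributes alongside USB_LPM_SUPPORT and USB_BESL_SUPPORT. A standalone sketch of that bit packing follows; the flag and shift values are meant to mirror the USB_BESL_* macros in include/uapi/linux/usb/ch9.h, while build_usb2_ext_attributes() and the 0xff sentinel are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

#define USB_LPM_SUPPORT          (1 << 1)
#define USB_BESL_SUPPORT         (1 << 2)
#define USB_BESL_BASELINE_VALID  (1 << 3)
#define USB_BESL_DEEP_VALID      (1 << 4)
#define USB_SET_BESL_BASELINE(p) (((p) & 0xf) << 8)
#define USB_SET_BESL_DEEP(p)     (((p) & 0xf) << 12)
#define BESL_UNSPECIFIED         0xff	/* stand-in for USB_DEFAULT_BESL_UNSPECIFIED */

/* Mirrors what bos_desc() ORs into usb_ext->bmAttributes. */
static uint32_t build_usb2_ext_attributes(uint8_t besl_baseline, uint8_t besl_deep)
{
	uint32_t attr = USB_LPM_SUPPORT | USB_BESL_SUPPORT;

	if (besl_baseline != BESL_UNSPECIFIED)
		attr |= USB_BESL_BASELINE_VALID | USB_SET_BESL_BASELINE(besl_baseline);
	if (besl_deep != BESL_UNSPECIFIED)
		attr |= USB_BESL_DEEP_VALID | USB_SET_BESL_DEEP(besl_deep);

	return attr;
}

int main(void)
{
	/* dwc3 above reports baseline 1 and a deep BESL clamped to 2..15 */
	printf("bmAttributes = 0x%08x\n",
	       (unsigned int)build_usb2_ext_attributes(1, 2));
	return 0;
}
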
@@ -629,7 +653,8 @@ static int bos_desc(struct usb_composite_dev *cdev) usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; - usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT); + usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | + USB_BESL_SUPPORT | besl); /* * The Superspeed USB Capability descriptor shall be implemented by all @@ -650,17 +675,6 @@ static int bos_desc(struct usb_composite_dev *cdev) USB_HIGH_SPEED_OPERATION | USB_5GBPS_OPERATION); ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION; - - /* Get Controller configuration */ - if (cdev->gadget->ops->get_config_params) { - cdev->gadget->ops->get_config_params(cdev->gadget, - &dcd_config_params); - } else { - dcd_config_params.bU1devExitLat = - USB_DEFAULT_U1_DEV_EXIT_LAT; - dcd_config_params.bU2DevExitLat = - cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); - } ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index c08d385e2723..90b134d5dca9 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -65,14 +65,16 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, void ast_vhub_nuke(struct ast_vhub_ep *ep, int status) { struct ast_vhub_req *req; - - EPDBG(ep, "Nuking\n"); + int count = 0; /* Beware, lock will be dropped & req-acquired by done() */ while (!list_empty(&ep->queue)) { req = list_first_entry(&ep->queue, struct ast_vhub_req, queue); ast_vhub_done(ep, req, status); + count++; } + if (count) + EPDBG(ep, "Nuked %d request(s)\n", count); } struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep, diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c index 6b1b16b17d7d..4008e7a51188 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c @@ -50,11 +50,14 @@ void ast_vhub_dev_irq(struct ast_vhub_dev *d) static void ast_vhub_dev_enable(struct ast_vhub_dev *d) { - u32 reg, hmsk; + u32 reg, hmsk, i; if (d->enabled) return; + /* Cleanup EP0 state */ + ast_vhub_reset_ep0(d); + /* Enable device and its EP0 interrupts */ reg = VHUB_DEV_EN_ENABLE_PORT | VHUB_DEV_EN_EP0_IN_ACK_IRQEN | @@ -73,6 +76,19 @@ static void ast_vhub_dev_enable(struct ast_vhub_dev *d) /* Set EP0 DMA buffer address */ writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA); + /* Clear stall on all EPs */ + for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { + struct ast_vhub_ep *ep = d->epns[i]; + + if (ep && (ep->epn.stalled || ep->epn.wedged)) { + ep->epn.stalled = false; + ep->epn.wedged = false; + ast_vhub_update_epn_stall(ep); + } + } + + /* Additional cleanups */ + d->wakeup_en = false; d->enabled = true; } @@ -93,7 +109,6 @@ static void ast_vhub_dev_disable(struct ast_vhub_dev *d) writel(0, d->regs + AST_VHUB_DEV_EN_CTRL); d->gadget.speed = USB_SPEED_UNKNOWN; d->enabled = false; - d->suspended = false; } static int ast_vhub_dev_feature(struct ast_vhub_dev *d, @@ -201,14 +216,19 @@ int ast_vhub_std_dev_request(struct ast_vhub_ep *ep, u16 wValue, wIndex; /* No driver, we shouldn't be enabled ... 
*/ - if (!d->driver || !d->enabled || d->suspended) { + if (!d->driver || !d->enabled) { EPDBG(ep, - "Device is wrong state driver=%p enabled=%d" - " suspended=%d\n", - d->driver, d->enabled, d->suspended); + "Device is wrong state driver=%p enabled=%d\n", + d->driver, d->enabled); return std_req_stall; } + /* + * Note: we used to reject/stall requests while suspended, + * we don't do that anymore as we seem to have cases of + * mass storage getting very upset. + */ + /* First packet, grab speed */ if (d->gadget.speed == USB_SPEED_UNKNOWN) { d->gadget.speed = ep->vhub->speed; @@ -449,8 +469,7 @@ static const struct usb_gadget_ops ast_vhub_udc_ops = { void ast_vhub_dev_suspend(struct ast_vhub_dev *d) { - d->suspended = true; - if (d->driver) { + if (d->driver && d->driver->suspend) { spin_unlock(&d->vhub->lock); d->driver->suspend(&d->gadget); spin_lock(&d->vhub->lock); @@ -459,8 +478,7 @@ void ast_vhub_dev_suspend(struct ast_vhub_dev *d) void ast_vhub_dev_resume(struct ast_vhub_dev *d) { - d->suspended = false; - if (d->driver) { + if (d->driver && d->driver->resume) { spin_unlock(&d->vhub->lock); d->driver->resume(&d->gadget); spin_lock(&d->vhub->lock); @@ -469,46 +487,28 @@ void ast_vhub_dev_resume(struct ast_vhub_dev *d) void ast_vhub_dev_reset(struct ast_vhub_dev *d) { - /* - * If speed is not set, we enable the port. If it is, - * send reset to the gadget and reset "speed". - * - * Speed is an indication that we have got the first - * setup packet to the device. - */ - if (d->gadget.speed == USB_SPEED_UNKNOWN && !d->enabled) { - DDBG(d, "Reset at unknown speed of disabled device, enabling...\n"); - ast_vhub_dev_enable(d); - d->suspended = false; + /* No driver, just disable the device and return */ + if (!d->driver) { + ast_vhub_dev_disable(d); + return; } - if (d->gadget.speed != USB_SPEED_UNKNOWN && d->driver) { - unsigned int i; - DDBG(d, "Reset at known speed of bound device, resetting...\n"); + /* If the port isn't enabled, just enable it */ + if (!d->enabled) { + DDBG(d, "Reset of disabled device, enabling...\n"); + ast_vhub_dev_enable(d); + } else { + DDBG(d, "Reset of enabled device, resetting...\n"); spin_unlock(&d->vhub->lock); - d->driver->reset(&d->gadget); + usb_gadget_udc_reset(&d->gadget, d->driver); spin_lock(&d->vhub->lock); /* - * Disable/re-enable HW, this will clear the address + * Disable and maybe re-enable HW, this will clear the address * and speed setting. */ ast_vhub_dev_disable(d); ast_vhub_dev_enable(d); - - /* Clear stall on all EPs */ - for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) { - struct ast_vhub_ep *ep = d->epns[i]; - - if (ep && ep->epn.stalled) { - ep->epn.stalled = false; - ast_vhub_update_epn_stall(ep); - } - } - - /* Additional cleanups */ - d->wakeup_en = false; - d->suspended = false; } } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c index e2927fb083cf..022b777b85f8 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c @@ -105,18 +105,20 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep) (crq.bRequestType & USB_DIR_IN) ? "in" : "out", ep->ep0.state); - /* Check our state, cancel pending requests if needed */ - if (ep->ep0.state != ep0_state_token) { + /* + * Check our state, cancel pending requests if needed + * + * Note: Under some circumstances, we can get a new setup + * packet while waiting for the stall ack, just accept it. 
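The aspeed-vhub ep0.c changes that continue just below add an explicit stall state to the endpoint-0 state machine and accept a fresh SETUP packet from either the token or the stall state. A toy, hardware-free model of that control flow (names invented, not the aspeed-vhub API):

enum ep0_state { EP0_TOKEN, EP0_DATA, EP0_STATUS, EP0_STALL };

/* Accept a SETUP packet: allowed from the token state and, with this
 * patch, also from the stall state; anything else is only logged
 * before pending requests are nuked. */
static int ep0_accept_setup(enum ep0_state *state, int has_data_phase)
{
	if (*state != EP0_TOKEN && *state != EP0_STALL)
		return -1;

	*state = has_data_phase ? EP0_DATA : EP0_STATUS;
	return 0;
}

/* Stalling now parks the machine in EP0_STALL instead of EP0_STATUS. */
static void ep0_stall(enum ep0_state *state)
{
	*state = EP0_STALL;
}
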
+ * + * In any case, a SETUP packet in wrong state should have + * reset the HW state machine, so let's just log, nuke + * requests, move on. + */ + if (ep->ep0.state != ep0_state_token && + ep->ep0.state != ep0_state_stall) { EPDBG(ep, "wrong state\n"); ast_vhub_nuke(ep, -EIO); - - /* - * Accept the packet regardless, this seems to happen - * when stalling a SETUP packet that has an OUT data - * phase. - */ - ast_vhub_nuke(ep, 0); - goto stall; } /* Calculate next state for EP0 */ @@ -165,7 +167,7 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep) stall: EPDBG(ep, "stalling\n"); writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat); - ep->ep0.state = ep0_state_status; + ep->ep0.state = ep0_state_stall; ep->ep0.dir_in = false; return; @@ -299,8 +301,8 @@ void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack) if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) || (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) || (ep->ep0.dir_in != in_ack)) { + /* In that case, ignore interrupt */ dev_warn(dev, "irq state mismatch"); - stall = true; break; } /* @@ -335,12 +337,22 @@ void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack) dev_warn(dev, "status direction mismatch\n"); stall = true; } + break; + case ep0_state_stall: + /* + * There shouldn't be any request left, but nuke just in case + * otherwise the stale request will block subsequent ones + */ + ast_vhub_nuke(ep, -EIO); + break; } - /* Reset to token state */ - ep->ep0.state = ep0_state_token; - if (stall) + /* Reset to token state or stall */ + if (stall) { writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat); + ep->ep0.state = ep0_state_stall; + } else + ep->ep0.state = ep0_state_token; } static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req, @@ -367,7 +379,7 @@ static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req, return -EINVAL; /* Disabled device */ - if (ep->dev && (!ep->dev->enabled || ep->dev->suspended)) + if (ep->dev && !ep->dev->enabled) return -ESHUTDOWN; /* Data, no buffer and not internal ? */ @@ -390,8 +402,12 @@ static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req, spin_lock_irqsave(&vhub->lock, flags); /* EP0 can only support a single request at a time */ - if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) { + if (!list_empty(&ep->queue) || + ep->ep0.state == ep0_state_token || + ep->ep0.state == ep0_state_stall) { dev_warn(dev, "EP0: Request in wrong state\n"); + EPVDBG(ep, "EP0: list_empty=%d state=%d\n", + list_empty(&ep->queue), ep->ep0.state); spin_unlock_irqrestore(&vhub->lock, flags); return -EBUSY; } @@ -459,6 +475,15 @@ static const struct usb_ep_ops ast_vhub_ep0_ops = { .free_request = ast_vhub_free_request, }; +void ast_vhub_reset_ep0(struct ast_vhub_dev *dev) +{ + struct ast_vhub_ep *ep = &dev->ep0; + + ast_vhub_nuke(ep, -EIO); + ep->ep0.state = ep0_state_token; +} + + void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep, struct ast_vhub_dev *dev) { diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index 35941dc125f9..7475c74aa5c5 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -352,7 +352,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req, /* Endpoint enabled ? 
*/ if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx || - !ep->dev->enabled || ep->dev->suspended) { + !ep->dev->enabled) { EPDBG(ep, "Enqueuing request on wrong or disabled EP\n"); return -ESHUTDOWN; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c index 7c040f56100e..19b3517e04c0 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c @@ -449,8 +449,15 @@ static void ast_vhub_change_port_stat(struct ast_vhub *vhub, USB_PORT_STAT_C_OVERCURRENT | USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_L1; - p->change |= chg; + /* + * We only set USB_PORT_STAT_C_ENABLE if we are disabling + * the port as per USB spec, otherwise MacOS gets upset + */ + if (p->status & USB_PORT_STAT_ENABLE) + chg &= ~USB_PORT_STAT_C_ENABLE; + + p->change = chg; ast_vhub_update_hub_ep1(vhub, port); } } @@ -723,6 +730,12 @@ enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep, case ClearPortFeature: EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue); return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue); + case ClearTTBuffer: + case ResetTT: + case StopTT: + return std_req_complete; + case GetTTState: + return ast_vhub_simple_reply(ep, 0, 0, 0, 0); default: EPDBG(ep, "Unknown class request\n"); } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h index 4ed03d33a5a9..761919e220d3 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h +++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h @@ -257,6 +257,7 @@ enum ep0_state { ep0_state_token, ep0_state_data, ep0_state_status, + ep0_state_stall, }; /* @@ -353,7 +354,6 @@ struct ast_vhub_dev { struct usb_gadget_driver *driver; bool registered : 1; bool wakeup_en : 1; - bool suspended : 1; bool enabled : 1; /* Endpoint structures */ @@ -507,6 +507,7 @@ void ast_vhub_init_hw(struct ast_vhub *vhub); /* ep0.c */ void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack); void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep); +void ast_vhub_reset_ep0(struct ast_vhub_dev *dev); void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep, struct ast_vhub_dev *dev); int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 7cf34beb50df..92af8dc98c3d 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1143,7 +1143,7 @@ static int check_pending_gadget_drivers(struct usb_udc *udc) dev_name(&udc->dev)) == 0) { ret = udc_bind_to_driver(udc, driver); if (ret != -EPROBE_DEFER) - list_del(&driver->pending); + list_del_init(&driver->pending); break; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index e9cf20979bf6..338c606b7c27 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -742,7 +742,6 @@ static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd, * response data */ static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd) { - u32 tmp; int to = 1000; /* Write a command and read data from the protocol engine */ @@ -752,7 +751,6 @@ static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd) /* Write command code */ udc_protocol_cmd_w(udc, cmd); - tmp = readl(USBD_DEVINTST(udc->udp_baseaddr)); while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL)) && (to > 0)) to--; @@ -1992,7 +1990,7 @@ void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) /* DMA end 
of transfer completion */ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) { - u32 status, epstatus; + u32 status; struct lpc32xx_request *req; struct lpc32xx_usbd_dd_gad *dd; @@ -2086,7 +2084,7 @@ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) { udc_clearep_getsts(udc, ep->hwep_num); uda_enable_hwepint(udc, ep->hwep_num); - epstatus = udc_clearep_getsts(udc, ep->hwep_num); + udc_clearep_getsts(udc, ep->hwep_num); /* Let the EP interrupt handle the ZLP */ return; @@ -2198,7 +2196,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc) struct lpc32xx_ep *ep, *ep0 = &udc->ep[0]; struct usb_ctrlrequest ctrlpkt; int i, bytes; - u16 wIndex, wValue, wLength, reqtype, req, tmp; + u16 wIndex, wValue, reqtype, req, tmp; /* Nuke previous transfers */ nuke(ep0, -EPROTO); @@ -2214,7 +2212,6 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc) /* Native endianness */ wIndex = le16_to_cpu(ctrlpkt.wIndex); wValue = le16_to_cpu(ctrlpkt.wValue); - wLength = le16_to_cpu(ctrlpkt.wLength); reqtype = le16_to_cpu(ctrlpkt.bRequestType); /* Set direction of EP0 */ @@ -2265,7 +2262,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc) default: break; } - + break; case USB_REQ_SET_ADDRESS: if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) { diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index b6bbe2e448ba..51efee21915f 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -2244,30 +2244,40 @@ static void usb_reinit_338x(struct net2280 *dev) } /* Hardware Defect and Workaround */ - val = readl(&dev->ll_lfps_regs->ll_lfps_5); + val = readl(&dev->llregs->ll_lfps_5); val &= ~(0xf << TIMER_LFPS_6US); val |= 0x5 << TIMER_LFPS_6US; - writel(val, &dev->ll_lfps_regs->ll_lfps_5); + writel(val, &dev->llregs->ll_lfps_5); - val = readl(&dev->ll_lfps_regs->ll_lfps_6); + val = readl(&dev->llregs->ll_lfps_6); val &= ~(0xffff << TIMER_LFPS_80US); val |= 0x0100 << TIMER_LFPS_80US; - writel(val, &dev->ll_lfps_regs->ll_lfps_6); + writel(val, &dev->llregs->ll_lfps_6); /* * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB * Hot Reset Exit Handshake may Fail in Specific Case using * Default Register Settings. Workaround for Enumeration test. */ - val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2); + val = readl(&dev->llregs->ll_tsn_counters_2); val &= ~(0x1f << HOT_TX_NORESET_TS2); val |= 0x10 << HOT_TX_NORESET_TS2; - writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2); + writel(val, &dev->llregs->ll_tsn_counters_2); - val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3); + val = readl(&dev->llregs->ll_tsn_counters_3); val &= ~(0x1f << HOT_RX_RESET_TS2); val |= 0x3 << HOT_RX_RESET_TS2; - writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3); + writel(val, &dev->llregs->ll_tsn_counters_3); + + /* + * AB errata. Errata 11. Workaround for Default Duration of LFPS + * Handshake Signaling for Device-Initiated U1 Exit is too short. + * Without this, various enumeration failures observed with + * modern superspeed hosts. + */ + val = readl(&dev->llregs->ll_lfps_timers_2); + writel((val & 0xffff0000) | LFPS_TIMERS_2_WORKAROUND_VALUE, + &dev->llregs->ll_lfps_timers_2); /* * Set Recovery Idle to Recover bit: @@ -2276,10 +2286,10 @@ static void usb_reinit_338x(struct net2280 *dev) * - It is safe to set for all connection speeds; all chip revisions. * - R-M-W to leave other bits undisturbed. 
* - Reference PLX TT-7372 - */ - val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit); + */ + val = readl(&dev->llregs->ll_tsn_chicken_bit); val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW); - writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit); + writel(val, &dev->llregs->ll_tsn_chicken_bit); INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); @@ -3669,12 +3679,6 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) (base + 0x00b4); dev->llregs = (struct usb338x_ll_regs __iomem *) (base + 0x0700); - dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *) - (base + 0x0748); - dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *) - (base + 0x077c); - dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *) - (base + 0x079c); dev->plregs = (struct usb338x_pl_regs __iomem *) (base + 0x0800); usbstat = readl(&dev->usb->usbstat); diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h index b65a797544d7..85d3ca1698ba 100644 --- a/drivers/usb/gadget/udc/net2280.h +++ b/drivers/usb/gadget/udc/net2280.h @@ -178,9 +178,6 @@ struct net2280 { struct net2280_dep_regs __iomem *dep; struct net2280_ep_regs __iomem *epregs; struct usb338x_ll_regs __iomem *llregs; - struct usb338x_ll_lfps_regs __iomem *ll_lfps_regs; - struct usb338x_ll_tsn_regs __iomem *ll_tsn_regs; - struct usb338x_ll_chi_regs __iomem *ll_chicken_reg; struct usb338x_pl_regs __iomem *plregs; struct dma_pool *requests; diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index cded51f36fc1..265dab2bbfac 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -3046,8 +3046,7 @@ static void pch_udc_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int pch_udc_suspend(struct device *d) { - struct pci_dev *pdev = to_pci_dev(d); - struct pch_udc_dev *dev = pci_get_drvdata(pdev); + struct pch_udc_dev *dev = dev_get_drvdata(d); pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK); pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 1eb8d17e19db..4de91653a2c7 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci) * other cases where the next software may expect clean state from the * "firmware". this is bus-neutral, unlike shutdown() methods. 
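The ohci-hcd.c hunk just below splits shutdown into a small locked wrapper and an unlocked _ohci_shutdown() helper, so the io_watchdog path, which already runs under ohci->lock, can halt the controller without taking the lock twice. The same shape in plain C, with a pthread mutex standing in for the HCD spinlock and all names invented:

#include <pthread.h>

struct controller {
	pthread_mutex_t lock;	/* stands in for ohci->lock */
	int rh_halted;
};

/* Caller must already hold ctrl->lock (the _ohci_shutdown() role). */
static void controller_shutdown_locked(struct controller *ctrl)
{
	/* mask interrupts, stop the schedules, mark the root hub halted */
	ctrl->rh_halted = 1;
}

/* Public entry point: takes the lock itself (the ohci_shutdown() role). */
static void controller_shutdown(struct controller *ctrl)
{
	pthread_mutex_lock(&ctrl->lock);
	controller_shutdown_locked(ctrl);
	pthread_mutex_unlock(&ctrl->lock);
}

/* Watchdog path: already inside the locked region, so it calls the
 * unlocked helper instead of the public wrapper. */
static void watchdog_declare_dead(struct controller *ctrl)
{
	pthread_mutex_lock(&ctrl->lock);
	/* ... decide the controller died ... */
	controller_shutdown_locked(ctrl);	/* no recursive locking */
	pthread_mutex_unlock(&ctrl->lock);
}
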
*/ -static void -ohci_shutdown (struct usb_hcd *hcd) +static void _ohci_shutdown(struct usb_hcd *hcd) { struct ohci_hcd *ohci; @@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd) ohci->rh_state = OHCI_RH_HALTED; } +static void ohci_shutdown(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + unsigned long flags; + + spin_lock_irqsave(&ohci->lock, flags); + _ohci_shutdown(hcd); + spin_unlock_irqrestore(&ohci->lock, flags); +} + /*-------------------------------------------------------------------------* * HC functions *-------------------------------------------------------------------------*/ @@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t) died: usb_hc_died(ohci_to_hcd(ohci)); ohci_dump(ohci); - ohci_shutdown(ohci_to_hcd(ohci)); + _ohci_shutdown(ohci_to_hcd(ohci)); goto done; } else { /* No write back because the done queue was empty */ diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index 0b2aea6e28d4..c158cda9e4b9 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -49,7 +49,7 @@ static const struct hc_driver ohci_sm501_hc_driver = { * generic hardware linkage */ .irq = ohci_irq, - .flags = HCD_USB11 | HCD_DMA | HCD_MEMORY, + .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index 221593d75556..fb6f5e9ae5c6 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -156,7 +156,7 @@ static const struct hc_driver ohci_tmio_hc_driver = { /* generic hardware linkage */ .irq = ohci_irq, - .flags = HCD_USB11 | HCD_DMA | HCD_MEMORY, + .flags = HCD_USB11 | HCD_MEMORY, /* basic lifecycle operations */ .start = ohci_tmio_start, diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index aff79ff5aba4..be726c791323 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -139,14 +139,14 @@ xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head, struct dbc_request *req; for (i = 0; i < DBC_QUEUE_SIZE; i++) { - req = dbc_alloc_request(dep, GFP_ATOMIC); + req = dbc_alloc_request(dep, GFP_KERNEL); if (!req) break; req->length = DBC_MAX_PACKET; req->buf = kmalloc(req->length, GFP_KERNEL); if (!req->buf) { - xhci_dbc_free_req(dep, req); + dbc_free_request(dep, req); break; } diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c index 399113f9fc5c..f498160df969 100644 --- a/drivers/usb/host/xhci-ext-caps.c +++ b/drivers/usb/host/xhci-ext-caps.c @@ -6,11 +6,20 @@ */ #include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pci.h> #include "xhci.h" #define USB_SW_DRV_NAME "intel_xhci_usb_sw" #define USB_SW_RESOURCE_SIZE 0x400 +#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 + +static const struct property_entry role_switch_props[] = { + PROPERTY_ENTRY_BOOL("sw_switch_disable"), + {}, +}; + static void xhci_intel_unregister_pdev(void *arg) { platform_device_unregister(arg); @@ -21,6 +30,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset) struct usb_hcd *hcd = xhci_to_hcd(xhci); struct device *dev = hcd->self.controller; struct platform_device *pdev; + struct pci_dev *pci = to_pci_dev(dev); struct resource res = { 0, }; int ret; @@ -43,6 +53,14 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset) return ret; } + if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + ret = platform_device_add_properties(pdev, 
role_switch_props); + if (ret) { + dev_err(dev, "failed to register device properties\n"); + return ret; + } + } + pdev->dev.parent = dev; ret = platform_device_add(pdev); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 026fe18972d3..b18a6baef204 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -216,6 +216,10 @@ static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk) return PTR_ERR(mtk->sys_clk); } + mtk->xhci_clk = devm_clk_get_optional(dev, "xhci_ck"); + if (IS_ERR(mtk->xhci_clk)) + return PTR_ERR(mtk->xhci_clk); + mtk->ref_clk = devm_clk_get_optional(dev, "ref_ck"); if (IS_ERR(mtk->ref_clk)) return PTR_ERR(mtk->ref_clk); @@ -244,6 +248,12 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk) goto sys_clk_err; } + ret = clk_prepare_enable(mtk->xhci_clk); + if (ret) { + dev_err(mtk->dev, "failed to enable xhci_clk\n"); + goto xhci_clk_err; + } + ret = clk_prepare_enable(mtk->mcu_clk); if (ret) { dev_err(mtk->dev, "failed to enable mcu_clk\n"); @@ -261,6 +271,8 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk) dma_clk_err: clk_disable_unprepare(mtk->mcu_clk); mcu_clk_err: + clk_disable_unprepare(mtk->xhci_clk); +xhci_clk_err: clk_disable_unprepare(mtk->sys_clk); sys_clk_err: clk_disable_unprepare(mtk->ref_clk); @@ -272,6 +284,7 @@ static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk) { clk_disable_unprepare(mtk->dma_clk); clk_disable_unprepare(mtk->mcu_clk); + clk_disable_unprepare(mtk->xhci_clk); clk_disable_unprepare(mtk->sys_clk); clk_disable_unprepare(mtk->ref_clk); } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 8be8c5f7ff62..5ac458b7d2e0 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -139,6 +139,7 @@ struct xhci_hcd_mtk { struct regulator *vusb33; struct regulator *vbus; struct clk *sys_clk; /* sys and mac clock */ + struct clk *xhci_clk; struct clk *ref_clk; struct clk *mcu_clk; struct clk *dma_clk; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index a1e5ce484bf8..d90cd5ec09cf 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -66,12 +66,14 @@ static int xhci_priv_resume_quirk(struct usb_hcd *hcd) static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) { + struct xhci_plat_priv *priv = xhci_to_priv(xhci); + /* * As of now platform drivers don't provide MSI support so we ensure * here that the generic code does not try to make a pci_dev from our * dev struct in order to setup MSI */ - xhci->quirks |= XHCI_PLAT; + xhci->quirks |= XHCI_PLAT | priv->quirks; } /* called during probe() after chip reset completes */ @@ -103,17 +105,11 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { }; static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = { - .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1, - .init_quirk = xhci_rcar_init_quirk, - .plat_start = xhci_rcar_start, - .resume_quirk = xhci_rcar_resume_quirk, + SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1) }; static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { - .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3, - .init_quirk = xhci_rcar_init_quirk, - .plat_start = xhci_rcar_start, - .resume_quirk = xhci_rcar_resume_quirk, + SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3) }; static const struct of_device_id usb_xhci_of_match[] = { diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index ae29f22ff5bd..5681723fc9cd 100644 --- a/drivers/usb/host/xhci-plat.h +++ 
b/drivers/usb/host/xhci-plat.h @@ -12,10 +12,12 @@ struct xhci_plat_priv { const char *firmware_name; + unsigned long long quirks; void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*resume_quirk)(struct usb_hcd *); }; #define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv) +#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv) #endif /* _XHCI_PLAT_H */ diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 8616c52849c6..c1025d321a41 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -104,16 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev) return of_device_is_compatible(node, "renesas,xhci-r8a7790") || of_device_is_compatible(node, "renesas,xhci-r8a7791") || of_device_is_compatible(node, "renesas,xhci-r8a7793") || - of_device_is_compatible(node, "renensas,rcar-gen2-xhci"); -} - -static int xhci_rcar_is_gen3(struct device *dev) -{ - struct device_node *node = dev->of_node; - - return of_device_is_compatible(node, "renesas,xhci-r8a7795") || - of_device_is_compatible(node, "renesas,xhci-r8a7796") || - of_device_is_compatible(node, "renesas,rcar-gen3-xhci"); + of_device_is_compatible(node, "renesas,rcar-gen2-xhci"); } void xhci_rcar_start(struct usb_hcd *hcd) @@ -226,32 +217,13 @@ static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd) /* This function needs to initialize a "phy" of usb before */ int xhci_rcar_init_quirk(struct usb_hcd *hcd) { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - /* If hcd->regs is NULL, we don't just call the following function */ if (!hcd->regs) return 0; - /* - * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set - * to 1. However, these SoCs don't support 64-bit address memory - * pointers. So, this driver clears the AC64 bit of xhci->hcc_params - * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in - * xhci_gen_setup(). - * - * And, since the firmware/internal CPU control the USBSTS.STS_HALT - * and the process speed is down when the roothub port enters U3, - * long delay for the handshake of STS_HALT is neeed in xhci_suspend(). - */ - if (xhci_rcar_is_gen2(hcd->self.controller) || - xhci_rcar_is_gen3(hcd->self.controller)) { - xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND; - } - if (!xhci_rcar_wait_for_pll_active(hcd)) return -ETIMEDOUT; - xhci->quirks |= XHCI_TRUST_TX_LENGTH; return xhci_rcar_download_firmware(hcd); } diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h index 804b6ab4246f..012744a63a49 100644 --- a/drivers/usb/host/xhci-rcar.h +++ b/drivers/usb/host/xhci-rcar.h @@ -31,4 +31,25 @@ static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd) return 0; } #endif + +/* + * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set + * to 1. However, these SoCs don't support 64-bit address memory + * pointers. So, this driver clears the AC64 bit of xhci->hcc_params + * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in + * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk. + * + * And, since the firmware/internal CPU control the USBSTS.STS_HALT + * and the process speed is down when the roothub port enters U3, + * long delay for the handshake of STS_HALT is neeed in xhci_suspend() + * by using the XHCI_SLOW_SUSPEND quirk. 
+ */ +#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \ + .firmware_name = firmware, \ + .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \ + XHCI_SLOW_SUSPEND, \ + .init_quirk = xhci_rcar_init_quirk, \ + .plat_start = xhci_rcar_start, \ + .resume_quirk = xhci_rcar_resume_quirk, + #endif /* _XHCI_RCAR_H */ diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index dafc65911fc0..2ff7c911fbd0 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) tegra_xusb_config(tegra, regs); + /* + * The XUSB Falcon microcontroller can only address 40 bits, so set + * the DMA mask accordingly. + */ + err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40)); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + goto put_rpm; + } + err = tegra_xusb_load_firmware(tegra); if (err < 0) { dev_err(&pdev->dev, "failed to load firmware: %d\n", err); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index e315c0158e90..500865975687 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3814,7 +3814,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } - xhci_debugfs_remove_slot(xhci, udev->slot_id); virt_dev->udev = NULL; ret = xhci_disable_slot(xhci, udev->slot_id); if (ret) @@ -3832,6 +3831,8 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) if (!command) return -ENOMEM; + xhci_debugfs_remove_slot(xhci, slot_id); + spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. */ state = readl(&xhci->op_regs->status); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f5c41448d067..f9f88626a57a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2337,12 +2337,13 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2, break; case TRB_RESET_EP: sprintf(str, - "%s: ctx %08x%08x slot %d ep %d flags %c", + "%s: ctx %08x%08x slot %d ep %d flags %c:%c", xhci_trb_type_string(type), field1, field0, TRB_TO_SLOT_ID(field3), /* Macro decrements 1, maybe it shouldn't?!? */ TRB_TO_EP_INDEX(field3) + 1, + field3 & TRB_TSP ? 'T' : 't', field3 & TRB_CYCLE ? 
'C' : 'c'); break; case TRB_STOP_RING: diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index a32d61a79ab8..30cae5e1954d 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -51,7 +51,6 @@ struct rio_usb_data { char *obuf, *ibuf; /* transfer buffers */ char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ wait_queue_head_t wait_q; /* for timeouts */ - struct mutex lock; /* general race avoidance */ }; static DEFINE_MUTEX(rio500_mutex); @@ -63,10 +62,8 @@ static int open_rio(struct inode *inode, struct file *file) /* against disconnect() */ mutex_lock(&rio500_mutex); - mutex_lock(&(rio->lock)); if (rio->isopen || !rio->present) { - mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return -EBUSY; } @@ -74,7 +71,6 @@ static int open_rio(struct inode *inode, struct file *file) init_waitqueue_head(&rio->wait_q); - mutex_unlock(&(rio->lock)); dev_info(&rio->rio_dev->dev, "Rio opened.\n"); mutex_unlock(&rio500_mutex); @@ -88,7 +84,6 @@ static int close_rio(struct inode *inode, struct file *file) /* against disconnect() */ mutex_lock(&rio500_mutex); - mutex_lock(&(rio->lock)); rio->isopen = 0; if (!rio->present) { @@ -100,7 +95,6 @@ static int close_rio(struct inode *inode, struct file *file) } else { dev_info(&rio->rio_dev->dev, "Rio closed.\n"); } - mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return 0; } @@ -115,7 +109,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) int retries; int retval=0; - mutex_lock(&(rio->lock)); + mutex_lock(&rio500_mutex); /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { retval = -ENODEV; @@ -259,7 +253,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) err_out: - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return retval; } @@ -279,12 +273,12 @@ write_rio(struct file *file, const char __user *buffer, int errn = 0; int intr; - intr = mutex_lock_interruptible(&(rio->lock)); + intr = mutex_lock_interruptible(&rio500_mutex); if (intr) return -EINTR; /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return -ENODEV; } @@ -307,7 +301,7 @@ write_rio(struct file *file, const char __user *buffer, goto error; } if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return bytes_written ? bytes_written : -EINTR; } @@ -345,12 +339,12 @@ write_rio(struct file *file, const char __user *buffer, buffer += copy_size; } while (count > 0); - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return bytes_written ? bytes_written : -EIO; error: - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return errn; } @@ -367,12 +361,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) char *ibuf; int intr; - intr = mutex_lock_interruptible(&(rio->lock)); + intr = mutex_lock_interruptible(&rio500_mutex); if (intr) return -EINTR; /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return -ENODEV; } @@ -383,11 +377,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) while (count > 0) { if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return read_count ? 
read_count : -EINTR; } if (!rio->rio_dev) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return -ENODEV; } this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; @@ -405,7 +399,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) count = this_read = partial; } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */ if (!maxretry--) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); dev_err(&rio->rio_dev->dev, "read_rio: maxretry timeout\n"); return -ETIME; @@ -415,19 +409,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) finish_wait(&rio->wait_q, &wait); continue; } else if (result != -EREMOTEIO) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); dev_err(&rio->rio_dev->dev, "Read Whoops - result:%d partial:%u this_read:%u\n", result, partial, this_read); return -EIO; } else { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return (0); } if (this_read) { if (copy_to_user(buffer, ibuf, this_read)) { - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return -EFAULT; } count -= this_read; @@ -435,7 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) buffer += this_read; } } - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); return read_count; } @@ -460,53 +454,55 @@ static int probe_rio(struct usb_interface *intf, { struct usb_device *dev = interface_to_usbdev(intf); struct rio_usb_data *rio = &rio_instance; - int retval = 0; + int retval = -ENOMEM; + char *ibuf, *obuf; - mutex_lock(&rio500_mutex); if (rio->present) { dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum); - retval = -EBUSY; - goto bail_out; - } else { - dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); + return -EBUSY; } + dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); - retval = usb_register_dev(intf, &usb_rio_class); - if (retval) { - dev_err(&dev->dev, - "Not able to get a minor for this device.\n"); - retval = -ENOMEM; - goto bail_out; - } - - rio->rio_dev = dev; - - if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) { + obuf = kmalloc(OBUF_SIZE, GFP_KERNEL); + if (!obuf) { dev_err(&dev->dev, "probe_rio: Not enough memory for the output buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - retval = -ENOMEM; - goto bail_out; + goto err_obuf; } - dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf); + dev_dbg(&intf->dev, "obuf address: %p\n", obuf); - if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) { + ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL); + if (!ibuf) { dev_err(&dev->dev, "probe_rio: Not enough memory for the input buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - kfree(rio->obuf); - retval = -ENOMEM; - goto bail_out; + goto err_ibuf; } - dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); - - mutex_init(&(rio->lock)); + dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf); - usb_set_intfdata (intf, rio); + mutex_lock(&rio500_mutex); + rio->rio_dev = dev; + rio->ibuf = ibuf; + rio->obuf = obuf; rio->present = 1; -bail_out: mutex_unlock(&rio500_mutex); + retval = usb_register_dev(intf, &usb_rio_class); + if (retval) { + dev_err(&dev->dev, + "Not able to get a minor for this device.\n"); + goto err_register; + } + + usb_set_intfdata(intf, rio); + return retval; + + err_register: + mutex_lock(&rio500_mutex); + rio->present = 0; + mutex_unlock(&rio500_mutex); + err_ibuf: + kfree(obuf); + err_obuf: return retval; } @@ -515,16 +511,14 @@ static void disconnect_rio(struct 
usb_interface *intf) struct rio_usb_data *rio = usb_get_intfdata (intf); usb_set_intfdata (intf, NULL); - mutex_lock(&rio500_mutex); if (rio) { usb_deregister_dev(intf, &usb_rio_class); - mutex_lock(&(rio->lock)); + mutex_lock(&rio500_mutex); if (rio->isopen) { rio->isopen = 0; /* better let it finish - the release will do whats needed */ rio->rio_dev = NULL; - mutex_unlock(&(rio->lock)); mutex_unlock(&rio500_mutex); return; } @@ -534,9 +528,8 @@ static void disconnect_rio(struct usb_interface *intf) dev_info(&intf->dev, "USB Rio disconnected.\n"); rio->present = 0; - mutex_unlock(&(rio->lock)); + mutex_unlock(&rio500_mutex); } - mutex_unlock(&rio500_mutex); } static const struct usb_device_id rio_table[] = { diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig index 928c2cd6fc00..bf98fd36341d 100644 --- a/drivers/usb/mtu3/Kconfig +++ b/drivers/usb/mtu3/Kconfig @@ -44,6 +44,7 @@ config USB_MTU3_DUAL_ROLE bool "Dual Role mode" depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3)) depends on (EXTCON=y || EXTCON=USB_MTU3) + select USB_ROLE_SWITCH help This is the default mode of working of MTU3 controller where both host and gadget features are enabled. diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h index 76ecf12fdf62..6087be236a35 100644 --- a/drivers/usb/mtu3/mtu3.h +++ b/drivers/usb/mtu3/mtu3.h @@ -199,6 +199,9 @@ struct mtu3_gpd_ring { * @id_nb : notifier for iddig(idpin) detection * @id_work : work of iddig detection notifier * @id_event : event of iddig detecion notifier +* @role_sw : use USB Role Switch to support dual-role switch, can't use +* extcon at the same time, and extcon is deprecated. +* @role_sw_used : true when the USB Role Switch is used. * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not * @manual_drd_enabled: it's true when supports dual-role device by debugfs * to switch host/device modes depending on user input. @@ -212,6 +215,8 @@ struct otg_switch_mtk { struct notifier_block id_nb; struct work_struct id_work; unsigned long id_event; + struct usb_role_switch *role_sw; + bool role_sw_used; bool is_u3_drd; bool manual_drd_enabled; }; diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c index 62c57ddc554e..c96e5dab0a48 100644 --- a/drivers/usb/mtu3/mtu3_debugfs.c +++ b/drivers/usb/mtu3/mtu3_debugfs.c @@ -453,9 +453,9 @@ static ssize_t ssusb_mode_write(struct file *file, const char __user *ubuf, return -EFAULT; if (!strncmp(buf, "host", 4) && !ssusb->is_host) { - ssusb_mode_manual_switch(ssusb, 1); + ssusb_mode_switch(ssusb, 1); } else if (!strncmp(buf, "device", 6) && ssusb->is_host) { - ssusb_mode_manual_switch(ssusb, 0); + ssusb_mode_switch(ssusb, 0); } else { dev_err(ssusb->dev, "wrong or duplicated setting\n"); return -EINVAL; diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c index 5fcb71af875a..08e18448e8b8 100644 --- a/drivers/usb/mtu3/mtu3_dr.c +++ b/drivers/usb/mtu3/mtu3_dr.c @@ -7,6 +7,8 @@ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> */ +#include <linux/usb/role.h> + #include "mtu3.h" #include "mtu3_dr.h" #include "mtu3_debug.h" @@ -280,7 +282,7 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx) * This is useful in special cases, such as uses TYPE-A receptacle but also * wants to support dual-role mode. 
*/ -void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host) +void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host) { struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; @@ -318,6 +320,47 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb, mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value); } +static int ssusb_role_sw_set(struct device *dev, enum usb_role role) +{ + struct ssusb_mtk *ssusb = dev_get_drvdata(dev); + bool to_host = false; + + if (role == USB_ROLE_HOST) + to_host = true; + + if (to_host ^ ssusb->is_host) + ssusb_mode_switch(ssusb, to_host); + + return 0; +} + +static enum usb_role ssusb_role_sw_get(struct device *dev) +{ + struct ssusb_mtk *ssusb = dev_get_drvdata(dev); + enum usb_role role; + + role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE; + + return role; +} + +static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx) +{ + struct usb_role_switch_desc role_sx_desc = { 0 }; + struct ssusb_mtk *ssusb = + container_of(otg_sx, struct ssusb_mtk, otg_switch); + + if (!otg_sx->role_sw_used) + return 0; + + role_sx_desc.set = ssusb_role_sw_set; + role_sx_desc.get = ssusb_role_sw_get; + role_sx_desc.fwnode = dev_fwnode(ssusb->dev); + otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc); + + return PTR_ERR_OR_ZERO(otg_sx->role_sw); +} + int ssusb_otg_switch_init(struct ssusb_mtk *ssusb) { struct otg_switch_mtk *otg_sx = &ssusb->otg_switch; @@ -328,6 +371,8 @@ int ssusb_otg_switch_init(struct ssusb_mtk *ssusb) if (otg_sx->manual_drd_enabled) ssusb_dr_debugfs_init(ssusb); + else if (otg_sx->role_sw_used) + ret = ssusb_role_sw_register(otg_sx); else ret = ssusb_extcon_register(otg_sx); @@ -340,4 +385,5 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb) cancel_work_sync(&otg_sx->id_work); cancel_work_sync(&otg_sx->vbus_work); + usb_role_switch_unregister(otg_sx->role_sw); } diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h index ba6fe357ce29..5e58c4dbd54a 100644 --- a/drivers/usb/mtu3/mtu3_dr.h +++ b/drivers/usb/mtu3/mtu3_dr.h @@ -71,7 +71,7 @@ static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb) #if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE) int ssusb_otg_switch_init(struct ssusb_mtk *ssusb); void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb); -void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host); +void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host); int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on); void ssusb_set_force_mode(struct ssusb_mtk *ssusb, enum mtu3_dr_force_mode mode); @@ -86,8 +86,8 @@ static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb) static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb) {} -static inline void -ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host) {} +static inline void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host) +{} static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on) { diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c index fd0f6c5dfbc1..9c256ea3cdf5 100644 --- a/drivers/usb/mtu3/mtu3_plat.c +++ b/drivers/usb/mtu3/mtu3_plat.c @@ -299,8 +299,9 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb) otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd"); otg_sx->manual_drd_enabled = of_property_read_bool(node, "enable-manual-drd"); + otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch"); - if (of_property_read_bool(node, "extcon")) { + if (!otg_sx->role_sw_used && 
of_property_read_bool(node, "extcon")) { otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0); if (IS_ERR(otg_sx->edev)) { dev_err(ssusb->dev, "couldn't get extcon device\n"); diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index f6a037b5e9ef..b451f4695f3f 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -65,7 +65,7 @@ struct fsl_otg_timer *b_data_pulse_tmr, *b_vbus_pulse_tmr, *b_srp_fail_tmr, static struct list_head active_timers; -static struct fsl_otg_config fsl_otg_initdata = { +static const struct fsl_otg_config fsl_otg_initdata = { .otg_port = 1, }; diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index 86defca6623e..94b4e7db2b94 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -85,16 +85,6 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) } EXPORT_SYMBOL_GPL(usb_role_switch_get_role); -static int switch_fwnode_match(struct device *dev, const void *fwnode) -{ - return dev_fwnode(dev) == fwnode; -} - -static int switch_name_match(struct device *dev, const void *name) -{ - return !strcmp((const char *)name, dev_name(dev)); -} - static void *usb_role_switch_match(struct device_connection *con, int ep, void *data) { @@ -104,16 +94,27 @@ static void *usb_role_switch_match(struct device_connection *con, int ep, if (con->id && !fwnode_property_present(con->fwnode, con->id)) return NULL; - dev = class_find_device(role_class, NULL, con->fwnode, - switch_fwnode_match); + dev = class_find_device_by_fwnode(role_class, con->fwnode); } else { - dev = class_find_device(role_class, NULL, con->endpoint[ep], - switch_name_match); + dev = class_find_device_by_name(role_class, con->endpoint[ep]); } return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); } +static struct usb_role_switch * +usb_role_switch_is_parent(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent = fwnode_get_parent(fwnode); + struct device *dev; + + if (!parent || !fwnode_property_present(parent, "usb-role-switch")) + return NULL; + + dev = class_find_device_by_fwnode(role_class, parent); + return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER); +} + /** * usb_role_switch_get - Find USB role switch linked with the caller * @dev: The caller device @@ -125,8 +126,10 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev) { struct usb_role_switch *sw; - sw = device_connection_find_match(dev, "usb-role-switch", NULL, - usb_role_switch_match); + sw = usb_role_switch_is_parent(dev_fwnode(dev)); + if (!sw) + sw = device_connection_find_match(dev, "usb-role-switch", NULL, + usb_role_switch_match); if (!IS_ERR_OR_NULL(sw)) WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); @@ -136,6 +139,28 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev) EXPORT_SYMBOL_GPL(usb_role_switch_get); /** + * fwnode_usb_role_switch_get - Find USB role switch linked with the caller + * @fwnode: The caller device node + * + * This is similar to the usb_role_switch_get() function above, but it searches + * the switch using fwnode instead of device entry. 
+ */ +struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode) +{ + struct usb_role_switch *sw; + + sw = usb_role_switch_is_parent(fwnode); + if (!sw) + sw = fwnode_connection_find_match(fwnode, "usb-role-switch", + NULL, usb_role_switch_match); + if (!IS_ERR_OR_NULL(sw)) + WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + + return sw; +} +EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get); + +/** * usb_role_switch_put - Release handle to a switch * @sw: USB Role Switch * diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c index 277de96181f9..88d041601c51 100644 --- a/drivers/usb/roles/intel-xhci-usb-role-switch.c +++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/usb/role.h> /* register definition */ @@ -26,6 +27,12 @@ #define SW_VBUS_VALID BIT(24) #define SW_IDPIN_EN BIT(21) #define SW_IDPIN BIT(20) +#define SW_SWITCH_EN BIT(16) + +#define DRD_CONFIG_DYNAMIC 0 +#define DRD_CONFIG_STATIC_HOST 1 +#define DRD_CONFIG_STATIC_DEVICE 2 +#define DRD_CONFIG_MASK 3 #define DUAL_ROLE_CFG1 0x6c #define HOST_MODE BIT(29) @@ -37,6 +44,7 @@ struct intel_xhci_usb_data { struct usb_role_switch *role_sw; void __iomem *base; + bool enable_sw_switch; }; static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) @@ -45,6 +53,7 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) unsigned long timeout; acpi_status status; u32 glk, val; + u32 drd_config = DRD_CONFIG_DYNAMIC; /* * On many CHT devices ACPI event (_AEI) handlers read / modify / @@ -59,24 +68,35 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role) pm_runtime_get_sync(dev); - /* Set idpin value as requested */ + /* + * Set idpin value as requested. + * Since some devices rely on firmware setting DRD_CONFIG and + * SW_SWITCH_EN bits to be zero for role switch, + * do not set these bits for those devices. 
+ */ val = readl(data->base + DUAL_ROLE_CFG0); switch (role) { case USB_ROLE_NONE: val |= SW_IDPIN; val &= ~SW_VBUS_VALID; + drd_config = DRD_CONFIG_DYNAMIC; break; case USB_ROLE_HOST: val &= ~SW_IDPIN; val &= ~SW_VBUS_VALID; + drd_config = DRD_CONFIG_STATIC_HOST; break; case USB_ROLE_DEVICE: val |= SW_IDPIN; val |= SW_VBUS_VALID; + drd_config = DRD_CONFIG_STATIC_DEVICE; break; } val |= SW_IDPIN_EN; - + if (data->enable_sw_switch) { + val &= ~DRD_CONFIG_MASK; + val |= SW_SWITCH_EN | drd_config; + } writel(val, data->base + DUAL_ROLE_CFG0); acpi_release_global_lock(glk); @@ -147,6 +167,9 @@ static int intel_xhci_usb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); + data->enable_sw_switch = !device_property_read_bool(dev, + "sw_switch_disable"); + data->role_sw = usb_role_switch_register(dev, &sw_desc); if (IS_ERR(data->role_sw)) return PTR_ERR(data->role_sw); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4b3a049561f3..f0688c44b04c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2023,6 +2023,46 @@ static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr, return 0; } +static int ftdi_gpio_init_ft232h(struct usb_serial_port *port) +{ + struct ftdi_private *priv = usb_get_serial_port_data(port); + u16 cbus_config; + u8 *buf; + int ret; + int i; + + buf = kmalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ftdi_read_eeprom(port->serial, buf, 0x1a, 4); + if (ret < 0) + goto out_free; + + /* + * FT232H CBUS Memory Map + * + * 0x1a: X- (upper nibble -> AC5) + * 0x1b: -X (lower nibble -> AC6) + * 0x1c: XX (upper nibble -> AC9 | lower nibble -> AC8) + */ + cbus_config = buf[2] << 8 | (buf[1] & 0xf) << 4 | (buf[0] & 0xf0) >> 4; + + priv->gc.ngpio = 4; + priv->gpio_altfunc = 0xff; + + for (i = 0; i < priv->gc.ngpio; ++i) { + if ((cbus_config & 0xf) == FTDI_FTX_CBUS_MUX_GPIO) + priv->gpio_altfunc &= ~BIT(i); + cbus_config >>= 4; + } + +out_free: + kfree(buf); + + return ret; +} + static int ftdi_gpio_init_ft232r(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); @@ -2098,6 +2138,9 @@ static int ftdi_gpio_init(struct usb_serial_port *port) int result; switch (priv->chip_type) { + case FT232H: + result = ftdi_gpio_init_ft232h(port); + break; case FT232RL: result = ftdi_gpio_init_ft232r(port); break; diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index cc794e25a0b6..1d9ce9cbc831 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -38,7 +38,7 @@ MODULE_LICENSE("GPL"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); +MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; @@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us) goto INIT_FAIL; } - if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || - CHECK_FW_VER(chip, 0x5901)) - SET_AUTO_DELINK(chip); - if (STATUS_LEN(chip) == 16) { - if (SUPPORT_AUTO_DELINK(chip)) + if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || + CHECK_PID(chip, 0x0159)) { + if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || + CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); + if (STATUS_LEN(chip) == 16) { + if (SUPPORT_AUTO_DELINK(chip)) + SET_AUTO_DELINK(chip); + } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) diff --git 
a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index df4de8323eff..6737fab94959 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -40,6 +40,7 @@ #include <scsi/scsi_eh.h> #include "usb.h" +#include <linux/usb/hcd.h> #include "scsiglue.h" #include "debug.h" #include "transport.h" @@ -141,11 +142,10 @@ static int slave_configure(struct scsi_device *sdev) /* * Some USB host controllers can't do DMA; they have to use PIO. - * They indicate this by setting their dma_mask to NULL. For - * such controllers we need to make sure the block layer sets + * For such controllers we need to make sure the block layer sets * up bounce buffers in addressable memory. */ - if (!us->pusb_dev->bus->controller->dma_mask) + if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus))) blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH); /* diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ea0d27a94afe..1cd9b6305b06 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, US_FL_IGNORE_RESIDUE ), /* Reported by Michael Büsch <m@bues.ch> */ -UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig index 89d9193bd1cf..895e2418de53 100644 --- a/drivers/usb/typec/Kconfig +++ b/drivers/usb/typec/Kconfig @@ -53,6 +53,7 @@ source "drivers/usb/typec/ucsi/Kconfig" config TYPEC_TPS6598X tristate "TI TPS6598x USB Power Delivery controller driver" depends on I2C + select REGMAP_I2C help Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power Delivery controller. diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index a18285a990a8..94a3eda62add 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -205,16 +205,6 @@ static void typec_altmode_put_partner(struct altmode *altmode) put_device(&adev->dev); } -static int typec_port_fwnode_match(struct device *dev, const void *fwnode) -{ - return dev_fwnode(dev) == fwnode; -} - -static int typec_port_name_match(struct device *dev, const void *name) -{ - return !strcmp((const char *)name, dev_name(dev)); -} - static void *typec_port_match(struct device_connection *con, int ep, void *data) { struct device *dev; @@ -224,11 +214,9 @@ static void *typec_port_match(struct device_connection *con, int ep, void *data) * we need to return ERR_PTR(-PROBE_DEFER) when there is no device. */ if (con->fwnode) - return class_find_device(typec_class, NULL, con->fwnode, - typec_port_fwnode_match); + return class_find_device_by_fwnode(typec_class, con->fwnode); - dev = class_find_device(typec_class, NULL, con->endpoint[ep], - typec_port_name_match); + dev = class_find_device_by_name(typec_class, con->endpoint[ep]); return dev ? 
dev : ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 166b28562395..96562744101c 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1439,7 +1439,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, else if ((pdo_min_voltage(pdo[i]) == pdo_min_voltage(pdo[i - 1])) && (pdo_max_voltage(pdo[i]) == - pdo_min_voltage(pdo[i - 1]))) + pdo_max_voltage(pdo[i - 1]))) return PDO_ERR_DUPE_PDO; break; /* diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 35618ceb2791..d11270560c24 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h @@ -52,7 +52,11 @@ struct stub_priv { unsigned long seqnum; struct list_head list; struct stub_device *sdev; - struct urb *urb; + struct urb **urbs; + struct scatterlist *sgl; + int num_urbs; + int completed_urbs; + int urb_status; int unlinking; }; @@ -86,6 +90,7 @@ extern struct usb_device_driver stub_driver; struct bus_id_priv *get_busid_priv(const char *busid); void put_busid_priv(struct bus_id_priv *bid); int del_match_busid(char *busid); +void stub_free_priv_and_urb(struct stub_priv *priv); void stub_device_cleanup_urbs(struct stub_device *sdev); /* stub_rx.c */ diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 2e4bfccd4bfc..c1c0bbc9f8b1 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -6,6 +6,7 @@ #include <linux/string.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/scatterlist.h> #include "usbip_common.h" #include "stub.h" @@ -281,13 +282,49 @@ static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead) struct stub_priv *priv, *tmp; list_for_each_entry_safe(priv, tmp, listhead, list) { - list_del(&priv->list); + list_del_init(&priv->list); return priv; } return NULL; } +void stub_free_priv_and_urb(struct stub_priv *priv) +{ + struct urb *urb; + int i; + + for (i = 0; i < priv->num_urbs; i++) { + urb = priv->urbs[i]; + + if (!urb) + return; + + kfree(urb->setup_packet); + urb->setup_packet = NULL; + + + if (urb->transfer_buffer && !priv->sgl) { + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + } + + if (urb->num_sgs) { + sgl_free(urb->sg); + urb->sg = NULL; + urb->num_sgs = 0; + } + + usb_free_urb(urb); + } + if (!list_empty(&priv->list)) + list_del(&priv->list); + if (priv->sgl) + sgl_free(priv->sgl); + kfree(priv->urbs); + kmem_cache_free(stub_priv_cache, priv); +} + static struct stub_priv *stub_priv_pop(struct stub_device *sdev) { unsigned long flags; @@ -314,25 +351,15 @@ done: void stub_device_cleanup_urbs(struct stub_device *sdev) { struct stub_priv *priv; - struct urb *urb; + int i; dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { - urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", - priv->seqnum); - usb_kill_urb(urb); - - kmem_cache_free(stub_priv_cache, priv); + for (i = 0; i < priv->num_urbs; i++) + usb_kill_urb(priv->urbs[i]); - kfree(urb->transfer_buffer); - urb->transfer_buffer = NULL; - - kfree(urb->setup_packet); - urb->setup_packet = NULL; - - usb_free_urb(urb); + stub_free_priv_and_urb(priv); } } diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index b0a855acafa3..66edfeea68fe 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -7,6 +7,7 @@ #include <linux/kthread.h> #include <linux/usb.h> #include <linux/usb/hcd.h> +#include <linux/scatterlist.h> #include 
"usbip_common.h" #include "stub.h" @@ -201,7 +202,7 @@ static void tweak_special_requests(struct urb *urb) static int stub_recv_cmd_unlink(struct stub_device *sdev, struct usbip_header *pdu) { - int ret; + int ret, i; unsigned long flags; struct stub_priv *priv; @@ -246,12 +247,14 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, * so a driver in a client host will know the failure * of the unlink request ? */ - ret = usb_unlink_urb(priv->urb); - if (ret != -EINPROGRESS) - dev_err(&priv->urb->dev->dev, - "failed to unlink a urb # %lu, ret %d\n", - priv->seqnum, ret); - + for (i = priv->completed_urbs; i < priv->num_urbs; i++) { + ret = usb_unlink_urb(priv->urbs[i]); + if (ret != -EINPROGRESS) + dev_err(&priv->urbs[i]->dev->dev, + "failed to unlink %d/%d urb of seqnum %lu, ret %d\n", + i + 1, priv->num_urbs, + priv->seqnum, ret); + } return 0; } @@ -433,14 +436,36 @@ static void masking_bogus_flags(struct urb *urb) urb->transfer_flags &= allowed; } +static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv) +{ + int ret; + int i; + + for (i = 0; i < priv->num_urbs; i++) { + ret = usbip_recv_xbuff(ud, priv->urbs[i]); + if (ret < 0) + break; + } + + return ret; +} + static void stub_recv_cmd_submit(struct stub_device *sdev, struct usbip_header *pdu) { - int ret; struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = sdev->udev; + struct scatterlist *sgl = NULL, *sg; + void *buffer = NULL; + unsigned long long buf_len; + int nents; + int num_urbs = 1; int pipe = get_pipe(sdev, pdu); + int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG; + int support_sg = 1; + int np = 0; + int ret, i; if (pipe == -1) return; @@ -449,76 +474,139 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, if (!priv) return; - /* setup a urb */ - if (usb_pipeisoc(pipe)) - priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets, - GFP_KERNEL); - else - priv->urb = usb_alloc_urb(0, GFP_KERNEL); + buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; - if (!priv->urb) { - usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); - return; + /* allocate urb transfer buffer, if needed */ + if (buf_len) { + if (use_sg) { + sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); + if (!sgl) + goto err_malloc; + } else { + buffer = kzalloc(buf_len, GFP_KERNEL); + if (!buffer) + goto err_malloc; + } } - /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0) { - priv->urb->transfer_buffer = - kzalloc(pdu->u.cmd_submit.transfer_buffer_length, - GFP_KERNEL); - if (!priv->urb->transfer_buffer) { + /* Check if the server's HCD supports SG */ + if (use_sg && !udev->bus->sg_tablesize) { + /* + * If the server's HCD doesn't support SG, break a single SG + * request into several URBs and map each SG list entry to + * corresponding URB buffer. The previously allocated SG + * list is stored in priv->sgl (If the server's HCD support SG, + * SG list is stored only in urb->sg) and it is used as an + * indicator that the server split single SG request into + * several URBs. Later, priv->sgl is used by stub_complete() and + * stub_send_ret_submit() to reassemble the divied URBs. 
+ */ + support_sg = 0; + num_urbs = nents; + priv->completed_urbs = 0; + pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG; + } + + /* allocate urb array */ + priv->num_urbs = num_urbs; + priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); + if (!priv->urbs) + goto err_urbs; + + /* setup a urb */ + if (support_sg) { + if (usb_pipeisoc(pipe)) + np = pdu->u.cmd_submit.number_of_packets; + + priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL); + if (!priv->urbs[0]) + goto err_urb; + + if (buf_len) { + if (use_sg) { + priv->urbs[0]->sg = sgl; + priv->urbs[0]->num_sgs = nents; + priv->urbs[0]->transfer_buffer = NULL; + } else { + priv->urbs[0]->transfer_buffer = buffer; + } + } + + /* copy urb setup packet */ + priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, + 8, GFP_KERNEL); + if (!priv->urbs[0]->setup_packet) { usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return; } - } - /* copy urb setup packet */ - priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8, - GFP_KERNEL); - if (!priv->urb->setup_packet) { - dev_err(&udev->dev, "allocate setup_packet\n"); - usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); - return; + usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0); + } else { + for_each_sg(sgl, sg, nents, i) { + priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); + /* The URBs which is previously allocated will be freed + * in stub_device_cleanup_urbs() if error occurs. + */ + if (!priv->urbs[i]) + goto err_urb; + + usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0); + priv->urbs[i]->transfer_buffer = sg_virt(sg); + priv->urbs[i]->transfer_buffer_length = sg->length; + } + priv->sgl = sgl; } - /* set other members from the base header of pdu */ - priv->urb->context = (void *) priv; - priv->urb->dev = udev; - priv->urb->pipe = pipe; - priv->urb->complete = stub_complete; + for (i = 0; i < num_urbs; i++) { + /* set other members from the base header of pdu */ + priv->urbs[i]->context = (void *) priv; + priv->urbs[i]->dev = udev; + priv->urbs[i]->pipe = pipe; + priv->urbs[i]->complete = stub_complete; - usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0); + /* no need to submit an intercepted request, but harmless? */ + tweak_special_requests(priv->urbs[i]); + masking_bogus_flags(priv->urbs[i]); + } - if (usbip_recv_xbuff(ud, priv->urb) < 0) + if (stub_recv_xbuff(ud, priv) < 0) return; - if (usbip_recv_iso(ud, priv->urb) < 0) + if (usbip_recv_iso(ud, priv->urbs[0]) < 0) return; - /* no need to submit an intercepted request, but harmless? */ - tweak_special_requests(priv->urb); - - masking_bogus_flags(priv->urb); /* urb is now ready to submit */ - ret = usb_submit_urb(priv->urb, GFP_KERNEL); - - if (ret == 0) - usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", - pdu->base.seqnum); - else { - dev_err(&udev->dev, "submit_urb error, %d\n", ret); - usbip_dump_header(pdu); - usbip_dump_urb(priv->urb); - - /* - * Pessimistic. - * This connection will be discarded. - */ - usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); + for (i = 0; i < priv->num_urbs; i++) { + ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL); + + if (ret == 0) + usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", + pdu->base.seqnum); + else { + dev_err(&udev->dev, "submit_urb error, %d\n", ret); + usbip_dump_header(pdu); + usbip_dump_urb(priv->urbs[i]); + + /* + * Pessimistic. + * This connection will be discarded. 
+ */ + usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); + break; + } } usbip_dbg_stub_rx("Leave\n"); + return; + +err_urb: + kfree(priv->urbs); +err_urbs: + kfree(buffer); + sgl_free(sgl); +err_malloc: + usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); } /* recv a pdu */ diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index f0ec41a50cbc..36010a82b359 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -5,25 +5,11 @@ #include <linux/kthread.h> #include <linux/socket.h> +#include <linux/scatterlist.h> #include "usbip_common.h" #include "stub.h" -static void stub_free_priv_and_urb(struct stub_priv *priv) -{ - struct urb *urb = priv->urb; - - kfree(urb->setup_packet); - urb->setup_packet = NULL; - - kfree(urb->transfer_buffer); - urb->transfer_buffer = NULL; - - list_del(&priv->list); - kmem_cache_free(stub_priv_cache, priv); - usb_free_urb(urb); -} - /* be in spin_lock_irqsave(&sdev->priv_lock, flags) */ void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum, __u32 status) @@ -85,6 +71,22 @@ void stub_complete(struct urb *urb) break; } + /* + * If the server breaks single SG request into the several URBs, the + * URBs must be reassembled before sending completed URB to the vhci. + * Don't wake up the tx thread until all the URBs are completed. + */ + if (priv->sgl) { + priv->completed_urbs++; + + /* Only save the first error status */ + if (urb->status && !priv->urb_status) + priv->urb_status = urb->status; + + if (priv->completed_urbs < priv->num_urbs) + return; + } + /* link a urb to the queue of tx. */ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { @@ -156,18 +158,22 @@ static int stub_send_ret_submit(struct stub_device *sdev) size_t total_size = 0; while ((priv = dequeue_from_priv_tx(sdev)) != NULL) { - int ret; - struct urb *urb = priv->urb; + struct urb *urb = priv->urbs[0]; struct usbip_header pdu_header; struct usbip_iso_packet_descriptor *iso_buffer = NULL; struct kvec *iov = NULL; + struct scatterlist *sg; + u32 actual_length = 0; int iovnum = 0; + int ret; + int i; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); - if (urb->actual_length > 0 && !urb->transfer_buffer) { + if (urb->actual_length > 0 && !urb->transfer_buffer && + !urb->num_sgs) { dev_err(&sdev->udev->dev, "urb: actual_length %d transfer_buffer null\n", urb->actual_length); @@ -176,6 +182,11 @@ static int stub_send_ret_submit(struct stub_device *sdev) if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) iovnum = 2 + urb->number_of_packets; + else if (usb_pipein(urb->pipe) && urb->actual_length > 0 && + urb->num_sgs) + iovnum = 1 + urb->num_sgs; + else if (usb_pipein(urb->pipe) && priv->sgl) + iovnum = 1 + priv->num_urbs; else iovnum = 2; @@ -192,6 +203,15 @@ static int stub_send_ret_submit(struct stub_device *sdev) setup_ret_submit_pdu(&pdu_header, urb); usbip_dbg_stub_tx("setup txdata seqnum: %d\n", pdu_header.base.seqnum); + + if (priv->sgl) { + for (i = 0; i < priv->num_urbs; i++) + actual_length += priv->urbs[i]->actual_length; + + pdu_header.u.ret_submit.status = priv->urb_status; + pdu_header.u.ret_submit.actual_length = actual_length; + } + usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; @@ -200,12 +220,47 @@ static int stub_send_ret_submit(struct stub_device *sdev) txsize += sizeof(pdu_header); /* 2. 
setup transfer buffer */ - if (usb_pipein(urb->pipe) && + if (usb_pipein(urb->pipe) && priv->sgl) { + /* If the server split a single SG request into several + * URBs because the server's HCD doesn't support SG, + * reassemble the split URB buffers into a single + * return command. + */ + for (i = 0; i < priv->num_urbs; i++) { + iov[iovnum].iov_base = + priv->urbs[i]->transfer_buffer; + iov[iovnum].iov_len = + priv->urbs[i]->actual_length; + iovnum++; + } + txsize += actual_length; + } else if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && urb->actual_length > 0) { - iov[iovnum].iov_base = urb->transfer_buffer; - iov[iovnum].iov_len = urb->actual_length; - iovnum++; + if (urb->num_sgs) { + unsigned int copy = urb->actual_length; + int size; + + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + if (copy == 0) + break; + + if (copy < sg->length) + size = copy; + else + size = sg->length; + + iov[iovnum].iov_base = sg_virt(sg); + iov[iovnum].iov_len = size; + + iovnum++; + copy -= size; + } + } else { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = urb->actual_length; + iovnum++; + } txsize += urb->actual_length; } else if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 45da3e01c7b0..6532d68e8808 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -680,8 +680,12 @@ EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. */ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { - int ret; + struct scatterlist *sg; + int ret = 0; + int recv; int size; + int copy; + int i; if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) { /* the direction of urb must be OUT. 
*/ @@ -701,29 +705,48 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) if (!(size > 0)) return 0; - if (size > urb->transfer_buffer_length) { + if (size > urb->transfer_buffer_length) /* should not happen, probably malicious packet */ - if (ud->side == USBIP_STUB) { - usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); - return 0; - } else { - usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); - return -EPIPE; - } - } + goto error; - ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); - if (ret != size) { - dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); - if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) { - usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); - } else { - usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); - return -EPIPE; + if (urb->num_sgs) { + copy = size; + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + int recv_size; + + if (copy < sg->length) + recv_size = copy; + else + recv_size = sg->length; + + recv = usbip_recv(ud->tcp_socket, sg_virt(sg), + recv_size); + + if (recv != recv_size) + goto error; + + copy -= recv; + ret += recv; } + + if (ret != size) + goto error; + } else { + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); + if (ret != size) + goto error; } return ret; + +error: + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); + if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); + else + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + + return -EPIPE; } EXPORT_SYMBOL_GPL(usbip_recv_xbuff); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 000ab7225717..585a84d319bd 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -697,7 +697,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag } vdev = &vhci_hcd->vdev[portnum-1]; - if (!urb->transfer_buffer && urb->transfer_buffer_length) { + if (!urb->transfer_buffer && !urb->num_sgs && + urb->transfer_buffer_length) { dev_dbg(dev, "Null URB transfer buffer\n"); return -EINVAL; } @@ -1143,6 +1144,15 @@ static int vhci_setup(struct usb_hcd *hcd) hcd->speed = HCD_USB3; hcd->self.root_hub->speed = USB_SPEED_SUPER; } + + /* + * Support SG. + * sg_tablesize is an arbitrary value to alleviate memory pressure + * on the host. 
+ */ + hcd->self.sg_tablesize = 32; + hcd->self.no_sg_constraint = 1; + return 0; } diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 44cd64518925..33f8972ba842 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -90,6 +90,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); + if (urb->num_sgs) + urb->transfer_flags &= ~URB_DMA_MAP_SG; + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 2fa26d0578d7..c3803785f6ef 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -5,6 +5,7 @@ #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/scatterlist.h> #include "usbip_common.h" #include "vhci.h" @@ -50,19 +51,23 @@ static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev) static int vhci_send_cmd_submit(struct vhci_device *vdev) { + struct usbip_iso_packet_descriptor *iso_buffer = NULL; struct vhci_priv *priv = NULL; + struct scatterlist *sg; struct msghdr msg; - struct kvec iov[3]; + struct kvec *iov; size_t txsize; size_t total_size = 0; + int iovnum; + int err = -ENOMEM; + int i; while ((priv = dequeue_from_priv_tx(vdev)) != NULL) { int ret; struct urb *urb = priv->urb; struct usbip_header pdu_header; - struct usbip_iso_packet_descriptor *iso_buffer = NULL; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); @@ -72,18 +77,45 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", priv->seqnum); + if (urb->num_sgs && usb_pipeout(urb->pipe)) + iovnum = 2 + urb->num_sgs; + else + iovnum = 3; + + iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); + if (!iov) { + usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC); + return -ENOMEM; + } + + if (urb->num_sgs) + urb->transfer_flags |= URB_DMA_MAP_SG; + /* 1. setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); usbip_header_correct_endian(&pdu_header, 1); + iovnum = 0; - iov[0].iov_base = &pdu_header; - iov[0].iov_len = sizeof(pdu_header); + iov[iovnum].iov_base = &pdu_header; + iov[iovnum].iov_len = sizeof(pdu_header); txsize += sizeof(pdu_header); + iovnum++; /* 2. 
setup transfer buffer */ if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) { - iov[1].iov_base = urb->transfer_buffer; - iov[1].iov_len = urb->transfer_buffer_length; + if (urb->num_sgs && + !usb_endpoint_xfer_isoc(&urb->ep->desc)) { + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + iov[iovnum].iov_base = sg_virt(sg); + iov[iovnum].iov_len = sg->length; + iovnum++; + } + } else { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = + urb->transfer_buffer_length; + iovnum++; + } txsize += urb->transfer_buffer_length; } @@ -95,23 +127,26 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) if (!iso_buffer) { usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC); - return -1; + goto err_iso_buffer; } - iov[2].iov_base = iso_buffer; - iov[2].iov_len = len; + iov[iovnum].iov_base = iso_buffer; + iov[iovnum].iov_len = len; + iovnum++; txsize += len; } - ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, 3, txsize); + ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum, + txsize); if (ret != txsize) { pr_err("sendmsg failed!, ret=%d for %zd\n", ret, txsize); - kfree(iso_buffer); usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP); - return -1; + err = -EPIPE; + goto err_tx; } + kfree(iov); kfree(iso_buffer); usbip_dbg_vhci_tx("send txdata\n"); @@ -119,6 +154,13 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) } return total_size; + +err_tx: + kfree(iso_buffer); +err_iso_buffer: + kfree(iov); + + return err; } static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev) diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c index 92f23e3bc27a..7cacae5a8797 100644 --- a/drivers/video/fbdev/acornfb.c +++ b/drivers/video/fbdev/acornfb.c @@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt) case 'M': case 'm': size *= 1024; + /* Fall through */ case 'K': case 'k': size *= 1024; diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index 4eacfb1ce1ac..eb729d704836 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, soft_margin = new_margin; reload = soft_margin * (mem_fclk_21285 / 256); watchdog_ping(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: ret = put_user(soft_margin, int_arg); break;
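
The mtu3_dr.c and usb/roles/class.c hunks above move dual-role switching onto the USB role switch framework. A minimal, self-contained sketch of the registration pattern those hunks follow; my_drd, its is_host flag and the mode-switch stub are illustrative stand-ins, not code from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/usb/role.h>

struct my_drd {
	struct device *dev;
	struct usb_role_switch *role_sw;
	bool is_host;
};

/* Called by whoever drives the switch (Type-C port driver, extcon, ...). */
static int my_role_sw_set(struct device *dev, enum usb_role role)
{
	struct my_drd *drd = dev_get_drvdata(dev);
	bool to_host = (role == USB_ROLE_HOST);

	if (to_host != drd->is_host) {
		/* reprogram the controller for host or device mode here */
		drd->is_host = to_host;
	}

	return 0;
}

static enum usb_role my_role_sw_get(struct device *dev)
{
	struct my_drd *drd = dev_get_drvdata(dev);

	return drd->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
}

static int my_register_role_sw(struct my_drd *drd)
{
	struct usb_role_switch_desc desc = { 0 };

	desc.set = my_role_sw_set;
	desc.get = my_role_sw_get;
	/* consumers locate the switch through this fwnode ("usb-role-switch") */
	desc.fwnode = dev_fwnode(drd->dev);

	drd->role_sw = usb_role_switch_register(drd->dev, &desc);
	return PTR_ERR_OR_ZERO(drd->role_sw);
}

On teardown the switch is dropped with usb_role_switch_unregister(), as the ssusb_otg_switch_exit() hunk does.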
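
The stub_rx.c hunk above falls back to one URB per scatterlist entry when the server's HCD reports sg_tablesize == 0, and the transfer is only handed back once every sub-URB has completed. The sketch below isolates that splitting and completion-counting logic under simplified assumptions: split_req stands in for struct stub_priv, usb_fill_bulk_urb() replaces the usbip PDU unpacking, and locking and error unwinding are left to the caller.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct split_req {
	struct urb **urbs;
	struct scatterlist *sgl;
	int num_urbs;
	int completed_urbs;
	int urb_status;
};

/* Give every SG entry its own URB so a non-SG HCD can still service it. */
static int split_sgl_into_urbs(struct split_req *req, struct usb_device *udev,
			       unsigned int pipe, struct scatterlist *sgl,
			       int nents, usb_complete_t complete_fn)
{
	struct scatterlist *sg;
	int i;

	req->sgl = sgl;
	req->num_urbs = nents;
	req->completed_urbs = 0;
	req->urb_status = 0;

	req->urbs = kmalloc_array(nents, sizeof(*req->urbs), GFP_KERNEL);
	if (!req->urbs)
		return -ENOMEM;

	for_each_sg(sgl, sg, nents, i) {
		req->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!req->urbs[i])
			return -ENOMEM;	/* caller frees what was allocated */

		/* each sub-URB transfers exactly one SG segment */
		usb_fill_bulk_urb(req->urbs[i], udev, pipe, sg_virt(sg),
				  sg->length, complete_fn, req);
	}

	return 0;
}

/* Call from the completion handler; true only when the last sub-URB is done. */
static bool split_req_done(struct split_req *req, struct urb *urb)
{
	if (urb->status && !req->urb_status)
		req->urb_status = urb->status;	/* remember the first error */

	return ++req->completed_urbs == req->num_urbs;
}

In the patch the same counting lives in stub_complete(), which likewise latches only the first error status into priv->urb_status before waking the tx thread.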
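
On the transmit side, the vhci_tx.c and stub_tx.c hunks above avoid bounce copies by pointing one kvec at the header and one at each scatterlist segment before a single kernel_sendmsg() call. A condensed sketch of that gather step; send_urb_payload() and its arguments are illustrative, and the header is assumed to be already endian-corrected by the caller:

#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uio.h>

/* Send header + payload straight out of the SG pages, with no bounce copy. */
static int send_urb_payload(struct socket *sock, void *hdr, size_t hdr_len,
			    struct scatterlist *sgl, int num_sgs)
{
	struct scatterlist *sg;
	struct msghdr msg;
	struct kvec *iov;
	size_t txsize = hdr_len;
	int iovnum = 0;
	int ret, i;

	iov = kcalloc(1 + num_sgs, sizeof(*iov), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	memset(&msg, 0, sizeof(msg));

	iov[iovnum].iov_base = hdr;
	iov[iovnum].iov_len = hdr_len;
	iovnum++;

	/* one kvec per SG segment, so no intermediate linear buffer is needed */
	for_each_sg(sgl, sg, num_sgs, i) {
		iov[iovnum].iov_base = sg_virt(sg);
		iov[iovnum].iov_len = sg->length;
		txsize += sg->length;
		iovnum++;
	}

	ret = kernel_sendmsg(sock, &msg, iov, iovnum, txsize);
	kfree(iov);

	if (ret < 0)
		return ret;

	return (size_t)ret == txsize ? 0 : -EPIPE;
}

The real vhci_send_cmd_submit() additionally appends an iso packet descriptor kvec for isochronous URBs and frees iov on every exit path, which the err_tx/err_iso_buffer labels in the hunk handle.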
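
The usbip_common.c hunk above is the receive-side counterpart: incoming payload is written chunk by chunk into the URB's scatterlist rather than into one flat transfer_buffer. A sketch of that loop, calling kernel_recvmsg() directly where the driver goes through its usbip_recv() wrapper; the function name and the error policy are illustrative:

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Receive exactly "size" bytes, landing each chunk in its own SG entry. */
static int recv_into_sgl(struct socket *sock, struct scatterlist *sgl,
			 int num_sgs, int size)
{
	struct scatterlist *sg;
	int copied = 0;
	int i;

	for_each_sg(sgl, sg, num_sgs, i) {
		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
		struct kvec iov;
		int chunk = min_t(int, size - copied, sg->length);
		int ret;

		if (!chunk)
			break;

		iov.iov_base = sg_virt(sg);
		iov.iov_len = chunk;

		ret = kernel_recvmsg(sock, &msg, &iov, 1, chunk, MSG_WAITALL);
		if (ret != chunk)
			return -EPIPE;	/* short read: treat as a broken link */

		copied += ret;
	}

	return copied == size ? copied : -EPIPE;
}

In the hunk the same failure path raises SDEV_EVENT_ERROR_TCP or VDEV_EVENT_ERROR_TCP before returning -EPIPE, so the connection is torn down rather than silently truncated.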