author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-11 10:55:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-11 10:55:49 -0700
commit     237f83dfbe668443b5e31c3c7576125871cca674
tree       11848a8d0aa414a1d3ce2024e181071b1d9dea08 /drivers/net/ethernet/cadence/macb_main.c
parent     8f6ccf6159aed1f04c6d179f61f6fb2691261e84
parent     1ff2f0fa450ea4e4f87793d9ed513098ec6e12be
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Some highlights from this development cycle:
1) Big refactoring of ipv6 route and neigh handling to support
nexthop objects configurable as units from userspace. From David
Ahern.
2) Convert explored_states in the BPF verifier into a hash table,
significantly decreasing the state held for programs with bpf2bpf
calls, from Alexei Starovoitov.
3) Implement bpf_send_signal() helper, from Yonghong Song.
4) Various classifier enhancements to mvpp2 driver, from Maxime
Chevallier.
5) Add aRFS support to hns3 driver, from Jian Shen.
6) Fix use after free in inet frags by allocating fqdirs dynamically
and reworking how rhashtable dismantle occurs, from Eric Dumazet.
7) Add act_ctinfo packet classifier action, from Kevin
Darbyshire-Bryant.
8) Add TFO key backup infrastructure, from Jason Baron.
9) Remove several old and unused ISDN drivers, from Arnd Bergmann.
10) Add devlink notifications for flash update status to mlxsw driver,
from Jiri Pirko.
11) Lots of kTLS offload infrastructure fixes, from Jakub Kicinski.
12) Add support for mv88e6250 DSA chips, from Rasmus Villemoes.
13) Various enhancements to ipv6 flow label handling, from Eric
Dumazet and Willem de Bruijn.
14) Support TLS offload in nfp driver, from Jakub Kicinski, Dirk van
der Merwe, and others.
15) Various improvements to axienet driver including converting it to
phylink, from Robert Hancock.
16) Add PTP support to sja1105 DSA driver, from Vladimir Oltean.
17) Add mqprio qdisc offload support to dpaa2-eth, from Ioana
Radulescu.
18) Add devlink health reporting to mlx5, from Moshe Shemesh.
19) Convert stmmac over to phylink, from Jose Abreu.
20) Add PTP PHC (PTP Hardware Clock) support to mlxsw, from Shalom
Toledo.
21) Add nftables SYNPROXY support, from Fernando Fernandez Mancera.
22) Convert tcp_fastopen over to use SipHash, from Ard Biesheuvel.
23) Track spill/fill of constants in BPF verifier, from Alexei
Starovoitov.
24) Support bounded loops in BPF, from Alexei Starovoitov.
25) Various page_pool API fixes and improvements, from Jesper Dangaard
Brouer.
26) Just like ipv4, support ref-countless ipv6 route handling. From
Wei Wang.
27) Support VLAN offloading in aquantia driver, from Igor Russkikh.
28) Add AF_XDP zero-copy support to mlx5, from Maxim Mikityanskiy.
29) Add flower GRE encap/decap support to nfp driver, from Pieter
Jansen van Vuuren.
30) Protect against stack overflow when using act_mirred, from John
Hurley.
31) Allow devmap map lookups from eBPF, from Toke Høiland-Jørgensen.
32) Use page_pool API in netsec driver, from Ilias Apalodimas.
33) Add Google gve network driver, from Catherine Sullivan.
34) More indirect call avoidance, from Paolo Abeni.
35) Add kTLS TX HW offload support to mlx5, from Tariq Toukan.
36) Add XDP_REDIRECT support to bnxt_en, from Andy Gospodarek.
37) Add MPLS manipulation actions to TC, from John Hurley.
38) Add sending a packet to connection tracking from TC actions, and
then allow flower classifier matching on conntrack state. From
Paul Blakey.
39) Netfilter hw offload support, from Pablo Neira Ayuso"
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2080 commits)
net/mlx5e: Return in default case statement in tx_post_resync_params
mlx5: Return -EINVAL when WARN_ON_ONCE triggers in mlx5e_tls_resync().
net: dsa: add support for BRIDGE_MROUTER attribute
pkt_sched: Include const.h
net: netsec: remove static declaration for netsec_set_tx_de()
net: netsec: remove superfluous if statement
netfilter: nf_tables: add hardware offload support
net: flow_offload: rename tc_cls_flower_offload to flow_cls_offload
net: flow_offload: add flow_block_cb_is_busy() and use it
net: sched: remove tcf block API
drivers: net: use flow block API
net: sched: use flow block API
net: flow_offload: add flow_block_cb_{priv, incref, decref}()
net: flow_offload: add list handling functions
net: flow_offload: add flow_block_cb_alloc() and flow_block_cb_free()
net: flow_offload: rename TCF_BLOCK_BINDER_TYPE_* to FLOW_BLOCK_BINDER_TYPE_*
net: flow_offload: rename TC_BLOCK_{UN}BIND to FLOW_BLOCK_{UN}BIND
net: flow_offload: add flow_block_cb_setup_simple()
net: hisilicon: Add an tx_desc to adapt HI13X1_GMAC
net: hisilicon: Add an rx_desc to adapt HI13X1_GMAC
...
Diffstat (limited to 'drivers/net/ethernet/cadence/macb_main.c')
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c  143
1 file changed, 134 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 262a28ff81fc..5ca17e62dc3e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/crc32.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -37,6 +38,13 @@
 #include <linux/pm_runtime.h>
 #include "macb.h"
 
+/* This structure is only used for MACB on SiFive FU540 devices */
+struct sifive_fu540_macb_mgmt {
+	void __iomem *reg;
+	unsigned long rate;
+	struct clk_hw hw;
+};
+
 #define MACB_RX_BUFFER_SIZE	128
 #define RX_BUFFER_MULTIPLE	64	/* bytes */
 
@@ -981,7 +989,8 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
 	 */
 }
 
-static int gem_rx(struct macb_queue *queue, int budget)
+static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
+		  int budget)
 {
 	struct macb *bp = queue->bp;
 	unsigned int len;
@@ -1063,7 +1072,7 @@ static int gem_rx(struct macb_queue *queue, int budget)
 			       skb->data, 32, true);
 #endif
 
-		netif_receive_skb(skb);
+		napi_gro_receive(napi, skb);
 	}
 
 	gem_rx_refill(queue);
@@ -1071,8 +1080,8 @@
 	return count;
 }
 
-static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
-			 unsigned int last_frag)
+static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
+			 unsigned int first_frag, unsigned int last_frag)
 {
 	unsigned int len;
 	unsigned int frag;
@@ -1148,7 +1157,7 @@ static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
 	bp->dev->stats.rx_bytes += skb->len;
 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
 		    skb->len, skb->csum);
-	netif_receive_skb(skb);
+	napi_gro_receive(napi, skb);
 
 	return 0;
 }
@@ -1171,7 +1180,8 @@ static inline void macb_init_rx_ring(struct macb_queue *queue)
 	queue->rx_tail = 0;
 }
 
-static int macb_rx(struct macb_queue *queue, int budget)
+static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
+		   int budget)
 {
 	struct macb *bp = queue->bp;
 	bool reset_rx_queue = false;
@@ -1208,7 +1218,7 @@ static int macb_rx(struct macb_queue *queue, int budget)
 			continue;
 		}
 
-		dropped = macb_rx_frame(queue, first_frag, tail);
+		dropped = macb_rx_frame(queue, napi, first_frag, tail);
 		first_frag = -1;
 		if (unlikely(dropped < 0)) {
 			reset_rx_queue = true;
@@ -1262,7 +1272,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		    (unsigned long)status, budget);
 
-	work_done = bp->macbgem_ops.mog_rx(queue, budget);
+	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
@@ -3477,7 +3487,7 @@ static int macb_init(struct platform_device *pdev)
 		queue = &bp->queues[q];
 		queue->bp = bp;
-		netif_napi_add(dev, &queue->napi, macb_poll, 64);
+		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
 		if (hw_q) {
 			queue->ISR  = GEM_ISR(hw_q - 1);
 			queue->IER  = GEM_IER(hw_q - 1);
@@ -3616,6 +3626,8 @@ static int macb_init(struct platform_device *pdev)
 /* max number of receive buffers */
 #define AT91ETHER_MAX_RX_DESCR	9
 
+static struct sifive_fu540_macb_mgmt *mgmt;
+
 /* Initialize and start the Receiver and Transmit subsystems */
 static int at91ether_start(struct net_device *dev)
 {
@@ -3943,6 +3955,116 @@ static int at91ether_init(struct platform_device *pdev)
 	return 0;
 }
 
+static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
+					       unsigned long parent_rate)
+{
+	return mgmt->rate;
+}
+
+static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long *parent_rate)
+{
+	if (WARN_ON(rate < 2500000))
+		return 2500000;
+	else if (rate == 2500000)
+		return 2500000;
+	else if (WARN_ON(rate < 13750000))
+		return 2500000;
+	else if (WARN_ON(rate < 25000000))
+		return 25000000;
+	else if (rate == 25000000)
+		return 25000000;
+	else if (WARN_ON(rate < 75000000))
+		return 25000000;
+	else if (WARN_ON(rate < 125000000))
+		return 125000000;
+	else if (rate == 125000000)
+		return 125000000;
+
+	WARN_ON(rate > 125000000);
+
+	return 125000000;
+}
+
+static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
+				  unsigned long parent_rate)
+{
+	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
+	if (rate != 125000000)
+		iowrite32(1, mgmt->reg);
+	else
+		iowrite32(0, mgmt->reg);
+	mgmt->rate = rate;
+
+	return 0;
+}
+
+static const struct clk_ops fu540_c000_ops = {
+	.recalc_rate = fu540_macb_tx_recalc_rate,
+	.round_rate = fu540_macb_tx_round_rate,
+	.set_rate = fu540_macb_tx_set_rate,
+};
+
+static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
+			       struct clk **hclk, struct clk **tx_clk,
+			       struct clk **rx_clk, struct clk **tsu_clk)
+{
+	struct clk_init_data init;
+	int err = 0;
+
+	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
+	if (err)
+		return err;
+
+	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
+	if (!mgmt)
+		return -ENOMEM;
+
+	init.name = "sifive-gemgxl-mgmt";
+	init.ops = &fu540_c000_ops;
+	init.flags = 0;
+	init.num_parents = 0;
+
+	mgmt->rate = 0;
+	mgmt->hw.init = &init;
+
+	*tx_clk = clk_register(NULL, &mgmt->hw);
+	if (IS_ERR(*tx_clk))
+		return PTR_ERR(*tx_clk);
+
+	err = clk_prepare_enable(*tx_clk);
+	if (err)
+		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+	else
+		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
+
+	return 0;
+}
+
+static int fu540_c000_init(struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -ENODEV;
+
+	mgmt->reg = ioremap(res->start, resource_size(res));
+	if (!mgmt->reg)
+		return -ENOMEM;
+
+	return macb_init(pdev);
+}
+
+static const struct macb_config fu540_c000_config = {
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+		MACB_CAPS_GEM_HAS_PTP,
+	.dma_burst_length = 16,
+	.clk_init = fu540_c000_clk_init,
+	.init = fu540_c000_init,
+	.jumbo_max_len = 10240,
+};
+
 static const struct macb_config at91sam9260_config = {
 	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 	.clk_init = macb_clk_init,
@@ -4032,6 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,emac", .data = &emac_config },
 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
+	{ .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4239,6 +4362,7 @@ err_out_free_netdev:
 
 err_disable_clocks:
 	clk_disable_unprepare(tx_clk);
+	clk_unregister(tx_clk);
 	clk_disable_unprepare(hclk);
 	clk_disable_unprepare(pclk);
 	clk_disable_unprepare(rx_clk);
@@ -4273,6 +4397,7 @@ static int macb_remove(struct platform_device *pdev)
 		pm_runtime_dont_use_autosuspend(&pdev->dev);
 		if (!pm_runtime_suspended(&pdev->dev)) {
 			clk_disable_unprepare(bp->tx_clk);
+			clk_unregister(bp->tx_clk);
 			clk_disable_unprepare(bp->hclk);
 			clk_disable_unprepare(bp->pclk);
 			clk_disable_unprepare(bp->rx_clk);
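
For context on the new FU540 code above: fu540_macb_tx_round_rate() quantizes any requested TX-clock rate to one of the three MAC link-speed clocks (2.5 MHz for 10 Mb/s, 25 MHz for 100 Mb/s, 125 MHz for 1 Gb/s), and fu540_macb_tx_set_rate() then writes 1 or 0 through mgmt->reg, the memory-mapped GEMGXL management register. The following is a minimal, self-contained user-space sketch of that selection logic, not driver code: a plain variable stands in for the register, and the 13.75 MHz / 75 MHz midpoints are taken directly from the hunk above.

/*
 * Illustrative user-space model of the FU540 TX-clock selection shown in
 * the diff above -- NOT driver code.  A plain variable stands in for the
 * register behind mgmt->reg, and the thresholds mirror
 * fu540_macb_tx_round_rate().
 */
#include <stdio.h>

static unsigned long gemgxl_reg;    /* stand-in for the GEMGXL mgmt register */
static unsigned long current_rate;  /* stand-in for mgmt->rate */

static unsigned long model_round_rate(unsigned long rate)
{
	if (rate < 13750000)        /* closer to 2.5 MHz (10 Mb/s) */
		return 2500000;
	if (rate < 75000000)        /* closer to 25 MHz (100 Mb/s) */
		return 25000000;
	return 125000000;           /* 125 MHz (1 Gb/s) */
}

static void model_set_rate(unsigned long rate)
{
	rate = model_round_rate(rate);
	/* the driver writes 1 for the two slower speeds, 0 for gigabit */
	gemgxl_reg = (rate != 125000000) ? 1 : 0;
	current_rate = rate;
}

int main(void)
{
	static const unsigned long requests[] = {
		2500000, 25000000, 125000000, 60000000,
	};
	unsigned int i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		model_set_rate(requests[i]);
		printf("request %lu Hz -> rate %lu Hz, reg = %lu\n",
		       requests[i], current_rate, gemgxl_reg);
	}
	return 0;
}

Built with any C99 compiler, an odd request such as 60000000 Hz snaps to 25000000 Hz with the register value set to 1, matching the driver's behaviour of asserting the management bit for any sub-gigabit speed.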