/************************************************************************************
 * i.MX6 gigabit Ethernet setup trace
 * Notes:
 *     The board only comes up as a 100 Mbit/s interface. Tracing through the kernel
 *     source shows why: the gigabit branch in fec_enet_mii_probe() is commented out
 *     by default, so the PHY is masked down to PHY_BASIC_FEATURES. A sketch of the
 *     restored gigabit path follows fec_enet_mii_probe() below.
 *             2017-04-05, Pingshan Village, Nanshan, Shenzhen, Zeng Jianfeng
 ***********************************************************************************/
// 1. Control flow of the Linux kernel networking stack
// http://longerzone.blog.51cto.com/6081195/1154807
// 2. Linux kernel networking: packet transmit flow diagram
// http://blog.csdn.net/echoisland/article/details/6993756
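// Call chain traced below, from machine init down to where the PHY feature mask is applied:
//   imx6q_init_machine() -> imx6q_enet_init() -> imx6_enet_mac_init() / imx6q_enet_phy_init()
//   fec_probe() -> fec_enet_init() / fec_enet_mii_init()
//   fec_enet_open() -> fec_enet_mii_probe(), where phy_dev->supported is masked; the
//   FEC_QUIRK_HAS_GBIT branch there is commented out, leaving only PHY_BASIC_FEATURES.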
DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
/*
* i.MX6Q/DL maps system memory at 0x10000000 (offset 256MiB), and
* GPU has a limit on physical address that it accesses, which must
* be below 2GiB.
*/
.dma_zone_size = (SZ_2G - SZ_256M),
.smp = smp_ops(imx_smp_ops),
.map_io = imx6q_map_io,
.init_irq = imx6q_init_irq,
.init_machine = imx6q_init_machine, ----------------+
.init_late = imx6q_init_late, |
.dt_compat = imx6q_dt_compat, |
.reserve = imx6q_reserve, |
.restart = mxc_restart, |
MACHINE_END |
|
static void __init imx6q_init_machine(void) <-----------+
{
struct device *parent;
if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_2_0)
imx_print_silicon_rev("i.MX6QP", IMX_CHIP_REVISION_1_0);
else
imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
imx_get_soc_revision());
mxc_arch_reset_init_dt();
parent = imx_soc_device_init();
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
of_platform_populate(NULL, of_default_bus_match_table,
imx6q_auxdata_lookup, parent);
imx6q_enet_init(); ------------+
imx_anatop_init(); |
imx6q_csi_mux_init(); |
cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init(); |
imx6q_mini_pcie_init(); |
} |
|
static inline void imx6q_enet_init(void) <-----------+
{
imx6_enet_mac_init("fsl,imx6q-fec"); ---------------------------+---------------------+
imx6q_enet_phy_init(); ---------------------------*-------------------+ |
imx6q_1588_init(); | | |
if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_2_0) | | |
imx6q_enet_clk_sel(); | | |
imx6q_enet_plt_init(); | | |
} | | |
| | |
void __init imx6_enet_mac_init(const char *compatible) <-------------+ | |
{ | |
struct device_node *ocotp_np, *enet_np, *from = NULL; | |
void __iomem *base; | |
struct property *newmac; | |
u32 macaddr_low; | |
u32 macaddr_high = 0; | |
u32 macaddr1_high = 0; | |
u8 *macaddr; | |
int i; | |
| |
for (i = 0; i < 2; i++) { | |
enet_np = of_find_compatible_node(from, NULL, compatible); | |
if (!enet_np) | |
return; | |
| |
from = enet_np; | |
| |
if (of_get_mac_address(enet_np)) | |
goto put_enet_node; | |
| |
ocotp_np = of_find_compatible_node(NULL, --------+ | |
NULL, "fsl,imx6q-ocotp"); | | |
if (!ocotp_np) { | | |
pr_warn("failed to find ocotp node\n"); | | |
goto put_enet_node; | | |
} | | |
| | |
base = of_iomap(ocotp_np, 0); | | |
if (!base) { | | |
pr_warn("failed to map ocotp\n"); | | |
goto put_ocotp_node; | | |
} | | |
| | |
macaddr_low = readl_relaxed(base + OCOTP_MACn(1)); | | |
if (i) | | |
macaddr1_high = readl_relaxed(base + OCOTP_MACn(2)); | | |
else | | |
macaddr_high = readl_relaxed(base + OCOTP_MACn(0)); | | |
| | |
newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL); | | |
if (!newmac) | | |
goto put_ocotp_node; | | |
| | |
newmac->value = newmac + 1; | | |
newmac->length = 6; | | |
newmac->name = kstrdup("local-mac-address", GFP_KERNEL); | | |
if (!newmac->name) { | | |
kfree(newmac); | | |
goto put_ocotp_node; | | |
} | | |
| | |
macaddr = newmac->value; | | |
if (i) { | | |
macaddr[5] = (macaddr_low >> 16) & 0xff; | | |
macaddr[4] = (macaddr_low >> 24) & 0xff; | | |
macaddr[3] = macaddr1_high & 0xff; | | |
macaddr[2] = (macaddr1_high >> 8) & 0xff; | | |
macaddr[1] = (macaddr1_high >> 16) & 0xff; | | |
macaddr[0] = (macaddr1_high >> 24) & 0xff; | | |
} else { | | |
macaddr[5] = macaddr_high & 0xff; | | |
macaddr[4] = (macaddr_high >> 8) & 0xff; | | |
macaddr[3] = (macaddr_high >> 16) & 0xff; | | |
macaddr[2] = (macaddr_high >> 24) & 0xff; | | |
macaddr[1] = macaddr_low & 0xff; | | |
macaddr[0] = (macaddr_low >> 8) & 0xff; | | |
} | | |
| | |
of_update_property(enet_np, newmac); | | |
| | |
put_ocotp_node: | | |
of_node_put(ocotp_np); | | |
put_enet_node: | | |
of_node_put(enet_np); | | |
} | | |
} +---------------------------------------+ | |
V | |
struct device_node *of_find_compatible_node(struct device_node *from, | |
const char *type, const char *compatible) | |
{ | |
struct device_node *np; | |
unsigned long flags; | |
| |
raw_spin_lock_irqsave(&devtree_lock, flags); | |
np = from ? from->allnext : of_allnodes; -------------------+ | |
for (; np; np = np->allnext) { | | |
if (__of_device_is_compatible(np, compatible, type, NULL) && | | |
of_node_get(np)) | | |
break; | | |
} | | |
of_node_put(from); | | |
raw_spin_unlock_irqrestore(&devtree_lock, flags); | | |
return np; | | |
} | | |
EXPORT_SYMBOL(of_find_compatible_node); | | |
| | |
struct device_node *of_allnodes; <---------------------------+ | |
EXPORT_SYMBOL(of_allnodes); ------------+ | |
| | |
void __init unflatten_device_tree(void) | | |
{ V | |
__unflatten_device_tree(initial_boot_params, &of_allnodes, ---------+ | |
early_init_dt_alloc_memory_arch); ---------*-+ | |
| | | |
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ | | | |
of_alias_scan(early_init_dt_alloc_memory_arch); ---------*-*-+ | |
} | | | | |
| | | | |
static void __unflatten_device_tree(struct boot_param_header *blob, <-----+ | | | |
struct device_node **mynodes, | | | |
void * (*dt_alloc)(u64 size, u64 align)) | | | |
{ | | | |
unsigned long size; | | | |
int start; | | | |
void *mem; | | | |
struct device_node **allnextp = mynodes; | | | |
| | | |
pr_debug(" -> unflatten_device_tree()\n"); | | | |
| | | |
if (!blob) { | | | |
pr_debug("No device tree pointer\n"); | | | |
return; | | | |
} | | | |
| | | |
pr_debug("Unflattening device tree:\n"); | | | |
pr_debug("magic: %08x\n", be32_to_cpu(blob->magic)); | | | |
pr_debug("size: %08x\n", be32_to_cpu(blob->totalsize)); | | | |
pr_debug("version: %08x\n", be32_to_cpu(blob->version)); | | | |
| | | |
if (be32_to_cpu(blob->magic) != OF_DT_HEADER) { | | | |
pr_err("Invalid device tree blob header\n"); | | | |
return; | | | |
} | | | |
| | | |
/* First pass, scan for size */ | | | |
start = 0; | | | |
size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0); | | | |
size = ALIGN(size, 4); | | | |
| | | |
pr_debug(" size is %lx, allocating...\n", size); | | | |
| | | |
/* Allocate memory for the expanded device tree */ | | | |
mem = dt_alloc(size + 4, __alignof__(struct device_node)); | | | |
memset(mem, 0, size); | | | |
| | | |
*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); | | | |
| | | |
pr_debug(" unflattening %p...\n", mem); | | | |
| | | |
/* Second pass, do actual unflattening */ | | | |
start = 0; | | | |
unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); | | | |
if (be32_to_cpup(mem + size) != 0xdeadbeef) | | | |
pr_warning("End of tree marker overwritten: %08x\n", | | | |
be32_to_cpup(mem + size)); | | | |
*allnextp = NULL; | | | |
| | | |
pr_debug(" <- unflatten_device_tree()\n"); | | | |
} | | | |
| | | |
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) <--------+ | | |
{ | | |
return memblock_virt_alloc(size, align); | | |
} | | |
| | |
static inline void * __init memblock_virt_alloc( <---------------+ | |
phys_addr_t size, phys_addr_t align) | |
{ | |
return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, -----+ | |
BOOTMEM_ALLOC_ACCESSIBLE, | | |
NUMA_NO_NODE); | | |
} | | |
| | |
void * __init memblock_virt_alloc_try_nid( <-----------+ | |
phys_addr_t size, phys_addr_t align, | |
phys_addr_t min_addr, phys_addr_t max_addr, | |
int nid) | |
{ | |
void *ptr; | |
| |
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", | |
__func__, (u64)size, (u64)align, nid, (u64)min_addr, | |
(u64)max_addr, (void *)_RET_IP_); | |
ptr = memblock_virt_alloc_internal(size, align, | |
min_addr, max_addr, nid); | |
if (ptr) | |
return ptr; | |
| |
panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",| |
__func__, (u64)size, (u64)align, nid, (u64)min_addr, | |
(u64)max_addr); | |
return NULL; | |
} | |
| |
static void __init imx6q_enet_phy_init(void) <------------------------------------------+ |
{ |
if (IS_BUILTIN(CONFIG_PHYLIB)) { |
phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, |
ksz9021rn_phy_fixup); |
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK, |
ksz9031rn_phy_fixup); |
phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff, |
ar8031_phy_fixup); |
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef, |
ar8035_phy_fixup); |
} |
} |
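// Each of these fixups is a callback that phylib runs when a PHY matching the given
// UID is probed, before the driver attaches. A minimal, hypothetical sketch of such a
// fixup (register 0x1f and the bit set here are placeholders, not the real fixups above):
static int example_phy_fixup(struct phy_device *dev)
{
	int val;

	/* read a PHY register, set one bit, write it back */
	val = phy_read(dev, 0x1f);
	if (val < 0)
		return val;

	return phy_write(dev, 0x1f, val | 0x0001);
}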
|
static struct platform_driver fec_driver = { |
.driver = { |
.name = DRIVER_NAME, |
.owner = THIS_MODULE, |
.pm = &fec_pm_ops, |
.of_match_table = fec_dt_ids, |
}, |
.id_table = fec_devtype, -------------+ |
.probe = fec_probe, -------------*------------------+ |
.remove = fec_drv_remove, | | |
}; | | |
| | |
module_platform_driver(fec_driver); | | |
| | |
static struct platform_device_id fec_devtype[] = { <------+ | |
{ | |
/* keep it for coldfire */ | |
.name = DRIVER_NAME, | |
.driver_data = 0, | |
}, { | |
.name = "imx25-fec", | |
.driver_data = FEC_QUIRK_USE_GASKET, | |
}, { | |
.name = "imx27-fec", | |
.driver_data = 0, | |
}, { | |
.name = "imx28-fec", | |
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | |
}, { | |
.name = "imx6q-fec", <----------*-------------------+
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | -----------*------+
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | | |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | | |
FEC_QUIRK_BUG_WAITMODE, | |
}, { | |
.name = "mvf600-fec", | |
.driver_data = FEC_QUIRK_ENET_MAC, | |
}, { | |
.name = "imx6sx-fec", | |
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | | |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | | |
FEC_QUIRK_ERR007885 | FEC_QUIRK_TKT210590, | |
}, { | |
.name = "imx6ul-fec", | |
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | | |
FEC_QUIRK_HAS_VLAN, | |
}, { | |
/* sentinel */ | |
} | |
}; | |
MODULE_DEVICE_TABLE(platform, fec_devtype); | |
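// Note: the "imx6q-fec" entry above carries FEC_QUIRK_HAS_GBIT, so the MAC itself is
// gigabit-capable; fec_probe() copies this into fep->quirks via of_match_device()
// below, but the fep->quirks & FEC_QUIRK_HAS_GBIT test in fec_enet_mii_probe() is
// commented out, which is why the link is limited to 100 Mbit/s.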
| |
static int | |
fec_probe(struct platform_device *pdev) <-----------------------------+ |
{ |
struct fec_enet_private *fep; |
struct fec_platform_data *pdata; |
struct net_device *ndev; |
int i, irq, ret = 0; |
struct resource *r; |
const struct of_device_id *of_id; |
static int dev_id; |
struct device_node *np = pdev->dev.of_node, *phy_node; |
int num_tx_qs; |
int num_rx_qs; |
|
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); |
|
/* Init network device */ |
ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), |
num_tx_qs, num_rx_qs); |
if (!ndev) |
return -ENOMEM; |
|
SET_NETDEV_DEV(ndev, &pdev->dev); |
|
/* setup board info structure */ |
fep = netdev_priv(ndev); |
|
of_id = of_match_device(fec_dt_ids, &pdev->dev); |
if (of_id) |
pdev->id_entry = of_id->data; |
fep->quirks = pdev->id_entry->driver_data; |
|
fep->netdev = ndev; |
fep->num_rx_queues = num_rx_qs; |
fep->num_tx_queues = num_tx_qs; |
|
#if !defined(CONFIG_M5272) |
/* default enable pause frame auto negotiation */ |
if (fep->quirks & FEC_QUIRK_HAS_GBIT) |
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; |
#endif |
|
/* Select default pin state */ |
pinctrl_pm_select_default_state(&pdev->dev); |
|
r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
fep->hwp = devm_ioremap_resource(&pdev->dev, r); |
if (IS_ERR(fep->hwp)) { |
ret = PTR_ERR(fep->hwp); |
goto failed_ioremap; |
} |
|
fep->pdev = pdev; |
fep->dev_id = dev_id++; |
|
platform_set_drvdata(pdev, ndev); |
|
fec_enet_of_parse_stop_mode(pdev); |
|
if (of_get_property(np, "fsl,magic-packet", NULL)) |
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; |
|
phy_node = of_parse_phandle(np, "phy-handle", 0); |
if (!phy_node && of_phy_is_fixed_link(np)) { |
ret = of_phy_register_fixed_link(np); |
if (ret < 0) { |
dev_err(&pdev->dev, |
"broken fixed-link specification\n"); |
goto failed_phy; |
} |
phy_node = of_node_get(np); |
} |
fep->phy_node = phy_node; |
|
ret = of_get_phy_mode(pdev->dev.of_node); |
if (ret < 0) { |
pdata = dev_get_platdata(&pdev->dev); |
if (pdata) |
fep->phy_interface = pdata->phy; |
else |
fep->phy_interface = PHY_INTERFACE_MODE_MII; |
} else { |
fep->phy_interface = ret; |
} |
|
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); |
if (IS_ERR(fep->clk_ipg)) { |
ret = PTR_ERR(fep->clk_ipg); |
goto failed_clk; |
} |
|
fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); |
if (IS_ERR(fep->clk_ahb)) { |
ret = PTR_ERR(fep->clk_ahb); |
goto failed_clk; |
} |
|
fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); |
|
/* enet_out is optional, depends on board */ |
fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); |
if (IS_ERR(fep->clk_enet_out)) |
fep->clk_enet_out = NULL; |
|
fep->ptp_clk_on = false; |
mutex_init(&fep->ptp_clk_mutex); |
|
/* clk_ref is optional, depends on board */ |
fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); |
if (IS_ERR(fep->clk_ref)) |
fep->clk_ref = NULL; |
|
fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; |
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); |
if (IS_ERR(fep->clk_ptp)) { |
fep->clk_ptp = NULL; |
fep->bufdesc_ex = false; |
} |
|
pm_runtime_enable(&pdev->dev); |
ret = fec_enet_clk_enable(ndev, true); |
if (ret) |
goto failed_clk; |
|
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); |
if (!IS_ERR(fep->reg_phy)) { |
ret = regulator_enable(fep->reg_phy); |
if (ret) { |
dev_err(&pdev->dev, |
"Failed to enable phy regulator: %d\n", ret); |
goto failed_regulator; |
} |
} else { |
fep->reg_phy = NULL; |
} |
|
fec_reset_phy(pdev); |
|
if (fep->bufdesc_ex) |
fec_ptp_init(pdev); |
|
ret = fec_enet_init(ndev); -------------------+ |
if (ret) | |
goto failed_init; | |
| |
for (i = 0; i < FEC_IRQ_NUM; i++) { | |
irq = platform_get_irq(pdev, i); | |
if (irq < 0) { | |
if (i) | |
break; | |
ret = irq; | |
goto failed_irq; | |
} | |
ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, | |
0, pdev->name, ndev); | |
if (ret) | |
goto failed_irq; | |
| |
fep->irq[i] = irq; | |
} | |
| |
ret = of_property_read_u32(np, "fsl,wakeup_irq", &irq); | |
if (!ret && irq < FEC_IRQ_NUM) | |
fep->wake_irq = fep->irq[irq]; | |
else | |
fep->wake_irq = fep->irq[0]; | |
| |
init_completion(&fep->mdio_done); | |
ret = fec_enet_mii_init(pdev); ---------*-------------*-----+
if (ret) | | |
goto failed_mii_init; | | |
| | |
/* Carrier starts down, phylib will bring it up */ | | |
netif_carrier_off(ndev); | | |
fec_enet_clk_enable(ndev, false); | | |
pinctrl_pm_select_sleep_state(&pdev->dev); | | |
| | |
ret = register_netdev(ndev); | | |
if (ret) | | |
goto failed_register; | | |
| | |
device_init_wakeup(&ndev->dev, fep->wol_flag & | | |
FEC_WOL_HAS_MAGIC_PACKET); | | |
| | |
if (fep->bufdesc_ex && fep->ptp_clock) | | |
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); | | |
| | |
fep->rx_copybreak = COPYBREAK_DEFAULT; | | |
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); | | |
return 0; | | |
| | |
failed_register: | | |
fec_enet_mii_remove(fep); | | |
failed_mii_init: | | |
failed_irq: | | |
failed_init: | | |
if (fep->reg_phy) | | |
regulator_disable(fep->reg_phy); | | |
failed_regulator: | | |
fec_enet_clk_enable(ndev, false); | | |
failed_clk: | | |
failed_phy: | | |
of_node_put(phy_node); | | |
failed_ioremap: | | |
free_netdev(ndev); | | |
| | |
return ret; | | |
} | | |
| | |
static int fec_enet_init(struct net_device *ndev) <-----------+ | |
{ | |
struct fec_enet_private *fep = netdev_priv(ndev); | |
struct fec_enet_priv_tx_q *txq; | |
struct fec_enet_priv_rx_q *rxq; | |
struct bufdesc *cbd_base; | |
dma_addr_t bd_dma; | |
int bd_size; | |
unsigned int i; | |
| |
#if defined(CONFIG_ARM) | |
fep->rx_align = 0xf; | |
fep->tx_align = 0xf; | |
#else | |
fep->rx_align = 0x3; | |
fep->tx_align = 0x3; | |
#endif | |
| |
fec_enet_alloc_queue(ndev); | |
| |
if (fep->bufdesc_ex) | |
fep->bufdesc_size = sizeof(struct bufdesc_ex); | |
else | |
fep->bufdesc_size = sizeof(struct bufdesc); | |
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * | |
fep->bufdesc_size; | |
| |
/* Allocate memory for buffer descriptors. */ | |
cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, | |
GFP_KERNEL); | |
if (!cbd_base) { | |
return -ENOMEM; | |
} | |
| |
memset(cbd_base, 0, bd_size); | |
| |
/* Get the Ethernet address */ | |
fec_get_mac(ndev); | |
/* make sure MAC we just acquired is programmed into the hw */ | |
fec_set_mac_address(ndev, NULL); | |
| |
/* Set receive and transmit descriptor base. */ | |
for (i = 0; i < fep->num_rx_queues; i++) { | |
rxq = fep->rx_queue[i]; | |
rxq->index = i; | |
rxq->rx_bd_base = (struct bufdesc *)cbd_base; | |
rxq->bd_dma = bd_dma; | |
if (fep->bufdesc_ex) { | |
bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; | |
cbd_base = (struct bufdesc *) | |
(((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); | |
} else { | |
bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; | |
cbd_base += rxq->rx_ring_size; | |
} | |
} | |
| |
for (i = 0; i < fep->num_tx_queues; i++) { | |
txq = fep->tx_queue[i]; | |
txq->index = i; | |
txq->tx_bd_base = (struct bufdesc *)cbd_base; | |
txq->bd_dma = bd_dma; | |
if (fep->bufdesc_ex) { | |
bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; | |
cbd_base = (struct bufdesc *) | |
(((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); | |
} else { | |
bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; | |
cbd_base += txq->tx_ring_size; | |
} | |
} | |
| |
| |
/* The FEC Ethernet specific entries in the device structure */ | |
ndev->watchdog_timeo = TX_TIMEOUT; | |
ndev->netdev_ops = &fec_netdev_ops; ----------------------+ | |
ndev->ethtool_ops = &fec_enet_ethtool_ops; | | |
| | |
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | | |
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); | | |
| | |
if (fep->quirks & FEC_QUIRK_HAS_VLAN) | | |
/* enable hw VLAN support */ | | |
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; | | |
| | |
if (fep->quirks & FEC_QUIRK_HAS_CSUM) { | | |
ndev->gso_max_segs = FEC_MAX_TSO_SEGS; | | |
| | |
/* enable hw accelerator */ | | |
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); | | |
fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | | |
} | | |
| | |
if (fep->quirks & FEC_QUIRK_HAS_AVB) { | | |
fep->tx_align = 0; | | |
fep->rx_align = 0x3f; | | |
} | | |
| | |
ndev->hw_features = ndev->features; | | |
| | |
fec_restart(ndev); | | |
| | |
return 0; | | |
} | | |
| | |
static const struct net_device_ops fec_netdev_ops = { <-------------+ | |
.ndo_open = fec_enet_open, --------------+ | |
.ndo_stop = fec_enet_close, | | |
.ndo_start_xmit = fec_enet_start_xmit, | | |
.ndo_select_queue = fec_enet_select_queue, | | |
.ndo_set_rx_mode = set_multicast_list, | | |
.ndo_change_mtu = eth_change_mtu, | | |
.ndo_validate_addr = eth_validate_addr, | | |
.ndo_tx_timeout = fec_timeout, | | |
.ndo_set_mac_address = fec_set_mac_address, | | |
.ndo_do_ioctl = fec_enet_ioctl, | | |
#ifdef CONFIG_NET_POLL_CONTROLLER | | |
.ndo_poll_controller = fec_poll_controller, | | |
#endif | | |
.ndo_set_features = fec_set_features, | | |
}; | | |
| | |
static int | | |
fec_enet_open(struct net_device *ndev) <--------------+ | |
{ | |
struct fec_enet_private *fep = netdev_priv(ndev); | |
const struct platform_device_id *id_entry = | |
platform_get_device_id(fep->pdev); | |
int ret; | |
| |
pinctrl_pm_select_default_state(&fep->pdev->dev); | |
ret = fec_enet_clk_enable(ndev, true); | |
if (ret) | |
return ret; | |
| |
/* I should reset the ring buffers here, but I don't yet know | |
* a simple way to do that. | |
*/ | |
| |
ret = fec_enet_alloc_buffers(ndev); | |
if (ret) | |
goto err_enet_alloc; | |
| |
/* Init MAC firstly for suspend/resume with megafix off case */ | |
fec_restart(ndev); | |
| |
/* Probe and connect to PHY when open the interface */ | |
ret = fec_enet_mii_probe(ndev); ---------------------+ | |
if (ret) | | |
goto err_enet_mii_probe; | | |
| | |
napi_enable(&fep->napi); | | |
phy_start(fep->phy_dev); | | |
netif_tx_start_all_queues(ndev); | | |
| | |
pm_runtime_get_sync(ndev->dev.parent); | | |
if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) && | | |
!fec_enet_irq_workaround(fep)) | | |
pm_qos_add_request(&ndev->pm_qos_req, | | |
PM_QOS_CPU_DMA_LATENCY, | | |
0); | | |
else | | |
pm_qos_add_request(&ndev->pm_qos_req, | | |
PM_QOS_CPU_DMA_LATENCY, | | |
PM_QOS_DEFAULT_VALUE); | | |
| | |
device_set_wakeup_enable(&ndev->dev, fep->wol_flag & | | |
FEC_WOL_FLAG_ENABLE); | | |
fep->miibus_up_failed = false; | | |
| | |
return 0; | | |
| | |
err_enet_mii_probe: | | |
fec_enet_free_buffers(ndev); | | |
err_enet_alloc: | | |
fep->miibus_up_failed = true; | | |
if (!fep->mii_bus_share) | | |
pinctrl_pm_select_sleep_state(&fep->pdev->dev); | | |
return ret; | | |
} | | |
| | |
static int fec_enet_mii_probe(struct net_device *ndev) <----------+ | |
{ | |
struct fec_enet_private *fep = netdev_priv(ndev); | |
struct phy_device *phy_dev = NULL; | |
char mdio_bus_id[MII_BUS_ID_SIZE]; | |
char phy_name[MII_BUS_ID_SIZE + 3]; | |
int phy_id; | |
int dev_id = fep->dev_id; | |
| |
fep->phy_dev = NULL; | |
| |
if (fep->phy_node) { | |
phy_dev = of_phy_connect(ndev, fep->phy_node, | |
&fec_enet_adjust_link, 0, | |
fep->phy_interface); | |
if (!phy_dev) | |
return -ENODEV; | |
} else { | |
/* check for attached phy */ | |
for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { | |
if ((fep->mii_bus->phy_mask & (1 << phy_id))) | |
continue; | |
if (fep->mii_bus->phy_map[phy_id] == NULL) | |
continue; | |
if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) | |
continue; | |
if (dev_id--) | |
continue; | |
strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); | |
break; | |
} | |
| |
if (phy_id >= PHY_MAX_ADDR) { | |
netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); | |
strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); | |
phy_id = 0; | |
} | |
| |
snprintf(phy_name, sizeof(phy_name), | |
PHY_ID_FMT, mdio_bus_id, phy_id); | |
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, | |
fep->phy_interface); | |
} | |
| |
if (IS_ERR(phy_dev)) { | |
netdev_err(ndev, "could not attach to PHY\n"); | |
return PTR_ERR(phy_dev); | |
} | |
| |
/* mask with MAC supported features */ | |
//if (fep->quirks & FEC_QUIRK_HAS_GBIT) { <--------------------------+ |
//phy_dev->supported &= PHY_GBIT_FEATURES; |
//phy_dev->supported &= ~SUPPORTED_1000baseT_Half; |
//printk("FEC_QUIRK_HAS_GBIT\n"); |
#if !defined(CONFIG_M5272) |
//phy_dev->supported |= SUPPORTED_Pause; |
#endif |
//} |
//else |
{ |
printk("PHY_BASIC_FEATURES\n"); |
phy_dev->supported &= PHY_BASIC_FEATURES; |
} |
phy_dev->advertising = phy_dev->supported; |
|
fep->phy_dev = phy_dev; |
fep->link = 0; |
fep->full_duplex = 0; |
|
netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), |
fep->phy_dev->irq); |
|
return 0; |
} |
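// This is the culprit: with the FEC_QUIRK_HAS_GBIT branch commented out, every PHY is
// masked down to PHY_BASIC_FEATURES (10/100 only). A minimal sketch of the restored
// gigabit path, based on the commented-out lines above (whether 1000baseT half duplex
// and pause also need adjusting depends on the board and PHY):
	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	} else {
		phy_dev->supported &= PHY_BASIC_FEATURES;
	}
	phy_dev->advertising = phy_dev->supported;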
|
static int fec_enet_mii_init(struct platform_device *pdev) <------------------+
{
static struct mii_bus *fec0_mii_bus;
static int *fec_mii_bus_share;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *node;
int err = -ENXIO, i;
u32 mii_speed, holdtime;
/*
* The dual fec interfaces are not equivalent with enet-mac.
* Here are the differences:
*
* - fec0 supports MII & RMII modes while fec1 only supports RMII
* - fec0 acts as the 1588 time master while fec1 is slave
* - external phys can only be configured by fec0
*
* That is to say fec1 can not work independently. It only works
* when fec0 is working. The reason behind this design is that the
* second interface is added primarily for Switch mode.
*
* Because of the last point above, both phys are attached on fec0
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
*fec_mii_bus_share = FEC0_MII_BUS_SHARE_TRUE;
mii_cnt++;
return 0;
}
return -ENOENT;
}
fep->mii_timeout = 0;
/*
* Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
*
* The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
* for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
mii_speed--;
if (mii_speed > 63) {
dev_err(&pdev->dev,
"fec clock (%lu) to fast to get right mii speed\n",
clk_get_rate(fep->clk_ipg));
err = -EINVAL;
goto err_out;
}
/*
* The i.MX28 and i.MX6 types have another field in the MSCR (aka
* MII_SPEED) register that defines the MDIO output hold time. Earlier
* versions are RAZ there, so just ignore the difference and write the
* register always.
* The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
* HOLDTIME + 1 is the number of clk cycles the fec is holding the
* output.
* The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
* Given that ceil(clkrate / 5000000) <= 64, the calculation for
* holdtime cannot result in a value greater than 3.
*/
holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
fep->phy_speed = mii_speed << 1 | holdtime << 8;
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
fep->mii_bus = mdiobus_alloc();
if (fep->mii_bus == NULL) {
err = -ENOMEM;
goto err_out;
}
fep->mii_bus->name = "fec_enet_mii_bus";
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!fep->mii_bus->irq) {
err = -ENOMEM;
goto err_out_free_mdiobus;
}
for (i = 0; i < PHY_MAX_ADDR; i++)
fep->mii_bus->irq[i] = PHY_POLL;
node = of_get_child_by_name(pdev->dev.of_node, "mdio");
if (node) {
err = of_mdiobus_register(fep->mii_bus, node);
of_node_put(node);
} else {
err = mdiobus_register(fep->mii_bus);
}
if (err)
goto err_out_free_mdio_irq;
mii_cnt++;
/* save fec0 mii_bus */
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
fec0_mii_bus = fep->mii_bus;
fec_mii_bus_share = &fep->mii_bus_share;
}
return 0;
err_out_free_mdio_irq:
kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
mdiobus_free(fep->mii_bus);
err_out:
return err;
}
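// Worked example for the MDC setup in fec_enet_mii_init() above, assuming a typical
// 66 MHz ipg clock on i.MX6Q (the real rate comes from clk_get_rate(fep->clk_ipg)):
//   mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14, minus 1 for ENET-MAC -> 13
//   MDC       = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, just under the 2.5 MHz target
//   holdtime  = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one ipg cycle
//               (~15 ns) of MDIO output hold, above the 10 ns minimum of IEEE 802.3
//   phy_speed = (13 << 1) | (0 << 8) = 0x1a, the value written to FEC_MII_SPEED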