@@ -74,12 +74,6 @@ MODULE_AUTHOR("Intel Corporation");
 #define bar0_off(base, bar)	((base) + ((bar) << 2))
 #define bar2_off(base, bar)	bar0_off(base, (bar) - 2)
 
-static const struct intel_ntb_reg atom_reg;
-static const struct intel_ntb_alt_reg atom_pri_reg;
-static const struct intel_ntb_alt_reg atom_sec_reg;
-static const struct intel_ntb_alt_reg atom_b2b_reg;
-static const struct intel_ntb_xlat_reg atom_pri_xlat;
-static const struct intel_ntb_xlat_reg atom_sec_xlat;
 static const struct intel_ntb_reg xeon_reg;
 static const struct intel_ntb_alt_reg xeon_pri_reg;
 static const struct intel_ntb_alt_reg xeon_sec_reg;
@@ -184,15 +178,6 @@ static inline void _iowrite64(u64 val, void __iomem *mmio)
 #endif
 #endif
 
-static inline int pdev_is_atom(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
-		return 1;
-	}
-	return 0;
-}
-
 static inline int pdev_is_xeon(struct pci_dev *pdev)
 {
 	switch (pdev->device) {
@@ -1006,8 +991,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
 {
 	struct intel_ntb_dev *ndev = filp->private_data;
 
-	if (pdev_is_xeon(ndev->ntb.pdev) ||
-	    pdev_is_atom(ndev->ntb.pdev))
+	if (pdev_is_xeon(ndev->ntb.pdev))
 		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
 	else if (pdev_is_skx_xeon(ndev->ntb.pdev))
 		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
@@ -1439,242 +1423,6 @@ static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
 			       ndev->peer_reg->spad);
 }
 
-/* ATOM */
-
-static u64 atom_db_ioread(void __iomem *mmio)
-{
-	return ioread64(mmio);
-}
-
-static void atom_db_iowrite(u64 bits, void __iomem *mmio)
-{
-	iowrite64(bits, mmio);
-}
-
-static int atom_poll_link(struct intel_ntb_dev *ndev)
-{
-	u32 ntb_ctl;
-
-	ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
-
-	if (ntb_ctl == ndev->ntb_ctl)
-		return 0;
-
-	ndev->ntb_ctl = ntb_ctl;
-
-	ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
-
-	return 1;
-}
-
-static int atom_link_is_up(struct intel_ntb_dev *ndev)
-{
-	return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
-}
-
-static int atom_link_is_err(struct intel_ntb_dev *ndev)
-{
-	if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
-	    & ATOM_LTSSMSTATEJMP_FORCEDETECT)
-		return 1;
-
-	if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
-	    & ATOM_IBIST_ERR_OFLOW)
-		return 1;
-
-	return 0;
-}
-
-static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
-{
-	struct device *dev = &ndev->ntb.pdev->dev;
-
-	switch (ppd & ATOM_PPD_TOPO_MASK) {
-	case ATOM_PPD_TOPO_B2B_USD:
-		dev_dbg(dev, "PPD %d B2B USD\n", ppd);
-		return NTB_TOPO_B2B_USD;
-
-	case ATOM_PPD_TOPO_B2B_DSD:
-		dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
-		return NTB_TOPO_B2B_DSD;
-
-	case ATOM_PPD_TOPO_PRI_USD:
-	case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
-	case ATOM_PPD_TOPO_SEC_USD:
-	case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
-		dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
-		return NTB_TOPO_NONE;
-	}
-
-	dev_dbg(dev, "PPD %d invalid\n", ppd);
-	return NTB_TOPO_NONE;
-}
-
-static void atom_link_hb(struct work_struct *work)
-{
-	struct intel_ntb_dev *ndev = hb_ndev(work);
-	struct device *dev = &ndev->ntb.pdev->dev;
-	unsigned long poll_ts;
-	void __iomem *mmio;
-	u32 status32;
-
-	poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
-
-	/* Delay polling the link status if an interrupt was received,
-	 * unless the cached link status says the link is down.
-	 */
-	if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
-		schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
-		return;
-	}
-
-	if (atom_poll_link(ndev))
-		ntb_link_event(&ndev->ntb);
-
-	if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
-		schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
-		return;
-	}
-
-	/* Link is down with error: recover the link! */
-
-	mmio = ndev->self_mmio;
-
-	/* Driver resets the NTB ModPhy lanes - magic! */
-	iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
-	iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
-	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
-	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
-
-	/* Driver waits 100ms to allow the NTB ModPhy to settle */
-	msleep(100);
-
-	/* Clear AER Errors, write to clear */
-	status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
-	dev_dbg(dev, "ERRCORSTS = %x\n", status32);
-	status32 &= PCI_ERR_COR_REP_ROLL;
-	iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
-
-	/* Clear unexpected electrical idle event in LTSSM, write to clear */
-	status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
-	dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
-	status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
-	iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
-
-	/* Clear DeSkew Buffer error, write to clear */
-	status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
-	dev_dbg(dev, "DESKEWSTS = %x\n", status32);
-	status32 |= ATOM_DESKEWSTS_DBERR;
-	iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
-
-	status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
-	dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
-	status32 &= ATOM_IBIST_ERR_OFLOW;
-	iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
-
-	/* Releases the NTB state machine to allow the link to retrain */
-	status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
-	dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
-	status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
-	iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
-
-	/* There is a potential race between the 2 NTB devices recovering at the
-	 * same time.  If the times are the same, the link will not recover and
-	 * the driver will be stuck in this loop forever.  Add a random interval
-	 * to the recovery time to prevent this race.
-	 */
-	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
-			      + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
-}
-
-static int atom_init_isr(struct intel_ntb_dev *ndev)
-{
-	int rc;
-
-	rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
-			   ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
-	if (rc)
-		return rc;
-
-	/* ATOM doesn't have link status interrupt, poll on that platform */
-	ndev->last_ts = jiffies;
-	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
-	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
-
-	return 0;
-}
-
-static void atom_deinit_isr(struct intel_ntb_dev *ndev)
-{
-	cancel_delayed_work_sync(&ndev->hb_timer);
-	ndev_deinit_isr(ndev);
-}
-
-static int atom_init_ntb(struct intel_ntb_dev *ndev)
-{
-	ndev->mw_count = ATOM_MW_COUNT;
-	ndev->spad_count = ATOM_SPAD_COUNT;
-	ndev->db_count = ATOM_DB_COUNT;
-
-	switch (ndev->ntb.topo) {
-	case NTB_TOPO_B2B_USD:
-	case NTB_TOPO_B2B_DSD:
-		ndev->self_reg = &atom_pri_reg;
-		ndev->peer_reg = &atom_b2b_reg;
-		ndev->xlat_reg = &atom_sec_xlat;
-
-		/* Enable Bus Master and Memory Space on the secondary side */
-		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
-			  ndev->self_mmio + ATOM_SPCICMD_OFFSET);
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
-
-	return 0;
-}
-
-static int atom_init_dev(struct intel_ntb_dev *ndev)
-{
-	u32 ppd;
-	int rc;
-
-	rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
-	if (rc)
-		return -EIO;
-
-	ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
-	if (ndev->ntb.topo == NTB_TOPO_NONE)
-		return -EINVAL;
-
-	rc = atom_init_ntb(ndev);
-	if (rc)
-		return rc;
-
-	rc = atom_init_isr(ndev);
-	if (rc)
-		return rc;
-
-	if (ndev->ntb.topo != NTB_TOPO_SEC) {
-		/* Initiate PCI-E link training */
-		rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
-					    ppd | ATOM_PPD_INIT_LINK);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static void atom_deinit_dev(struct intel_ntb_dev *ndev)
-{
-	atom_deinit_isr(ndev);
-}
-
 /* Skylake Xeon NTB */
 
 static int skx_poll_link(struct intel_ntb_dev *ndev)
@@ -2658,24 +2406,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
 
 	node = dev_to_node(&pdev->dev);
 
-	if (pdev_is_atom(pdev)) {
-		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
-		if (!ndev) {
-			rc = -ENOMEM;
-			goto err_ndev;
-		}
-
-		ndev_init_struct(ndev, pdev);
-
-		rc = intel_ntb_init_pci(ndev, pdev);
-		if (rc)
-			goto err_init_pci;
-
-		rc = atom_init_dev(ndev);
-		if (rc)
-			goto err_init_dev;
-
-	} else if (pdev_is_xeon(pdev)) {
+	if (pdev_is_xeon(pdev)) {
 		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
 		if (!ndev) {
 			rc = -ENOMEM;
@@ -2731,9 +2462,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
 
 err_register:
 	ndev_deinit_debugfs(ndev);
-	if (pdev_is_atom(pdev))
-		atom_deinit_dev(ndev);
-	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
+	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
 		xeon_deinit_dev(ndev);
 err_init_dev:
 	intel_ntb_deinit_pci(ndev);
@@ -2749,41 +2478,12 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
 
 	ntb_unregister_device(&ndev->ntb);
 	ndev_deinit_debugfs(ndev);
-	if (pdev_is_atom(pdev))
-		atom_deinit_dev(ndev);
-	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
+	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
 		xeon_deinit_dev(ndev);
 	intel_ntb_deinit_pci(ndev);
 	kfree(ndev);
 }
 
-static const struct intel_ntb_reg atom_reg = {
-	.poll_link		= atom_poll_link,
-	.link_is_up		= atom_link_is_up,
-	.db_ioread		= atom_db_ioread,
-	.db_iowrite		= atom_db_iowrite,
-	.db_size		= sizeof(u64),
-	.ntb_ctl		= ATOM_NTBCNTL_OFFSET,
-	.mw_bar			= {2, 4},
-};
-
-static const struct intel_ntb_alt_reg atom_pri_reg = {
-	.db_bell		= ATOM_PDOORBELL_OFFSET,
-	.db_mask		= ATOM_PDBMSK_OFFSET,
-	.spad			= ATOM_SPAD_OFFSET,
-};
-
-static const struct intel_ntb_alt_reg atom_b2b_reg = {
-	.db_bell		= ATOM_B2B_DOORBELL_OFFSET,
-	.spad			= ATOM_B2B_SPAD_OFFSET,
-};
-
-static const struct intel_ntb_xlat_reg atom_sec_xlat = {
-	/* FIXME : .bar0_base	= ATOM_SBAR0BASE_OFFSET, */
-	/* FIXME : .bar2_limit	= ATOM_SBAR2LMT_OFFSET, */
-	.bar2_xlat		= ATOM_SBAR2XLAT_OFFSET,
-};
-
 static const struct intel_ntb_reg xeon_reg = {
 	.poll_link		= xeon_poll_link,
 	.link_is_up		= xeon_link_is_up,
@@ -2940,7 +2640,6 @@ static const struct file_operations intel_ntb_debugfs_info = {
 };
 
 static const struct pci_device_id intel_ntb_pci_tbl[] = {
-	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},