// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
/*
* PHB4: PCI Host Bridge 4, in POWER9
*
* Copyright 2013-2019 IBM Corp.
* Copyright 2018 Raptor Engineering, LLC
*/
/*
*
* FIXME:
* More stuff for EEH support:
* - PBCQ error reporting interrupt
* - I2C-based power management (replacing SHPC)
* - Directly detect fenced PHB through one dedicated HW reg
*/
/*
* This is a simplified view of the PHB4 reset and link training steps
*
* Step 1:
* - Check for hotplug status:
* o PHB_PCIE_HOTPLUG_STATUS bit PHB_PCIE_HPSTAT_PRESENCE
* o If not set -> Bail out (Slot is empty)
*
* Step 2:
* - Do complete PHB reset:
* o PHB/ETU reset procedure
*
* Step 3:
* - Drive PERST active (skip if already asserted, i.e. after a cold reboot)
* - Wait 250ms (for cards to reset)
* o PowerVM has used 250ms for a long time without any problems
*
* Step 4:
* - Drive PERST inactive
*
* Step 5:
* - Look for inband presence:
* o From PERST we have two stages to get inband presence detected
* 1) Devices must enter Detect state within 20 ms of the end of
* Fundamental Reset
* 2) Receiver detect pulses occur every 12ms
* - Hence the minimum wait time is 20 + 12 = 32ms
* o Unfortunately, we've seen cards take 440ms
* o Hence we are conservative and poll here for 1000ms (> 440ms)
* - If no inband presence after 1000ms -> Bail out (Slot is broken)
* o PHB_PCIE_DLP_TRAIN_CTL bit PHB_PCIE_DLP_INBAND_PRESENCE
*
* Step 6:
* - Look for link training done:
* o PHB_PCIE_DLP_TRAIN_CTL bit PHB_PCIE_DLP_TL_LINKACT
* - If not set after 2000ms, Retry (3 times) -> Goto Step 2
* o A phy lockup can cause link training failure, hence we go back to a
* complete PHB reset on retry
* o This is not expected to happen very often
*
* Step 7:
* - Wait for 1 sec (before touching device config space):
* - From PCIe spec:
* Root Complex and/or system software must allow at least 1.0 s after
* a Conventional Reset of a device, before it may determine that a
* device which fails to return a Successful Completion status for a
* valid Configuration Request is a broken device.
*
* Step 8:
* - Sanity check for fence and link still up:
* o If fenced or link down, Retry (3 times) -> Goto Step 2
* o This is not necessary but takes no time and can be useful
* o Once we leave here, it is much harder to recover from errors
*
* Step 9:
* - Check for optimised link for directly attached devices:
* o Wait for CRS (so we can read device config space)
* o Check that the chip and device are in the whitelist; if not, Goto Step 10
* o If the trained link speed is degraded, retry -> Goto Step 2
* o If the trained link width is degraded, retry -> Goto Step 2
* o If still degraded after 3 retries, give up -> Goto Step 10
*
* Step 10:
* - PHB good, start probing config space.
* o core/pci.c: pci_reset_phb() -> pci_scan_phb()
*/
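/*
* Purely as an illustration of the retry structure above (this is NOT
* the driver's actual control flow, which is spread across the slot
* state machine below; all names here are made up):
*
* for (retry = 0; retry < 3; retry++) {
* full_phb_reset(); // Step 2
* assert_perst(); // Step 3
* wait_ms(250);
* deassert_perst(); // Step 4
* if (!poll_inband_presence(1000)) // Step 5
* return SLOT_BROKEN;
* if (!poll_link_trained(2000)) // Step 6
* continue; // -> Step 2
* wait_ms(1000); // Step 7
* if (fenced_or_link_down()) // Step 8
* continue; // -> Step 2
* if (link_ok_or_not_whitelisted()) // Step 9
* break; // -> Step 10
* }
*/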
#undef NO_ASB
#undef LOG_CFG
#include <skiboot.h>
#include <io.h>
#include <timebase.h>
#include <pci.h>
#include <pci-cfg.h>
#include <pci-slot.h>
#include <vpd.h>
#include <interrupts.h>
#include <opal.h>
#include <cpu.h>
#include <device.h>
#include <ccan/str/str.h>
#include <ccan/array_size/array_size.h>
#include <xscom.h>
#include <affinity.h>
#include <phb4.h>
#include <phb4-regs.h>
#include <phb4-capp.h>
#include <capp.h>
#include <fsp.h>
#include <chip.h>
#include <chiptod.h>
#include <xive.h>
#include <xscom-p9-regs.h>
#include <phys-map.h>
#include <nvram.h>
/* Enable this to disable error interrupts for debug purposes */
#undef DISABLE_ERR_INTS
static void phb4_init_hw(struct phb4 *p);
#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB#%04x[%d:%d]: " fmt, \
(p)->phb.opal_id, (p)->chip_id, \
(p)->index, ## a)
#define PHBINF(p, fmt, a...) prlog(PR_INFO, "PHB#%04x[%d:%d]: " fmt, \
(p)->phb.opal_id, (p)->chip_id, \
(p)->index, ## a)
#define PHBNOTICE(p, fmt, a...) prlog(PR_NOTICE, "PHB#%04x[%d:%d]: " fmt, \
(p)->phb.opal_id, (p)->chip_id, \
(p)->index, ## a)
#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB#%04x[%d:%d]: " fmt, \
(p)->phb.opal_id, (p)->chip_id, \
(p)->index, ## a)
#ifdef LOG_CFG
#define PHBLOGCFG(p, fmt, a...) PHBDBG(p, fmt, ## a)
#else
#define PHBLOGCFG(p, fmt, a...) do {} while (0)
#endif
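/* Define LOG_CFG above to trace every config space access through
* PHBLOGCFG (at PR_DEBUG level); it is compiled out by default.
*/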
#define PHB4_CAN_STORE_EOI(p) XIVE_STORE_EOI_ENABLED
static bool pci_eeh_mmio;
static bool pci_retry_all;
static int rx_err_max = PHB4_RX_ERR_MAX;
/* Note: The "ASB" name is historical, practically this means access via
* the XSCOM backdoor
*/
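/* The backdoor is a two-step indirect window: write the register offset
* to XETU_HV_IND_ADDRESS, then read or write XETU_HV_IND_DATA. Unlike
* plain MMIO it keeps working when the PHB's AIB path is fenced, which
* is why config space access falls back to it (see PHB4_CFG_USE_ASB).
*/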
static inline uint64_t phb4_read_reg_asb(struct phb4 *p, uint32_t offset)
{
#ifdef NO_ASB
return in_be64(p->regs + offset);
#else
int64_t rc;
uint64_t addr, val;
/* Address register: must use 4 bytes for built-in config space.
*
* This path isn't usable for outbound configuration space
*/
if (((offset & 0xfffffffc) == PHB_CONFIG_DATA) && (offset & 3)) {
PHBERR(p, "XSCOM unaligned access to CONFIG_DATA unsupported\n");
return -1ull;
}
addr = XETU_HV_IND_ADDR_VALID | offset;
if ((offset >= 0x1000 && offset < 0x1800) || (offset == PHB_CONFIG_DATA))
addr |= XETU_HV_IND_ADDR_4B;
rc = xscom_write(p->chip_id, p->etu_xscom + XETU_HV_IND_ADDRESS, addr);
if (rc != 0) {
PHBERR(p, "XSCOM error addressing register 0x%x\n", offset);
return -1ull;
}
rc = xscom_read(p->chip_id, p->etu_xscom + XETU_HV_IND_DATA, &val);
if (rc != 0) {
PHBERR(p, "XSCOM error reading register 0x%x\n", offset);
return -1ull;
}
return val;
#endif
}
static inline void phb4_write_reg_asb(struct phb4 *p,
uint32_t offset, uint64_t val)
{
#ifdef NO_ASB
out_be64(p->regs + offset, val);
#else
int64_t rc;
uint64_t addr;
/* Address register: must use 4 bytes for built-in config space.
*
* This path isn't usable for outbound configuration space
*/
if (((offset & 0xfffffffc) == PHB_CONFIG_DATA) && (offset & 3)) {
PHBERR(p, "XSCOM access to CONFIG_DATA unsupported\n");
return;
}
addr = XETU_HV_IND_ADDR_VALID | offset;
if ((offset >= 0x1000 && offset < 0x1800) || (offset == PHB_CONFIG_DATA))
addr |= XETU_HV_IND_ADDR_4B;
rc = xscom_write(p->chip_id, p->etu_xscom + XETU_HV_IND_ADDRESS, addr);
if (rc != 0) {
PHBERR(p, "XSCOM error addressing register 0x%x\n", offset);
return;
}
rc = xscom_write(p->chip_id, p->etu_xscom + XETU_HV_IND_DATA, val);
if (rc != 0) {
PHBERR(p, "XSCOM error writing register 0x%x\n", offset);
return;
}
#endif
}
static uint64_t phb4_read_reg(struct phb4 *p, uint32_t offset)
{
/* No register accesses are permitted while in reset */
if (p->flags & PHB4_ETU_IN_RESET)
return -1ull;
if (p->flags & PHB4_CFG_USE_ASB)
return phb4_read_reg_asb(p, offset);
else
return in_be64(p->regs + offset);
}
static void phb4_write_reg(struct phb4 *p, uint32_t offset, uint64_t val)
{
/* No register accesses are permitted while in reset */
if (p->flags & PHB4_ETU_IN_RESET)
return;
if (p->flags & PHB4_CFG_USE_ASB)
phb4_write_reg_asb(p, offset, val);
else
out_be64(p->regs + offset, val);
}
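/* Note: the -1ull returned while in reset matches the all-ones pattern
* a fenced PHB gives back on MMIO reads, so callers can treat the two
* cases identically.
*/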
/* Helper to select an IODA table entry */
static inline void phb4_ioda_sel(struct phb4 *p, uint32_t table,
uint32_t addr, bool autoinc)
{
phb4_write_reg(p, PHB_IODA_ADDR,
(autoinc ? PHB_IODA_AD_AUTOINC : 0) |
SETFIELD(PHB_IODA_AD_TSEL, 0ul, table) |
SETFIELD(PHB_IODA_AD_TADR, 0ul, addr));
}
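/* Illustrative use of the auto-increment mode (the IODA3_TBL_* table
* ids come from phb4-regs.h): select the first entry once, then every
* access to PHB_IODA_DATA0 advances the index, e.g.:
*
* phb4_ioda_sel(p, IODA3_TBL_MIST, 0, true);
* for (i = 0; i < ARRAY_SIZE(p->mist_cache); i++)
* out_be64(p->regs + PHB_IODA_DATA0, p->mist_cache[i]);
*/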
/*
* Configuration space access
*
* The PHB lock is assumed to be already held
*/
static int64_t phb4_pcicfg_check(struct phb4 *p, uint32_t bdfn,
uint32_t offset, uint32_t size,
uint16_t *pe)
{
uint32_t sm = size - 1;
if (offset > 0xfff || bdfn > 0xffff)
return OPAL_PARAMETER;
if (offset & sm)
return OPAL_PARAMETER;
/* The root bus only has a device at 0 and we get into an
* error state if we try to probe beyond that, so let's
* avoid that and just return an error to Linux
*/
if (PCI_BUS_NUM(bdfn) == 0 && (bdfn & 0xff))
return OPAL_HARDWARE;
/* Check PHB state */
if (p->broken)
return OPAL_HARDWARE;
/* Fetch the PE# from cache */
*pe = be16_to_cpu(p->tbl_rtt[bdfn]);
return OPAL_SUCCESS;
}
static int64_t phb4_rc_read(struct phb4 *p, uint32_t offset, uint8_t sz,
void *data, bool use_asb)
{
uint32_t reg = offset & ~3;
uint32_t oval;
/* Some registers are handled locally */
switch (reg) {
/* Bridge base/limit registers are cached here as HW
* doesn't implement them (it hard codes values that
* will confuse a proper PCI implementation).
*/
case PCI_CFG_MEM_BASE: /* Includes PCI_CFG_MEM_LIMIT */
oval = p->rc_cache[(reg - 0x20) >> 2] & 0xfff0fff0;
break;
case PCI_CFG_PREF_MEM_BASE: /* Includes PCI_CFG_PREF_MEM_LIMIT */
oval = p->rc_cache[(reg - 0x20) >> 2] & 0xfff0fff0;
oval |= 0x00010001;
break;
case PCI_CFG_IO_BASE_U16: /* Includes PCI_CFG_IO_LIMIT_U16 */
oval = 0;
break;
case PCI_CFG_PREF_MEM_BASE_U32:
case PCI_CFG_PREF_MEM_LIMIT_U32:
oval = p->rc_cache[(reg - 0x20) >> 2];
break;
default:
oval = 0xffffffff; /* default if offset too big */
if (reg < PHB_RC_CONFIG_SIZE) {
if (use_asb)
oval = bswap_32(phb4_read_reg_asb(p, PHB_RC_CONFIG_BASE
+ reg));
else
oval = in_le32(p->regs + PHB_RC_CONFIG_BASE + reg);
}
}
/* Apply any post-read fixups */
switch (reg) {
case PCI_CFG_IO_BASE:
oval |= 0x01f1; /* Set IO base > limit to disable the window */
break;
}
switch (sz) {
case 1:
offset &= 3;
*((uint8_t *)data) = (oval >> (offset << 3)) & 0xff;
PHBLOGCFG(p, "000 CFG08 Rd %02x=%02x\n",
offset, *((uint8_t *)data));
break;
case 2:
offset &= 2;
*((uint16_t *)data) = (oval >> (offset << 3)) & 0xffff;
PHBLOGCFG(p, "000 CFG16 Rd %02x=%04x\n",
offset, *((uint16_t *)data));
break;
case 4:
*((uint32_t *)data) = oval;
PHBLOGCFG(p, "000 CFG32 Rd %02x=%08x\n",
offset, *((uint32_t *)data));
break;
default:
assert(false);
}
return OPAL_SUCCESS;
}
static int64_t phb4_rc_write(struct phb4 *p, uint32_t offset, uint8_t sz,
uint32_t val, bool use_asb)
{
uint32_t reg = offset & ~3;
uint32_t old, mask, shift, oldold;
int64_t rc;
if (reg >= PHB_RC_CONFIG_SIZE)
return OPAL_SUCCESS;
/* If size isn't 4-bytes, do a RMW cycle */
if (sz < 4) {
rc = phb4_rc_read(p, reg, 4, &old, use_asb);
if (rc != OPAL_SUCCESS)
return rc;
/*
* Since we have to Read-Modify-Write here, we need to filter
* out registers that have write-1-to-clear bits to prevent
* clearing stuff we shouldn't be. So for any register this
* applies to, mask out those bits.
*/
oldold = old;
switch(reg) {
case 0x1C: /* Secondary status */
old &= 0x00ffffff; /* mask out 24-31 */
break;
case 0x50: /* EC - Device status */
old &= 0xfff0ffff; /* mask out 16-19 */
break;
case 0x58: /* EC - Link status */
old &= 0x3fffffff; /* mask out 30-31 */
break;
case 0x78: /* EC - Link status 2 */
old &= 0xf000ffff; /* mask out 16-27 */
break;
/* These registers *only* have write-1-to-clear bits */
case 0x104: /* AER - Uncorr. error status */
case 0x110: /* AER - Corr. error status */
case 0x130: /* AER - Root error status */
case 0x180: /* P16 - status */
case 0x184: /* P16 - LDPM status */
case 0x188: /* P16 - FRDPM status */
case 0x18C: /* P16 - SRDPM status */
old &= 0x00000000;
break;
}
if (old != oldold) {
PHBLOGCFG(p, "Rewrote %x to %x for reg %x for W1C\n",
oldold, old, reg);
}
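/* Merge the partial write into the 32-bit word. For example, a 1-byte
* write of 0xAB to offset 0x51 gives shift = 8, mask = 0xff00 and
* val = (old & ~0xff00) | 0xab00.
*/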
if (sz == 1) {
shift = (offset & 3) << 3;
mask = 0xff << shift;
val = (old & ~mask) | ((val & 0xff) << shift);
} else {
shift = (offset & 2) << 3;
mask = 0xffff << shift;
val = (old & ~mask) | ((val & 0xffff) << shift);
}
}
/* Some registers are handled locally */
switch (reg) {
/* See comment in phb4_rc_read() */
case PCI_CFG_MEM_BASE: /* Includes PCI_CFG_MEM_LIMIT */
case PCI_CFG_PREF_MEM_BASE: /* Includes PCI_CFG_PREF_MEM_LIMIT */
case PCI_CFG_PREF_MEM_BASE_U32:
case PCI_CFG_PREF_MEM_LIMIT_U32:
p->rc_cache[(reg - 0x20) >> 2] = val;
break;
case PCI_CFG_IO_BASE_U16: /* Includes PCI_CFG_IO_LIMIT_U16 */
break;
default:
/* Workaround PHB config space enable */
PHBLOGCFG(p, "000 CFG%02d Wr %02x=%08x\n", 8 * sz, reg, val);
if (use_asb)
phb4_write_reg_asb(p, PHB_RC_CONFIG_BASE + reg, val);
else
out_le32(p->regs + PHB_RC_CONFIG_BASE + reg, val);
}
return OPAL_SUCCESS;
}
static int64_t phb4_pcicfg_read(struct phb4 *p, uint32_t bdfn,
uint32_t offset, uint32_t size,
void *data)
{
uint64_t addr, val64;
int64_t rc;
uint16_t pe;
bool use_asb = false;
rc = phb4_pcicfg_check(p, bdfn, offset, size, &pe);
if (rc)
return rc;
if (p->flags & PHB4_AIB_FENCED) {
if (!(p->flags & PHB4_CFG_USE_ASB))
return OPAL_HARDWARE;
if (bdfn != 0)
return OPAL_HARDWARE;
use_asb = true;
} else if ((p->flags & PHB4_CFG_BLOCKED) && bdfn != 0) {
return OPAL_HARDWARE;
}
/* Handle per-device filters */
rc = pci_handle_cfg_filters(&p->phb, bdfn, offset, size,
(uint32_t *)data, false);
if (rc != OPAL_PARTIAL)
return rc;
/* Handle root complex MMIO based config space */
if (bdfn == 0)
return phb4_rc_read(p, offset, size, data, use_asb);
addr = PHB_CA_ENABLE;
addr = SETFIELD(PHB_CA_BDFN, addr, bdfn);
addr = SETFIELD(PHB_CA_REG, addr, offset & ~3u);
addr = SETFIELD(PHB_CA_PE, addr, pe);
if (use_asb) {
phb4_write_reg_asb(p, PHB_CONFIG_ADDRESS, addr);
sync();
val64 = bswap_64(phb4_read_reg_asb(p, PHB_CONFIG_DATA));
switch(size) {
case 1:
*((uint8_t *)data) = val64 >> (8 * (offset & 3));
break;
case 2:
*((uint16_t *)data) = val64 >> (8 * (offset & 2));
break;
case 4:
*((uint32_t *)data) = val64;
break;
default:
return OPAL_PARAMETER;
}
} else {
out_be64(p->regs + PHB_CONFIG_ADDRESS, addr);
switch(size) {
case 1:
*((uint8_t *)data) =
in_8(p->regs + PHB_CONFIG_DATA + (offset & 3));
PHBLOGCFG(p, "%03x CFG08 Rd %02x=%02x\n",
bdfn, offset, *((uint8_t *)data));
break;
case 2:
*((uint16_t *)data) =
in_le16(p->regs + PHB_CONFIG_DATA + (offset & 2));
PHBLOGCFG(p, "%03x CFG16 Rd %02x=%04x\n",
bdfn, offset, *((uint16_t *)data));
break;
case 4:
*((uint32_t *)data) = in_le32(p->regs + PHB_CONFIG_DATA);
PHBLOGCFG(p, "%03x CFG32 Rd %02x=%08x\n",
bdfn, offset, *((uint32_t *)data));
break;
default:
return OPAL_PARAMETER;
}
}
return OPAL_SUCCESS;
}
#define PHB4_PCI_CFG_READ(size, type) \
static int64_t phb4_pcicfg_read##size(struct phb *phb, uint32_t bdfn, \
uint32_t offset, type *data) \
{ \
struct phb4 *p = phb_to_phb4(phb); \
\
/* Initialize data in case of error */ \
*data = (type)0xffffffff; \
return phb4_pcicfg_read(p, bdfn, offset, sizeof(type), data); \
}
static int64_t phb4_pcicfg_write(struct phb4 *p, uint32_t bdfn,
uint32_t offset, uint32_t size,
uint32_t data)
{
uint64_t addr;
int64_t rc;
uint16_t pe;
bool use_asb = false;
rc = phb4_pcicfg_check(p, bdfn, offset, size, &pe);
if (rc)
return rc;
if (p->flags & PHB4_AIB_FENCED) {
if (!(p->flags & PHB4_CFG_USE_ASB))
return OPAL_HARDWARE;
if (bdfn != 0)
return OPAL_HARDWARE;
use_asb = true;
} else if ((p->flags & PHB4_CFG_BLOCKED) && bdfn != 0) {
return OPAL_HARDWARE;
}
/* Handle per-device filters */
rc = pci_handle_cfg_filters(&p->phb, bdfn, offset, size,
(uint32_t *)&data, true);
if (rc != OPAL_PARTIAL)
return rc;
/* Handle root complex MMIO based config space */
if (bdfn == 0)
return phb4_rc_write(p, offset, size, data, use_asb);
addr = PHB_CA_ENABLE;
addr = SETFIELD(PHB_CA_BDFN, addr, bdfn);
addr = SETFIELD(PHB_CA_REG, addr, offset & ~3u);
addr = SETFIELD(PHB_CA_PE, addr, pe);
if (use_asb) {
/* We don't support ASB config space writes */
return OPAL_UNSUPPORTED;
} else {
out_be64(p->regs + PHB_CONFIG_ADDRESS, addr);
switch(size) {
case 1:
out_8(p->regs + PHB_CONFIG_DATA + (offset & 3), data);
break;
case 2:
out_le16(p->regs + PHB_CONFIG_DATA + (offset & 2), data);
break;
case 4:
out_le32(p->regs + PHB_CONFIG_DATA, data);
break;
default:
return OPAL_PARAMETER;
}
}
PHBLOGCFG(p, "%03x CFG%d Wr %02x=%08x\n", bdfn, 8 * size, offset, data);
return OPAL_SUCCESS;
}
#define PHB4_PCI_CFG_WRITE(size, type) \
static int64_t phb4_pcicfg_write##size(struct phb *phb, uint32_t bdfn, \
uint32_t offset, type data) \
{ \
struct phb4 *p = phb_to_phb4(phb); \
\
return phb4_pcicfg_write(p, bdfn, offset, sizeof(type), data); \
}
PHB4_PCI_CFG_READ(8, u8)
PHB4_PCI_CFG_READ(16, u16)
PHB4_PCI_CFG_READ(32, u32)
PHB4_PCI_CFG_WRITE(8, u8)
PHB4_PCI_CFG_WRITE(16, u16)
PHB4_PCI_CFG_WRITE(32, u32)
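/* The macros above expand to phb4_pcicfg_read{8,16,32}() and
* phb4_pcicfg_write{8,16,32}(), the fixed-size config space accessors
* wired into this PHB's ops table.
*/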
static int64_t phb4_get_reserved_pe_number(struct phb *phb)
{
struct phb4 *p = phb_to_phb4(phb);
return PHB4_RESERVED_PE_NUM(p);
}
static void phb4_root_port_init(struct phb *phb, struct pci_device *dev,
int ecap, int aercap)
{
struct phb4 *p = phb_to_phb4(phb);
struct pci_slot *slot = dev->slot;
uint16_t bdfn = dev->bdfn;
uint16_t val16;
uint32_t val32;
/*
* Use the PHB's callback so that UTL events will be masked or
* unmasked when the link is down or up.
*/
if (dev->slot && dev->slot->ops.prepare_link_change &&
phb->slot && phb->slot->ops.prepare_link_change)
dev->slot->ops.prepare_link_change =
phb->slot->ops.prepare_link_change;
// FIXME: check recommended init values for phb4
/*
* Enable the bridge slot capability in the root port's config
* space. This should probably be done *before* we start
* scanning config space, but we need a pci_device struct to
* exist before we do a slot lookup so *faaaaaaaaaaaaaart*
*/
if (slot && slot->pluggable && slot->power_limit) {
uint64_t val;
val = in_be64(p->regs + PHB_PCIE_SCR);
val |= PHB_PCIE_SCR_SLOT_CAP;
out_be64(p->regs + PHB_PCIE_SCR, val);
/* update the cached slotcap */
pci_cfg_read32(phb, bdfn, ecap + PCICAP_EXP_SLOTCAP,
&slot->slot_cap);
}
/* Enable SERR and parity checking */
pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
val16 |= (PCI_CFG_CMD_SERR_EN | PCI_CFG_CMD_PERR_RESP |
PCI_CFG_CMD_MEM_EN);
pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
/* Enable reporting various errors */
if (!ecap) return;
pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
PCICAP_EXP_DEVCTL_NFE_REPORT |
PCICAP_EXP_DEVCTL_FE_REPORT |
PCICAP_EXP_DEVCTL_UR_REPORT);
pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
if (!aercap) return;
/* Mask various unrecoverable errors */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, &val32);
val32 |= (PCIECAP_AER_UE_MASK_POISON_TLP |
PCIECAP_AER_UE_MASK_COMPL_TIMEOUT |
PCIECAP_AER_UE_MASK_COMPL_ABORT |
PCIECAP_AER_UE_MASK_ECRC);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, val32);
/* Report various unrecoverable errors as fatal errors */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, &val32);
val32 |= (PCIECAP_AER_UE_SEVERITY_DLLP |
PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
PCIECAP_AER_UE_SEVERITY_UNEXP_COMPL |
PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
/* Mask various recoverable errors */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, &val32);
val32 |= PCIECAP_AER_CE_MASK_ADV_NONFATAL;
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
/* Enable ECRC check */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
PCIECAP_AER_CAPCTL_ECRCC_EN);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
/* Enable all error reporting */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, &val32);
val32 |= (PCIECAP_AER_RERR_CMD_FE |
PCIECAP_AER_RERR_CMD_NFE |
PCIECAP_AER_RERR_CMD_CE);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, val32);
}
static void phb4_switch_port_init(struct phb *phb,
struct pci_device *dev,
int ecap, int aercap)
{
uint16_t bdfn = dev->bdfn;
uint16_t val16;
uint32_t val32;
// FIXME: update AER settings for phb4
/* Enable SERR and parity checking and disable INTx */
pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
val16 |= (PCI_CFG_CMD_PERR_RESP |
PCI_CFG_CMD_SERR_EN |
PCI_CFG_CMD_INTx_DIS);
pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
/* Disable parity error reporting and enable system error reporting */
pci_cfg_read16(phb, bdfn, PCI_CFG_BRCTL, &val16);
val16 &= ~PCI_CFG_BRCTL_PERR_RESP_EN;
val16 |= PCI_CFG_BRCTL_SERR_EN;
pci_cfg_write16(phb, bdfn, PCI_CFG_BRCTL, val16);
/* Enable reporting various errors */
if (!ecap) return;
pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
PCICAP_EXP_DEVCTL_NFE_REPORT |
PCICAP_EXP_DEVCTL_FE_REPORT);
/* HW279570 - Disable reporting of correctable errors */
val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
/* Unmask all unrecoverable errors */
if (!aercap) return;
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, 0x0);
/* Severity of unrecoverable errors */
if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT)
val32 = (PCIECAP_AER_UE_SEVERITY_DLLP |
PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP |
PCIECAP_AER_UE_SEVERITY_INTERNAL);
else
val32 = (PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
PCIECAP_AER_UE_SEVERITY_INTERNAL);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
/*
* Mask various correctable errors
*/
val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
/* Enable ECRC generation and disable ECRC check */
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
val32 |= PCIECAP_AER_CAPCTL_ECRCG_EN;
val32 &= ~PCIECAP_AER_CAPCTL_ECRCC_EN;
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
}
static void phb4_endpoint_init(struct phb *phb,
struct pci_device *dev,
int ecap, int aercap)
{
uint16_t bdfn = dev->bdfn;
uint16_t val16;
uint32_t val32;
/* Enable SERR and parity checking */
pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
val16 |= (PCI_CFG_CMD_PERR_RESP |
PCI_CFG_CMD_SERR_EN);
pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
/* Enable reporting various errors */
if (!ecap) return;
pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
val16 |= (PCICAP_EXP_DEVCTL_NFE_REPORT |
PCICAP_EXP_DEVCTL_FE_REPORT |
PCICAP_EXP_DEVCTL_UR_REPORT);
pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
/* Enable ECRC generation and check */
if (!aercap)
return;
pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
PCIECAP_AER_CAPCTL_ECRCC_EN);
pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
}
static int64_t phb4_pcicfg_no_dstate(void *dev __unused,
struct pci_cfg_reg_filter *pcrf,
uint32_t offset, uint32_t len __unused,
uint32_t *data __unused, bool write)
{
uint32_t loff = offset - pcrf->start;
/* Disable D-state change on children of the PHB. For now we
* simply block all writes to the PM control/status
*/
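/* Offsets 4-5 within the PM capability are the PMCSR, where the
* D-state is set; returning OPAL_SUCCESS swallows the write, while
* OPAL_PARTIAL lets everything else fall through to normal handling.
*/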
if (write && loff >= 4 && loff < 6)
return OPAL_SUCCESS;
return OPAL_PARTIAL;
}
void phb4_pec2_dma_engine_realloc(struct phb4 *p)
{
uint64_t reg;
/*
* Allocate 16 extra dma read engines to stack 0, to boost dma
* performance for devices on stack 0 of PEC2, i.e. PHB3.
* It comes at a price of reduced read engine allocation for
* devices on stack 1 and 2. The engine allocation becomes
* 48/8/8 instead of the default 32/16/16.
*
* The reallocation magic value should be 0xffff0000ff008000,
* but per the PCI designers, dma engine 32 (bit 0) has a
* quirk, and 0x7fff80007F008000 has the same effect (engine
* 32 goes to PHB4).
*/
if (p->index != 3) /* shared slot on PEC2 */
return;
PHBINF(p, "Allocating an extra 16 dma read engines on PEC2 stack0\n");
reg = 0x7fff80007F008000ULL;
xscom_write(p->chip_id,
p->pci_xscom + XPEC_PCI_PRDSTKOVR, reg);
xscom_write(p->chip_id,
p->pe_xscom + XPEC_NEST_READ_STACK_OVERRIDE, reg);
}
static void phb4_check_device_quirks(struct pci_device *dev)
{
/* Some special adapter tweaks for devices directly under the PHB */
if (dev->primary_bus != 1)
return;
/* PM quirk */
if (!pci_has_cap(dev, PCI_CFG_CAP_ID_PM, false))
return;
pci_add_cfg_reg_filter(dev,
pci_cap(dev, PCI_CFG_CAP_ID_PM, false), 8,
PCI_REG_FLAG_WRITE,
phb4_pcicfg_no_dstate);
}
static int phb4_device_init(struct phb *phb, struct pci_device *dev,
void *data __unused)
{
int ecap, aercap;
/* Setup special device quirks */
phb4_check_device_quirks(dev);
/* Common initialization for the device */
pci_device_init(phb, dev);
ecap = pci_cap(dev, PCI_CFG_CAP_ID_EXP, false);
aercap = pci_cap(dev, PCIECAP_ID_AER, true);
if (dev->dev_type == PCIE_TYPE_ROOT_PORT)
phb4_root_port_init(phb, dev, ecap, aercap);
else if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT ||
dev->dev_type == PCIE_TYPE_SWITCH_DNPORT)
phb4_switch_port_init(phb, dev, ecap, aercap);
else
phb4_endpoint_init(phb, dev, ecap, aercap);
return 0;
}
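/* phb4_device_init() is also re-run on a single device by
* phb4_pci_reinit() below to handle OPAL_REINIT_PCI_DEV.
*/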
static int64_t phb4_pci_reinit(struct phb *phb, uint64_t scope, uint64_t data)
{
struct pci_device *pd;
uint16_t bdfn = data;
int ret;
if (scope != OPAL_REINIT_PCI_DEV)
return OPAL_PARAMETER;
pd = pci_find_dev(phb, bdfn);
if (!pd)
return OPAL_PARAMETER;
ret = phb4_device_init(phb, pd, NULL);
if (ret)
return OPAL_HARDWARE;
return OPAL_SUCCESS;
}
/* Default value for MBT0, see comments in phb4_init_ioda_cache() */
static uint64_t phb4_default_mbt0(struct phb4 *p, unsigned int bar_idx)
{
uint64_t mbt0;
switch (p->mbt_size - bar_idx - 1) {
case 0:
mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 3);
break;
case 1:
mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 2);
break;
case 2:
mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
mbt0 = SETFIELD(IODA3_MBT0_MDT_COLUMN, mbt0, 1);
break;
default:
mbt0 = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_PE_SEG);
}
return mbt0;
}
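/* For example, with a 32-entry MBT the last three entries (29, 30, 31)
* map through MDT columns 1, 2 and 3 respectively, and every earlier
* entry defaults to PE-segmented mode.
*/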
/*
* Clear the saved (cached) IODA state.
*
* The caches here are used to save the configuration of the IODA tables
* done by the OS. When the PHB is reset it loses all of its internal state
* so we need to keep a copy to restore from. This function re-initialises
* the saved state to sane defaults.
*/
static void phb4_init_ioda_cache(struct phb4 *p)
{
uint32_t i;
/*
* The RTT entries (RTE) are supposed to be initialised to
* 0xFF which indicates an invalid PE# for that RTT index
* (the bdfn). However, we set them to the reserved PE# since Linux
* needs to find the devices first by scanning config space
* and this occurs before PEs have been assigned.
*/
for (i = 0; i < RTT_TABLE_ENTRIES; i++)
p->tbl_rtt[i] = cpu_to_be16(PHB4_RESERVED_PE_NUM(p));
memset(p->tbl_peltv, 0x0, p->tbl_peltv_size);
memset(p->tve_cache, 0x0, sizeof(p->tve_cache));
/* XXX Should we mask them ? */
memset(p->mist_cache, 0x0, sizeof(p->mist_cache));
/* Column 0 is left 0 and will be used for M32 and configured
* by the OS. We use MDT columns 1..3 for the last 3 BARs, thus
* allowing Linux to remap those, and set up all the other ones
* for now in mode 00 (segment# == PE#). By default those
* columns are set to map the same way.
*/
for (i = 0; i < p->max_num_pes; i++) {
p->mdt_cache[i] = SETFIELD(IODA3_MDT_PE_B, 0ull, i);
p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_C, 0ull, i);
p->mdt_cache[i] |= SETFIELD(IODA3_MDT_PE_D, 0ull, i);
}
/* Initialize MBT entries for BARs 1...N */
for (i = 1; i < p->mbt_size; i++) {
p->mbt_cache[i][0] = phb4_default_mbt0(p, i);
p->mbt_cache[i][1] = 0;
}
/* Initialize the M32 BAR using MBT entry 0, MDT column A */
p->mbt_cache[0][0] = SETFIELD(IODA3_MBT0_MODE, 0ull, IODA3_MBT0_MODE_MDT);
p->mbt_cache[0][0] |= SETFIELD(IODA3_MBT0_MDT_COLUMN, 0ull, 0);
p->mbt_cache[0][0] |= IODA3_MBT0_TYPE_M32 | (p->mm1_base & IODA3_MBT0_BASE_ADDR);
p->mbt_cache[0][1] = IODA3_MBT1_ENABLE | ((~(M32_PCI_SIZE - 1)) & IODA3_MBT1_MASK);
}
static int64_t phb4_wait_bit(struct phb4 *p, uint32_t reg,
uint64_t mask, uint64_t want_val)
{
uint64_t val;
/* Wait for the masked bits to reach the wanted value. Used, among
* other things, to wait for pending TCE kills to complete.
*
* XXX Add timeout...
*/
/* XXX SIMICS is nasty... */
if ((reg == PHB_TCE_KILL || reg == PHB_DMARD_SYNC) &&
chip_quirk(QUIRK_SIMICS))
return OPAL_SUCCESS;
for (;;) {
val = in_be64(p->regs + reg);
if (val == 0xffffffffffffffffull) {
/* XXX Fenced ? */
return OPAL_HARDWARE;
}