 
 #include "cxl.h"
 
-static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group,
-                                       bool sec_hash,
-                                       struct cxl_sste *secondary_group,
-                                       unsigned int *lru)
+static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
 {
-        unsigned int i, entry;
-        struct cxl_sste *sste, *group = primary_group;
-
-        for (i = 0; i < 2; i++) {
-                for (entry = 0; entry < 8; entry++) {
-                        sste = group + entry;
-                        if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
-                                return sste;
-                }
-                if (!sec_hash)
-                        break;
-                group = secondary_group;
+        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
+                (sste->esid_data == cpu_to_be64(slb->esid)));
+}
+
+/*
+ * This finds a free SSTE for the given SLB, or returns NULL if it's already in
+ * the segment table.
+ */
+static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
+                                       struct copro_slb *slb)
+{
+        struct cxl_sste *primary, *sste, *ret = NULL;
+        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
+        unsigned int entry;
+        unsigned int hash;
+
+        if (slb->vsid & SLB_VSID_B_1T)
+                hash = (slb->esid >> SID_SHIFT_1T) & mask;
+        else /* 256M */
+                hash = (slb->esid >> SID_SHIFT) & mask;
+
+        primary = ctx->sstp + (hash << 3);
+
+        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
+                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
+                        ret = sste;
+                if (sste_matches(sste, slb))
+                        return NULL;
         }
+        if (ret)
+                return ret;
+
         /* Nothing free, select an entry to cast out */
-        if (sec_hash && (*lru & 0x8))
-                sste = secondary_group + (*lru & 0x7);
-        else
-                sste = primary_group + (*lru & 0x7);
-        *lru = (*lru + 1) & 0xf;
+        ret = primary + ctx->sst_lru;
+        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
 
-        return sste;
+        return ret;
 }
 
 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
 {
         /* mask is the group index, we search primary and secondary here. */
-        unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
-        bool sec_hash = 1;
         struct cxl_sste *sste;
-        unsigned int hash;
         unsigned long flags;
 
-
-        sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
-
-        if (slb->vsid & SLB_VSID_B_1T)
-                hash = (slb->esid >> SID_SHIFT_1T) & mask;
-        else /* 256M */
-                hash = (slb->esid >> SID_SHIFT) & mask;
-
         spin_lock_irqsave(&ctx->sste_lock, flags);
-        sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash,
-                        ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru);
+        sste = find_free_sste(ctx, slb);
+        if (!sste)
+                goto out_unlock;
 
         pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                  sste - ctx->sstp, slb->vsid, slb->esid);
 
         sste->vsid_data = cpu_to_be64(slb->vsid);
         sste->esid_data = cpu_to_be64(slb->esid);
+out_unlock:
         spin_unlock_irqrestore(&ctx->sste_lock, flags);
 }
 
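For reference, the new lookup boils down to: hash the ESID into one 8-entry group of the segment table (each group is 8 entries of 16 bytes, i.e. 128 bytes, hence sst_size >> 7 groups), reuse the first invalid slot, return NULL if the segment is already present, and otherwise cast out an entry round-robin within that group. The standalone sketch below mirrors that logic in userspace; the type names, constant values and helpers (struct st, find_slot, etc.) are illustrative stand-ins rather than the kernel's definitions, and the big-endian conversions (cpu_to_be64/be64_to_cpu) are dropped for brevity.

/*
 * Standalone sketch of the new lookup with mock types in place of the cxl
 * structures.  Everything here is an illustrative assumption, not the kernel
 * code; endianness conversion is omitted.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V      0x0000000008000000ULL   /* "valid" bit (illustrative value) */
#define SLB_VSID_B_1T   0x4000000000000000ULL   /* 1T segment flag (illustrative value) */
#define SID_SHIFT       28                      /* 256M segments */
#define SID_SHIFT_1T    40                      /* 1T segments */

struct sste { uint64_t esid_data, vsid_data; }; /* one 16-byte segment table entry */

struct st {
        struct sste *sstp;      /* base of the segment table */
        size_t sst_size;        /* table size in bytes, power of two */
        unsigned int sst_lru;   /* next cast-out slot within a group */
};

/*
 * Mirror of the new find_free_sste(): hash the ESID into an 8-entry group,
 * reuse a free slot, return NULL on a duplicate, otherwise cast out
 * round-robin within that group.
 */
static struct sste *find_slot(struct st *st, uint64_t esid, uint64_t vsid)
{
        /* Each group is 8 entries * 16 bytes = 128 bytes, so sst_size >> 7 groups. */
        unsigned int mask = (st->sst_size >> 7) - 1;
        unsigned int shift = (vsid & SLB_VSID_B_1T) ? SID_SHIFT_1T : SID_SHIFT;
        struct sste *primary = st->sstp + (((esid >> shift) & mask) << 3);
        struct sste *free_slot = NULL;
        unsigned int i;

        for (i = 0; i < 8; i++) {
                struct sste *s = primary + i;
                if (!free_slot && !(s->esid_data & SLB_ESID_V))
                        free_slot = s;          /* remember the first invalid entry */
                if (s->esid_data == esid && s->vsid_data == vsid)
                        return NULL;            /* segment already present */
        }
        if (free_slot)
                return free_slot;

        /* Nothing free: cast out the next entry in this group, round-robin. */
        free_slot = primary + st->sst_lru;
        st->sst_lru = (st->sst_lru + 1) & 0x7;
        return free_slot;
}

int main(void)
{
        struct sste table[16] = { { 0, 0 } };   /* two 8-entry groups */
        struct st st = { table, sizeof(table), 0 };
        uint64_t esid = (1ULL << SID_SHIFT) | SLB_ESID_V, vsid = 42;

        struct sste *s = find_slot(&st, esid, vsid);
        if (s) {
                s->esid_data = esid;            /* populate as cxl_load_segment() would */
                s->vsid_data = vsid;
        }
        /* A second insert of the same segment now hits the duplicate check. */
        printf("second lookup: %p\n", (void *)find_slot(&st, esid, vsid));
        return 0;
}

The matching hunk in cxl_load_segment() above then simply skips the populate and drops the lock when find_free_sste() reports a duplicate.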