diff --git a/coresight-tools/cskern/pagemap.py b/coresight-tools/cskern/pagemap.py
index 5bda3cd..856e09d 100755
--- a/coresight-tools/cskern/pagemap.py
+++ b/coresight-tools/cskern/pagemap.py
@@ -6,6 +6,9 @@
 Since 4.2 this requires CAP_SYS_ADMIN.
 Users without this capability may see the PTE as zeroes.
 
+Note that if we're in a VM, we might only be seeing intermediate physical addresses (IPAs).
+Memory might not be physically backed at all, and PAs may change at any time.
+
 Copyright (C) ARM Ltd. 2019. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,6 +24,8 @@ limitations under the License.
 """
 
+from __future__ import print_function
+
 """
 Note that the "PA" is the physical address as seen by the OS.
 If running under virtualization it might be an IPA.
@@ -33,8 +38,6 @@
 """
 
-from __future__ import print_function
-
 import os, sys, struct
 
 
@@ -67,10 +70,16 @@ def is_swapped(self):
     def is_file_mapped(self):
         return self.bit(61)
 
+    def pa(self):
+        if self.is_present():
+            return self.pfn * self.page_size
+        else:
+            return None
+
     def __str__(self):
         s = "%03x " % (self.raw >> 52)
         if self.is_present():
-            s += "%16x" % (self.pfn * self.page_size)
+            s += "PA:%16x" % self.pa()
        else:
             s += "-"
         if self.bit(61):
@@ -82,9 +91,47 @@ def __str__(self):
         return s
 
 
+class PageMapping:
+    """
+    Mapping of one VA range to a PA range (if mapped).
+    """
+    def __init__(self, va=None):
+        self.n_pages = 1
+        self.va = va
+        self.pte = None
+        self.size = None
+
+    def is_mapped(self):
+        return self.pte.is_present()
+
+    def pa(self):
+        if self.is_mapped():
+            return self.pte.pa()
+        else:
+            return None
+
+    def end_pa(self):
+        pa = self.pa()
+        if pa is not None:
+            return pa + self.size
+        else:
+            return None
+
+    def __str__(self):
+        s = "VA:0x%x -> " % (self.va)
+        if self.is_mapped():
+            s += "PA:0x%x" % self.pa()
+        else:
+            s += "<unmapped>"
+        if self.n_pages > 1:
+            s += " (%u pages)" % self.n_pages
+        return s
+
+
 class PAMap:
     """
     Get the complete VA-to-PA mapping from /proc/self/pagemap.
+    This allows a VA to be looked up and translated to a PTE and then to a PA.
 
     We use OS file operations to avoid Python's buffering.
     """
@@ -96,6 +143,9 @@ def __init__(self, pid="self"):
         self.fn = "/proc/" + str(pid) + "/pagemap"
         self.fd = os.open(self.fn, os.O_RDONLY)
 
+    def round_down(self, addr):
+        return addr - (addr % self.page_size)
+
     def entry(self, va):
         """
         Get the kernel PTE for a virtual address.
@@ -114,8 +164,21 @@ def entry(self, va):
         assert len(ebs) == PTE.entry_size
         return PTE(ebs)
 
+    def mapping(self, va):
+        """
+        Get a PageMapping object for a given virtual address.
+        """
+        va = self.round_down(va)
+        m = PageMapping(va=va)
+        m.size = self.page_size
+        m.pte = self.entry(va)
+        return m
+
     def pa(self, va):
-        e = self.entry(va, size=self.page_size)
+        """
+        Translate a VA to a PA.
+        """
+        e = self.entry(va)
         if e.is_present():
             # if the PFN has been zeroed, we didn't have the right permissions
             assert e.pfn != 0, "PFN reads as zero: you don't have permissions for this operation"
@@ -123,32 +186,73 @@ def pa(self, va):
         else:
             return None
 
+    def pa_range(self, va, size):
+        """
+        Given a range of VAs, find all the physical pages spanning the range.
+        Return a list of PageMapping objects.
+        Currently we do this simplistically.
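+
+        Example (usage sketch; buf_va/buf_size stand for some caller-owned buffer):
+            for pm in PAMap().pa_range(buf_va, buf_size):
+                print(pm)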
+ """ + size += (va % self.page_size) + if (size % self.page_size) != 0: + size += (self.page_size - (size % self.page_size)) + n_pages = size // self.page_size + va = self.round_down(va) + maps = [] + for v in range(va, va+size, self.page_size): + m = self.mapping(v) + if m.is_mapped() and len(maps) >= 1 and m.pa() == maps[-1].end_pa(): + maps[-1].n_pages += 1 + maps[-1].size += self.page_size + elif not m.is_mapped() and len(maps) >= 1 and not maps[-1].is_mapped(): + maps[-1].n_pages += 1 + maps[-1].size += self.page_size + else: + maps.append(m) + return maps + def __del__(self): os.close(self.fd) class SystemRAMRange: + """ + A range of physical addresses known to the system and described in /proc/iomem. + """ def __init__(self, start, size): - self.start = start - self.size = size + self.start = start # Start PA + self.size = size # Size in bytes + self.index = -1 def contains(self, pa): return self.start <= pa and pa < (self.start + self.size) + def __str__(self): + return "#%d PA:0x%x (%uMb)" % (self.index, self.start, self.size/(1024*1024)) + def system_RAM_ranges(): - # Get the ranges of System RAM known to the OS + """ + Get the physical ranges of System RAM known to the OS, by reading /proc/iomem. + """ + page_size = os.sysconf("SC_PAGE_SIZE") + assert page_size != 0, "cannot determine system page size" f = open("/proc/iomem") for ln in f: ln = ln.strip('\n') if ln.endswith("System RAM"): - addrs = ln.split()[0] - (a0, a1) = addrs.split('-') + toks = ln.split(None, 2) + print(toks) + (a0, a1) = toks[0].split('-') astart = int(a0, 16) aend = int(a1, 16) - assert aend > astart + if astart == 0 and aend == 0: + # Kernel reports range as 00000000-00000000. We're not privileged enough. + print("error: /proc/iomem is not disclosing memory addresses. Run with increased privilege.", file=sys.stderr) + sys.exit(1) + assert aend > astart, "invalid system memory range: %s" % ln size = aend+1 - astart - assert (size % os.sysconf("SC_PAGE_SIZE")) == 0 + assert (astart % page_size) == 0, "error: /proc/iomem entry not %u-aligned" % (page_size, ln) + assert (size % page_size) == 0 yield SystemRAMRange(astart, size) f.close() @@ -164,6 +268,9 @@ def __init__(self): r.index = i def addr_index(self, pa): + """ + Given a PA, find the /proc/iomem range containing this PA. + """ for r in self.ranges: if r.contains(pa): return r @@ -199,8 +306,16 @@ def proc_maps(fn="/proc/self/maps"): yield (ln[:-1], int(addr, 16), int(aend, 16)) m = PAMap(pid=opts.p) sysram = SystemRAMMap() + # Scan the virtual memory ranges allocated to the target process. for (ln, vaddr, vaend) in proc_maps("/proc/" + pidstr + "/maps"): printed = False + # Scan this range in page-sized chunks. Each page may have a different mapping. 
+    assert (vaddr % m.page_size) == 0 and (vaend % m.page_size) == 0, "not 0x%x-aligned: %s" % (m.page_size, ln)
+    if True:
+        # Short-circuit: report this range using the new pa_range() scan, then stop.
+        maps = m.pa_range(vaddr, vaend-vaddr)
+        for pm in maps:
+            print("  %s" % pm)
+        sys.exit()
     while vaddr < vaend:
         if (vaddr + m.page_size) <= opts.address:
             # this entry is before the range we're interested in
@@ -214,13 +329,14 @@ def proc_maps(fn="/proc/self/maps"):
             print("%s:" % ln)
             printed = True
         if pte.pfn is not None:
+            # virtual memory is physically backed
             paddr = pte.pfn * m.page_size
-            sram_range = sysram.addr_index(paddr)
+            sram_range = sysram.addr_index(paddr)    # /proc/iomem entry containing this PA
         else:
             sram_range = None
-        print("  %16x %s" % (vaddr, pte), end="")
+        print("  VA=%16x PTE=%s" % (vaddr, pte), end="")
         if sram_range is not None:
-            print(" #%u: size=%uMb" % (sram_range.index, sram_range.size/(1024*1024)), end="")
+            print(" from: %s" % (sram_range), end="")
         print()
         assert pte is not None
         vaddr += m.page_size
diff --git a/coresight-tools/csscan.py b/coresight-tools/csscan.py
index a53f005..1b327eb 100755
--- a/coresight-tools/csscan.py
+++ b/coresight-tools/csscan.py
@@ -192,6 +192,17 @@ def decode_one_hot(x,n):
     return "?%s" % str(bs)
 
 
+class DeviceTimeout(Exception):
+    def __init__(self, dev, off, mask):
+        self.device = dev
+        self.off = off
+        self.mask = mask
+
+    def __str__(self):
+        return "device %s reg 0x%03x did not set 0x%08x" % (self.device, self.off, self.mask)
+
+
 class DevicePhy:
     """
     Access a memory-mapped device
@@ -306,7 +317,7 @@ class Device:
     A single CoreSight device mapped by a ROM table (including ROM tables themselves).
     """
 
-    def __init__(self, cs, addr, write=False, unlock=False):
+    def __init__(self, cs, addr, write=False, unlock=False, checking=False):
         """
         Construct a device object at the given address. 'cs' is the device map
         (e.g. virtual memory via CSROM(), or a MEM-AP) through which we access the device.
@@ -333,9 +344,9 @@ def __init__(self, cs, addr, write=False, unlock=False):
         self.part_number = self.PIDR & 0xfff
         self.devtype = None
         self.devarch = None
-        self.is_checking = (o_verbose >= 1)
+        self.is_checking = checking or (o_verbose >= 1)
         if self.is_coresight():
-            arch = self.read32(0xFBC)
+            arch = self.read32(0xFBC)    # DEVARCH
             if (arch & 0x00100000) != 0:
                 self.devarch = arch
             self.devtype = self.read32(0xFCC)
@@ -372,7 +383,7 @@ def address_string(self):
         A string describing how to locate this device.
         """
         s = "@0x%x" % self.base_address
-        if self.phy.memap is not None:
+        if self.phy is not None and self.phy.memap is not None:
             s = self.phy.memap.memap.address_string() + "." + s
         return s
 
@@ -413,6 +424,27 @@ def read32(self, off):
             print("  = 0x%08x" % x)
         return x
 
+    def test32(self, off, mask):
+        return (self.read32(off) & mask) == mask
+
+    def wait(self, off, mask, timeout=0):
+        """
+        Wait for a bit to become set.
+        Raise an exception if it isn't set within the timeout.
+        With no timeout specified, we just retry a few times before giving up.
+        """
+        default_iters = 10
+        for i in range(0, default_iters):
+            if self.test32(off, mask):
+                return
+        # Taking some time: switch to timeout mode.
+        if timeout > 0:
+            t = time.time() + timeout
+            while time.time() < t:
+                if self.test32(off, mask):
+                    return
+        raise DeviceTimeout(self, off, mask)
+
     def do_check(self, check):
         # We can read-back to check that the write has taken effect.
         # But not when the caller has indicated that the register is volatile.
@@ -461,6 +493,11 @@ def read32x2(self, hi, lo):
         # where special action is needed to return a consistent result.
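         # One such action (illustrative sketch, not used here) is to re-read the
         # high word until it is stable around the low-word read:
         #   h = self.read32(hi); l = self.read32(lo)
         #   while self.read32(hi) != h:
         #       h = self.read32(hi); l = self.read32(lo)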
         return (self.read32(hi) << 32) | self.read32(lo)
 
+    def write32x2(self, hi, lo, value):
+        # Write a 64-bit value to hi and lo registers, non-atomically.
+        self.write32(hi, value >> 32)
+        self.write32(lo, value & 0xffffffff)
+
     def read64(self, off):
         # assume little-endian
         return self.read32x2(off+4,off)
@@ -765,20 +802,26 @@ class DevMem:
     """
     Access physical memory via /dev/mem. This object creates mappings into
     page-aligned regions of physical address space.
+
+    Object construction will raise PermissionError if not privileged.
     """
     def __init__(self):
         self.page_size = os.sysconf("SC_PAGE_SIZE")
         self.fd = None
+        if os.path.exists("/dev/csmem"):
+            devmem = "/dev/csmem"
+        else:
+            devmem = "/dev/mem"
         try:
             # This may fail because not present or access-restricted.
-            self.fd = open("/dev/mem", "r+b")
-        except:
-            try:
-                self.fd = open("/dev/csmem", "r+b")
-            except:
-                #print("Can't access /dev/mem or /dev/csmem - are you running as superuser?")
-                raise
+            self.fd = open(devmem, "r+b")
+        except FileNotFoundError:
+            print("physical memory device %s not found - kernel may need rebuilding with /dev/mem support" % devmem, file=sys.stderr)
+            raise
+        except PermissionError:
+            print("can't access %s - try running as superuser" % devmem, file=sys.stderr)
+            raise
         self.fno = self.fd.fileno()
         self.n_mappings = 0
@@ -1805,36 +1848,40 @@ def print_words(d, off, n):
             is_ETF = False
         # Mode is a programming choice: e.g. is it set up as a circular buffer or a draining FIFO?
         mode = d.read32(0x028) & 3
-        print("  mode: %s" % ["circular buffer","software FIFO","hardware FIFO","?3"][mode])
+        print("  mode:           %s" % ["circular buffer","software FIFO","hardware FIFO","?3"][mode])
         if is_ETR:
             axi_control = d.read32(0x110)
-            print("  AXI control: 0x%08x" % axi_control)
-            scatter_gather = bit(axi_control,7)
+            print("  AXI control:    0x%08x" % axi_control)
+            scatter_gather = bit(axi_control,7)    # n/a in SoC-600 TMC?
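+            # Worked example (illustrative): the RAM size register RSZ (0x004)
+            # counts 32-bit words, so RSZ = 0x1000 means a 16KB trace buffer.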
             etr_memory = d.read64(0x118)    # DBALO, DBAHI
             if not scatter_gather:
                 # base address of trace buffer in system memory
                 print("  buffer address: 0x%x" % etr_memory)
-                print("  buffer size: 0x%x" % (d.read32(0x004)*4))
+                print("  buffer size:    0x%x" % (d.read32(0x004)*4))
             else:
                 # address of first page table entry in linked list
                 print("  scatter-gather table: 0x%x" % etr_memory)
                 # ideally we'd read the scatter-gather table from physical memory,
                 # to show where the ETR was actually writing the data
+        ctl = d.read32(0x020)
+        TraceCaptEn = bit(ctl, 0)
+        print("  control:        0x%08x  %s" % (ctl, bits_set(ctl,{0:"TraceCaptEn"})))
         ffcr = d.read32(0x304)
         ffcr_map = {0:"formatting",1:"format-triggers",4:"FOnFlIn",5:"flush-on-trigger",6:"FlushMan",12:"stop-on-flush",13:"stop-on-trigger"}
-        print("  flush control: %s" % bits_set(ffcr,ffcr_map))
+        print("  flush control:  0x%08x  %s" % (ffcr, bits_set(ffcr,ffcr_map)))
         # from here, report current status
-        TraceCaptEn = bit(d.read32(0x020), 0)
-        status = d.read32(0x00C)
+        ffsr = d.read32(0x300)
+        status = d.read32(0x00C)    # STS
+        print("  status:         0x%08x" % status, end="")
         if not is_TMC:
-            print("  status: %s" % bits_set(status,{0:"Full",1:"Triggered",2:"AcqComp",3:"FtEmpty"}))
-            print("  state: %s" % ["disabled","enabled"][TraceCaptEn])
+            print("  %s" % bits_set(status,{0:"Full",1:"Triggered",2:"AcqComp",3:"FtEmpty"}))
+            print("  state:          %s" % ["disabled","enabled"][TraceCaptEn])
         else:
-            print("  status: %s" % bits_set(status,{0:"Full",1:"Triggered",2:"TMCready",3:"FtEmpty",4:"Empty",5:"MemErr"}))
+            print("  %s" % bits_set(status,{0:"Full",1:"Triggered",2:"TMCready",3:"FtEmpty",4:"Empty",5:"MemErr"}))
             TMCReady = bit(status,2)
             if not TraceCaptEn:
                 if not TMCReady:
-                    tmcstate = "Disabling (CTL=0x%08x, STS=0x%08x, FFCR=0x%08x, FFSR=0x%08x, RRP=0x%08x, RWP=0x%08x)" % (d.read32(0x020), status, ffcr, d.read32(0x300), d.read32(0x014), d.read32(0x018))
+                    tmcstate = "Disabling (CTL=0x%08x, STS=0x%08x, FFCR=0x%08x, FFSR=0x%08x, RRP=0x%08x, RWP=0x%08x)" % (ctl, status, ffcr, ffsr, d.read32(0x014), d.read32(0x018))
                 else:
                     tmcstate = "Disabled"
             else:
@@ -1846,7 +1893,12 @@ def print_words(d, off, n):
                     tmcstate = "Running/Stopping"
                 else:
                     tmcstate = "Stopped"
-            print("  state: %s" % tmcstate)
+            print("  state:          %s" % tmcstate)
+        if is_ETR:
+            rwp = d.read32x2(0x03C,0x018)    # RWPHI, RWP
+        else:
+            rwp = d.read32(0x018)
+        print("  write pointer:  0x%x" % rwp)
         if TraceCaptEn:
             print("  buffer fill level (current): 0x%08x" % d.read32(0x030))
         if False:
@@ -1895,7 +1947,14 @@ def print_words(d, off, n):
                 print(" (all IDs enabled)", end="")
             print()
         integration_regs = [0xEF8]
+    elif d.is_arm_part_number(0x912) or d.is_arm_part_number(0x9e7):
+        # CoreSight TPIU
+        ffcr = d.read32(0x304)
+        print("  FFCR: 0x%08x" % ffcr)
+        ffsr = d.read32(0x300)
+        print("  FFSR: 0x%08x" % ffsr)
     elif d.is_arm_part_number(0x9ee):
+        # CoreSight Address Translation Unit (CATU)
         catu_control = d.read32(0x000)
         catu_mode = d.read32(0x004)
         catu_status = d.read32(0x100)
diff --git a/coresight-tools/physmem.py b/coresight-tools/physmem.py
new file mode 100644
index 0000000..52c8c65
--- /dev/null
+++ b/coresight-tools/physmem.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+
+"""
+Allocate physical memory.
+
+---
+Copyright (C) ARM Ltd. 2022. All rights reserved.
+
+SPDX-License-Identifier: Apache-2.0
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+---
+
+Allocate some physical memory.
+
+This is achieved by:
+ - using mmap() to allocate some virtual memory
+ - using mlock() to pin the memory into RAM
+ - finding out the physical address(es) of the memory
+
+Note that in a VM, the 'physical' address space is that presented by the hypervisor.
+"""
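+
+# Usage sketch (illustrative; reading PFNs from the pagemap needs privilege):
+#   mem = PhysMem(4096, lock=True)
+#   for r in mem.pa_range():
+#       print(r)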
+
+from __future__ import print_function
+
+import ctypes, sys, os, mmap
+
+# Linux mmap/madvise constants not exposed by the mmap module (x86-64/AArch64 values)
+_MAP_LOCKED = 0x2000
+_MAP_HUGETLB = 0x40000
+_MADV_HUGEPAGE = 14
+
+sys.path.append("./cskern")
+import pagemap
+
+# use_errno=True so that ctypes.get_errno() reports failures from these calls
+libc = ctypes.CDLL(None, use_errno=True)
+libc_mmap = libc.mmap
+libc_mmap.restype = ctypes.c_void_p
+libc_mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong]
+libc_munmap = libc.munmap
+libc_munmap.restype = ctypes.c_int
+libc_munmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
+libc_mlock = libc.mlock
+libc_mlock.restype = ctypes.c_int
+libc_mlock.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
+libc_munlock = libc.munlock
+libc_munlock.restype = ctypes.c_int
+libc_munlock.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
+libc_madvise = libc.madvise
+libc_madvise.restype = ctypes.c_int
+libc_madvise.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int]
+
+
+HUGE_NOT = 0       # don't try to use huge pages
+HUGE_ADVISE = 1    # advise use of THP
+HUGE_ALLOC = 2     # allocate from huge pool, else advise
+HUGE_FORCE = 3     # allocate from huge pool or fail
+
+
+class PhysMem:
+    """
+    A contiguous block of virtual addresses which we can lock into physical memory.
+    This may span multiple pages, and be discontiguous in physical memory.
+
+    We can try allocating with MAP_HUGETLB, but this may fail if the system hasn't
+    been set up with any huge pages.
+
+    Or, we can allocate (without using MAP_LOCKED) and then use madvise() to request
+    transparent huge pages. But the VA range has already been allocated by that point.
+
+    The 'huge' parameter takes the HUGE_* values defined above:
+      HUGE_NOT:    don't ask for huge pages
+      HUGE_ADVISE: request transparent huge pages via madvise()
+      HUGE_ALLOC:  try MAP_HUGETLB, falling back to madvise()
+      HUGE_FORCE:  require MAP_HUGETLB, failing if the pool is empty
+    """
+    def __init__(self, size, lock=True, huge=HUGE_NOT, contiguous=False, init=None):
+        self.requested_size = size
+        self.page_size = os.sysconf("SC_PAGE_SIZE")
+        self.va = None
+        self.is_locked = False
+        self.pa_map = pagemap.PAMap()    # map of our own process
+        self.pa_range_cached = None
+        self.ctype_buffer_cached = None
+        # Round the requested size up to a whole number of pages
+        if (size % self.page_size) != 0:
+            size += (self.page_size - (size % self.page_size))
+        self.alloc_size = size
+        flags = mmap.MAP_PRIVATE | mmap.MAP_ANONYMOUS
+        if lock:
+            flags |= _MAP_LOCKED
+        if huge >= HUGE_ALLOC:
+            flags |= _MAP_HUGETLB
+        self.va = libc_mmap(0, size, mmap.PROT_WRITE|mmap.PROT_READ, flags, -1, 0)
+        # MAP_FAILED is (void *)-1: check the low bits to stay pointer-width agnostic
+        if (self.va & 0xfff) == 0xfff:
+            if (flags & _MAP_HUGETLB) != 0:
+                # Failed to allocate with MAP_HUGETLB. Either there are none in the pool or
+                # they are all in use.
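+                # e.g. a privileged user can grow the pool (illustrative shell command):
+                #   echo 64 > /proc/sys/vm/nr_hugepages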
+                if huge == HUGE_FORCE:
+                    if n_sys_huge_pages() == 0:
+                        print("mmap failed: no huge pages, set /proc/sys/vm/nr_hugepages", file=sys.stderr)
+                        raise EnvironmentError("no huge pages available")
+                # retry with THP
+                flags &= ~_MAP_HUGETLB
+                flags &= ~_MAP_LOCKED
+                self.va = libc_mmap(0, size, mmap.PROT_WRITE|mmap.PROT_READ, flags, -1, 0)
+            if (self.va & 0xfff) == 0xfff:
+                print("mmap failed (errno=%u) size=0x%x flags=0x%x" % (ctypes.get_errno(), size, flags), file=sys.stderr)
+                raise EnvironmentError("mmap failed")
+        if (flags & _MAP_HUGETLB) == 0:
+            if huge >= HUGE_ADVISE:
+                self.madvise(_MADV_HUGEPAGE)
+        if lock:
+            self.lock()
+        if contiguous:
+            assert self.is_contiguous(), "failed to allocate contiguous physical memory"
+        if init is not None:
+            b = self.buffer()
+            for i in range(0, self.alloc_size):
+                b[i] = init
+
+    def lock(self):
+        """
+        Lock the allocated VA range into physical memory. This will map any currently unmapped pages.
+        """
+        assert self.va is not None
+        rc = libc_mlock(self.va, self.alloc_size)
+        if rc != 0:
+            raise OSError("mlock failed")
+        self.is_locked = True
+
+    def unlock(self):
+        assert self.is_locked, "block is already unlocked"
+        rc = libc_munlock(self.va, self.alloc_size)
+        if rc != 0:
+            raise OSError("munlock failed")
+        self.is_locked = False
+        self.pa_range_cached = None
+
+    def madvise(self, option, start=None, length=None):
+        if start is None:
+            start = self.va
+        if length is None:
+            length = self.va + self.alloc_size - start
+        rc = libc_madvise(start, length, option)
+        if rc != 0:
+            raise OSError("madvise failed")
+        return rc
+
+    def buffer(self):
+        """
+        Return the block contents as a ctypes character array.
+        """
+        assert self.alloc_size > 0
+        if self.ctype_buffer_cached is None:
+            self.ctype_buffer_cached = (ctypes.c_char * self.alloc_size).from_address(self.va)
+            assert len(self.ctype_buffer_cached.raw) == self.alloc_size
+        return self.ctype_buffer_cached
+
+    def pa_range(self, refresh=False):
+        """
+        Return the list of physical page ranges (PageMapping objects) for this block.
+        The result is cached, since the physical backing shouldn't change while locked.
+        """
+        if self.pa_range_cached is None or refresh:
+            self.pa_range_cached = self.pa_map.pa_range(self.va, self.alloc_size)
+        return self.pa_range_cached
+
+    def pa(self, refresh=False):
+        """
+        Return the PA of a physically contiguous block, or None.
+        """
+        pr = self.pa_range(refresh=refresh)
+        if pr is not None and len(pr) == 1:
+            return pr[0].pa()
+        else:
+            return None
+
+    def is_contiguous(self):
+        """
+        Return true if the block is in physically contiguous memory.
+        """
+        return len(self.pa_range()) == 1
+
+    def is_in_memory(self):
+        """
+        Return true if the block is entirely mapped in physical memory.
+        """
+        for r in self.pa_range():
+            if not r.is_mapped():
+                return False
+        return True
+
+    def close(self):
+        if self.va is not None:
+            if self.is_locked:
+                self.unlock()
+            libc_munmap(self.va, self.alloc_size)
+            self.va = None
+            self.pa_range_cached = None
+            self.ctype_buffer_cached = None
+
+    def __del__(self):
+        self.close()
+
+    def __str__(self):
+        s = "size:0x%x" % self.alloc_size
+        if self.va is not None:
+            s += ",VA:0x%x" % self.va
+            # Show the PA of the first page: the block might not be physically contiguous.
+            pa = self.pa_map.pa(self.va)
+            if pa is not None:
+                s += ",PA:0x%x" % pa
+            if not self.is_contiguous():
+                s += ",discontiguous"
+            if not self.is_in_memory():
+                s += ",unalloc"
+        if self.is_locked:
+            s += ",locked"
+        return "{%s}" % s
+
+
+def _readn(fn):
+    """
+    Read a number from a file, e.g. in sysfs or procfs.
+    """
+    with open(fn) as f:
+        return int(f.read().strip())
+
+
+def n_sys_huge_pages():
+    """
+    Return the number of huge pages in the pool for MAP_HUGETLB.
+    """
+    return _readn("/proc/sys/vm/nr_hugepages")
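+
+# These helpers read the kernel's huge-page pool configuration from, e.g.
+# (illustrative paths on a typical Linux system):
+#   /proc/sys/vm/nr_hugepages
+#   /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages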
+ """ + return _readn("/proc/sys/vm/nr_hugepages") + + +def show_huge_page_config(): + print("Huge pages:") + print(" THP: %s" % open("/sys/kernel/mm/transparent_hugepage/enabled").read().strip()) + print(" nr_hugepages: %u" % n_sys_huge_pages()) + hpc = "/sys/kernel/mm/hugepages" + hpsizes = sorted([int(x[10:-2]) for x in os.listdir(hpc)]) + for ps in hpsizes: + d = os.path.join(hpc, "hugepages-%ukB" % ps) + assert os.path.isdir(d) + pnr = _readn(os.path.join(d, "nr_hugepages")) + pfree = _readn(os.path.join(d, "free_hugepages")) + print(" %9ukB %9u %9u" % (ps, pnr, pfree)) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="test physical memory allocation") + parser.add_argument("--pages", type=int, default=3, help="number of pages to allocate") + parser.add_argument("--blocks", type=int, default=1, help="number of blocks") + parser.add_argument("--huge", type=int, default=0, help="0: don't care, 1: if available, 2: force") + parser.add_argument("--no-lock", action="store_true", help="don't lock block in memory") + parser.add_argument("--set-huge", type=int, help="set OS global number of huge pages") + parser.add_argument("--show-huge", action="store_true", help="show OS global huge page configuration") + opts = parser.parse_args() + if opts.show_huge: + show_huge_page_config() + if opts.set_huge is not None: + with open("/proc/sys/vm/nr_hugepages", "w") as f: + f.write("%u" % opts.set_huge) + with open("/proc/sys/vm/nr_hugepages") as f: + print("Huge pages set to %s" % f.read().strip()) + sys.exit() + blocks = [] + size = opts.pages * os.sysconf("SC_PAGE_SIZE") + for i in range(0, opts.blocks): + b = PhysMem(size, huge=opts.huge, lock=(not opts.no_lock)) + print(b) + for m in b.pa_range(): + print(" %s" % m) + buf = b.buffer() + buf[3] = b'x' + blocks.append(b) + for b in blocks: + buf = b.buffer() + assert buf[3] == b'x' + if b.is_locked: + b.unlock()