Skip to content

Commit

Permalink
replace np.product with np.prod
Browse files Browse the repository at this point in the history
  • Loading branch information
trhallam authored Aug 14, 2024
1 parent db72fc3 commit a595514
Show file tree
Hide file tree
Showing 9 changed files with 25 additions and 25 deletions.
4 changes: 2 additions & 2 deletions openzgy/impl/bulk.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,7 @@ def _getBrickSizeInBytes(self):
Get the size of an uncompressed brick in bytes.
TODO-Performance, this should be cached on file open and
should probably be a derived attribute of self._metadata._ih.
NOTE-Performance: np.product() might be preferable to spelling out
NOTE-Performance: np.prod() might be preferable to spelling out
the multiply and needing a temp. But it could be 100 times slower.
"""
file_dtype = np.dtype(impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype))
Expand Down Expand Up @@ -629,7 +629,7 @@ def _deliverOneBrick(self, result, start, startpos, raw, brickstatus, as_float,
if onebrick is None:
raise ZgyFormatError("Compression type not recognized")
elif brickstatus == impl_enum.BrickStatus.Normal:
if len(raw) != np.product(self._metadata._ih._bricksize) * np.dtype(file_dtype).itemsize:
if len(raw) != np.prod(self._metadata._ih._bricksize) * np.dtype(file_dtype).itemsize:
raise ZgyFormatError("Got wrong count when reading brick.")
onebrick = np.frombuffer(raw, dtype=file_dtype)
# Instead of describing the array as explicitly little-endian
Expand Down
14 changes: 7 additions & 7 deletions openzgy/iterator.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ def _readall_1(surveysize, blocksize, dtype, readfn):
"""
data = np.zeros(blocksize, dtype=dtype) if readfn else None
done = np.int64(0)
total = np.product(surveysize)
total = np.prod(surveysize)

for ii in range(0, surveysize[0], blocksize[0]):
for jj in range(0, surveysize[1], blocksize[1]):
Expand All @@ -76,17 +76,17 @@ def _readall_1(surveysize, blocksize, dtype, readfn):
view = data[:count[0],:count[1],:count[2]] if data is not None else None
#print("Reading", start, count, view.shape)
if readfn: readfn(start, view)
done += np.product(count)
done += np.prod(count)
yield start, count, view
assert done == np.product(surveysize)
assert done == np.prod(surveysize)

def _readall_2(surveysize, blocksize, chunksize, dtype, readfn, progress):
"""
Iterates over the entire file and returns data in chunks.
All numeric and array parameters use numpy types.
"""
done = np.int64(0)
total = np.product(surveysize)
total = np.prod(surveysize)
# Give a chance to abort before we even start.
if progress and not progress(done, total): return
alldata = _readall_1(surveysize=surveysize,
Expand All @@ -103,12 +103,12 @@ def _readall_2(surveysize, blocksize, chunksize, dtype, readfn, progress):
end = np.minimum(start + chunksize, datasize)
count = end - start
view = data[start[0]:end[0],start[1]:end[1],start[2]:end[2]] if data is not None else None
done += np.product(count)
done += np.prod(count)
yield datastart + start, count, view
# After yielding, give a chance to abort before the next read.
# Also makes sure the final done==total is sent.
if progress and not progress(done, total): return
assert done == np.product(surveysize)
assert done == np.prod(surveysize)

def _readall_3(surveysize, bricksize, blocksize, chunksize, dtype, readfn, maxbytes, progress):
"""
Expand All @@ -125,7 +125,7 @@ def _readall_3(surveysize, bricksize, blocksize, chunksize, dtype, readfn, maxby
blocksize[blocksize==0] = surveysize[blocksize==0]
chunksize[chunksize==0] = blocksize[chunksize==0]
if False:
fmt = lambda x: "{0} = {1} voxels, {2:.1f} MB".format(str(tuple(x)), np.product(x), np.product(x) * dtype.itemsize/(1024*1024))
fmt = lambda x: "{0} = {1} voxels, {2:.1f} MB".format(str(tuple(x)), np.prod(x), np.prod(x) * dtype.itemsize/(1024*1024))
print("survey", fmt(surveysize), "of", np.dtype(dtype).name)
print("brick ", fmt(bricksize))
print("block ", fmt(blocksize))
Expand Down
8 changes: 4 additions & 4 deletions openzgy/test/black.py
Original file line number Diff line number Diff line change
Expand Up @@ -2720,14 +2720,14 @@ def testCloudConsolidateBricks(filename, *, verbose = False):
_debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
brick = np.prod(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice, then bulk data once.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
data = np.arange(np.prod(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)

# lod 0 bricks: 3 * 4 * 3 = 36
Expand Down Expand Up @@ -2869,14 +2869,14 @@ def testCloudConsolidateBricks(filename, *, verbose = False):
segsize=7, _debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
brick = np.prod(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice and bulk data in 7 parts.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
data = np.arange(np.prod(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)

# There may be several reads needed to generate lod 1 bricks
Expand Down
2 changes: 1 addition & 1 deletion openzgy/test/lodalgo.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def testLodAlgorithms():

def testLowpassLodAlgorithm():
bricksize = (64, 64, 64)
b0 = np.arange(np.product(bricksize), dtype=np.float64).reshape(bricksize)
b0 = np.arange(np.prod(bricksize), dtype=np.float64).reshape(bricksize)
#b0.fill(42)
bricks = _make8(b0, None) # TODO-Test also test NaN handling?
lowpass = decimate8(bricks, DecimationType.LowPass)
Expand Down
6 changes: 3 additions & 3 deletions openzgy/tools/copy.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,12 +169,12 @@ def timing_report(writer, elapsed):
bs = np.array(writer.bricksize, dtype=np.int64)
size = np.array(writer.size, dtype=np.int64)
paddedsize = ((size + bs - 1) // bs) * bs
bandwidth = np.product(paddedsize) / elapsed # should I use size or padsize?
bandwidth = np.prod(paddedsize) / elapsed # should I use size or padsize?
bandwidth /= (1024*1024)
print("Elapsed {0:7.2f} seconds, bandwidth {1:6.2f} MVoxel/s copying {2} {3} samples, exact {4:.0f} MVoxel, padded {5:.0f} MVoxel".format(
elapsed, bandwidth, writer.datatype, tuple(size),
np.product(size) / (1024*1024),
np.product(paddedsize) / (1024*1024)))
np.prod(size) / (1024*1024),
np.prod(paddedsize) / (1024*1024)))

def parseints(s):
return tuple(map(int,s.split(",")))
Expand Down
4 changes: 2 additions & 2 deletions openzgy/tools/histcheck.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def scanForRange(reader, verbose = False):
valuerange = (np.nanmin(realmin), np.nanmax(realmax))
print("VALUE RANGE", valuerange)
elapsed = time() - begtime
voxels = np.product(reader.size) / (1024*1024)
voxels = np.prod(reader.size) / (1024*1024)
print(" {0:.1f} MVoxel read in {1:.1f} sec, {2:.1f} Mvoxel/s".format(
voxels, elapsed, voxels/elapsed))
return valuerange
Expand All @@ -43,7 +43,7 @@ def scanForHistogram(reader, valuerange, verbose = False):
else:
hh += h[0]
elapsed = time() - begtime
voxels = np.product(reader.size) / (1024*1024)
voxels = np.prod(reader.size) / (1024*1024)
print(" {0:.1f} MVoxel read in {1:.1f} sec, {2:.1f} Mvoxel/s".format(
voxels, elapsed, voxels/elapsed))
return hh
Expand Down
2 changes: 1 addition & 1 deletion openzgy/tools/show.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def read_data_b_at_a_time(reader, lod, start, size):
def timing_report(reader, lod, start, size, elapsed):
bs = np.array(reader.bricksize if isinstance(reader, newzgy.ZgyReader) else (64, 64, 64), dtype=np.int64)
padsize = ((np.array(size, np.int64) + bs - 1) // bs) * bs
bandwidth = np.product(padsize) / elapsed # should I use size or padsize?
bandwidth = np.prod(padsize) / elapsed # should I use size or padsize?
bandwidth /= (1024*1024)
print("Elapsed {0:6.2f} seconds, bandwidth {1:6.2f} MVoxel/s reading {2} lod {3} size {4} start {5}".format(elapsed, bandwidth, reader.datatype, lod, tuple(size), tuple(start)))

Expand Down
6 changes: 3 additions & 3 deletions openzgy/tools/speedtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,8 @@ def _rounddown(x, step): return (x // step) * step
# No more than 1024 MVoxel to avoid running out of memory (and time).
# TODO-Low, would be nice to crop both inline and crossline range
# to have less likelihood of dead traces.
print("BS", np.product(blocksize) // (1024*1024), "MB, count", total)
have_room = 1024*1024*1024 // np.product(blocksize)
print("BS", np.prod(blocksize) // (1024*1024), "MB, count", total)
have_room = 1024*1024*1024 // np.prod(blocksize)
new_total = min(total, max(have_room, 1))
first_il = ((total - new_total) // 2) * blocksize[0]
print("Adjust il count from start 0 count {0} to start {1} count {2}".format(total, first_il, new_total))
Expand Down Expand Up @@ -130,7 +130,7 @@ def slurp(filename):
starttime = time.time()
with ZgyReader(filename, iocontext = SDCredentials()) as r:
blocks = slurp_open_file(r, progress=ProgressWithDots())
bricksize_in_bytes = int(np.product(r.bricksize) * 4)
bricksize_in_bytes = int(np.prod(r.bricksize) * 4)
r_time = time.time() - starttime

for b in blocks:
Expand Down
4 changes: 2 additions & 2 deletions openzgy/tools/zgydump.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def summary_normal_size(reader, *, header = True):
bytespersample = {SampleDataType.int8: 1,
SampleDataType.int16: 2,
SampleDataType.float: 4}[reader.datatype]
bytesperbrick = np.product(reader.bricksize) * bytespersample
bytesperbrick = np.prod(reader.bricksize) * bytespersample
colsize = (reader.size[2] + reader.bricksize[2] - 1) // reader.bricksize[2]
bytespercolumn = bytesperbrick * colsize
fmt = "{0:30s} = LOD0: {1} {2} MB column {3} {4} MB brick {5}"
Expand Down Expand Up @@ -147,7 +147,7 @@ def run(filename, options):
summary_normal_size(reader, header=False)
return
args = dict(name=filename,
nsamples=np.product(reader.size),
nsamples=np.prod(reader.size),
r=reader)
#print(_brief_info.format(**args))
for line in _brief_info.split('\n'):
Expand Down

0 comments on commit a595514

Please sign in to comment.