diff --git a/bitshuffle/h5.pyx b/bitshuffle/h5.pyx index 978111d..0d12aef 100644 --- a/bitshuffle/h5.pyx +++ b/bitshuffle/h5.pyx @@ -87,7 +87,7 @@ if not sys.platform.startswith('win'): break if success == -1: - raise RuntimeError("Failed to load all HDF5 symbols using these libs: {}".format(libs)) + raise RuntimeError(f"Failed to load all HDF5 symbols using these libs: {libs}") def register_h5_filter(): @@ -134,7 +134,7 @@ def create_dataset(parent, name, shape, dtype, chunks=None, maxshape=None, for i, j in zip(tmp_shape, chunks) if i is not None])).any() if isinstance(chunks, tuple) and chunks_larger: errmsg = ("Chunk shape must not be greater than data shape in any " - "dimension. {} is not compatible with {}".format(chunks, shape)) + f"dimension. {chunks} is not compatible with {shape}") raise ValueError(errmsg) if isinstance(dtype, h5py.Datatype): diff --git a/setup.py b/setup.py index 5c29ba0..b456a2b 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function - # I didn't import unicode_literals. They break setuptools or Cython in python # 2.7, but python 3 seems to be happy with them. @@ -26,9 +24,9 @@ # Only unset in the 'release' branch and in tags. 
VERSION_DEV = None -VERSION = "%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_POINT) +VERSION = f"{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_POINT}" if VERSION_DEV: - VERSION = VERSION + ".dev%d" % VERSION_DEV + VERSION = f"{VERSION}.dev{VERSION_DEV}" COMPILE_FLAGS = ["-O3", "-ffast-math", "-std=c99"] @@ -55,10 +53,7 @@ # Build against the native architecture unless overridden by an environment variable # This can also be overridden by a direct command line argument, or a `setup.cfg` entry # This option is needed for the cibuildwheel action -if "BITSHUFFLE_ARCH" in os.environ: - MARCH_DEFAULT = os.environ["BITSHUFFLE_ARCH"] -else: - MARCH_DEFAULT = "native" +MARCH_DEFAULT = os.environ.get("BITSHUFFLE_ARCH", "native") FALLBACK_CONFIG = { "include_dirs": [], @@ -106,7 +101,7 @@ def pkgconfig(*packages, **kw): try: subprocess.check_output(["pkg-config", package]) except (subprocess.CalledProcessError, OSError): - print("Can't find %s with pkg-config fallback to static config" % package) + print(f"Can't find {package} with pkg-config fallback to static config") for distutils_key in flag_map: config.setdefault(distutils_key, []).extend( FALLBACK_CONFIG[distutils_key] @@ -165,7 +160,7 @@ def pkgconfig(*packages, **kw): "lz4/lz4.h", ], define_macros=MACROS + [("H5_USE_18_API", None)], - **pkgconfig("hdf5", config=dict(include_dirs=["src/", "lz4/"])) + **pkgconfig("hdf5", config=dict(include_dirs=["src/", "lz4/"])), ) if not sys.platform.startswith("win"): @@ -190,7 +185,7 @@ def pkgconfig(*packages, **kw): "lz4/lz4.h", ], define_macros=MACROS, - **pkgconfig("hdf5", config=dict(include_dirs=["src/", "lz4/"])) + **pkgconfig("hdf5", config=dict(include_dirs=["src/", "lz4/"])), ) lzf_plugin = Extension( @@ -202,7 +197,7 @@ def pkgconfig(*packages, **kw): "lzf/lzf/lzf_d.c", ], depends=["lzf/lzf_filter.h", "lzf/lzf/lzf.h", "lzf/lzf/lzfP.h"], - **pkgconfig("hdf5", config=dict(include_dirs=["lzf/", "lzf/lzf/"])) + **pkgconfig("hdf5", config=dict(include_dirs=["lzf/", 
"lzf/lzf/"])), ) @@ -261,7 +256,7 @@ class install(install_): ( "h5plugin-dir=", None, - "Where to install filter plugins. Default %s." % H5PLUGINS_DEFAULT, + f"Where to install filter plugins. Default {H5PLUGINS_DEFAULT}.", ), ("zstd", None, "Install ZSTD support."), ] @@ -313,7 +308,7 @@ def run(self): for plugin_lib in plugin_libs: plugin_name = path.split(plugin_lib)[1] shutil.copy2(plugin_lib, path.join(self.h5plugin_dir, plugin_name)) - print("Installed HDF5 filter plugins to %s" % self.h5plugin_dir) + print(f"Installed HDF5 filter plugins to {self.h5plugin_dir}") # Command line or site.cfg specification of OpenMP. @@ -323,13 +318,12 @@ class build_ext(build_ext_): "omp=", None, "Whether to compile with OpenMP threading. Default" - " on current system is %s." % str(OMP_DEFAULT), + f" on current system is {OMP_DEFAULT}.", ), ( "march=", None, - "Generate instructions for a specific machine type. Default is %s." - % MARCH_DEFAULT, + f"Generate instructions for a specific machine type. 
Default is {MARCH_DEFAULT}.", ), ] boolean_options = build_ext_.boolean_options + ["omp"] @@ -368,9 +362,9 @@ def build_extensions(self): openmpflag = "-fopenmp" archi = platform.machine() if archi in ("i386", "x86_64"): - compileflags = COMPILE_FLAGS + ["-march=%s" % self.march] + compileflags = COMPILE_FLAGS + [f"-march={self.march}"] else: - compileflags = COMPILE_FLAGS + ["-mcpu=%s" % self.march] + compileflags = COMPILE_FLAGS + [f"-mcpu={self.march}"] if archi == "ppc64le": compileflags = COMPILE_FLAGS + ["-DNO_WARN_X86_INTRINSICS"] @@ -435,6 +429,6 @@ def build_extensions(self): long_description=long_description, license="MIT", url="https://github.com/kiyo-masui/bitshuffle", - download_url=("https://github.com/kiyo-masui/bitshuffle/tarball/%s" % VERSION), + download_url=(f"https://github.com/kiyo-masui/bitshuffle/tarball/{VERSION}"), keywords=["compression", "hdf5", "numpy"], ) diff --git a/tests/make_regression_tdata.py b/tests/make_regression_tdata.py index 8608622..defab24 100644 --- a/tests/make_regression_tdata.py +++ b/tests/make_regression_tdata.py @@ -19,7 +19,7 @@ [(BLOCK_SIZE, h5.H5_COMPRESS_ZSTD, COMP_LVL)], ] -OUT_FILE = "tests/data/regression_%s.h5" % bitshuffle.__version__ +OUT_FILE = f"tests/data/regression_{bitshuffle.__version__}.h5" DTYPES = ["a1", "a2", "a3", "a4", "a6", "a8", "a10"] @@ -30,7 +30,7 @@ for dtype in DTYPES: for rep in ["a", "b", "c"]: - dset_name = "%s_%s" % (dtype, rep) + dset_name = f"{dtype}_{rep}" dtype = np.dtype(dtype) n_elem = 3 * BLOCK_SIZE + random.randint(0, BLOCK_SIZE) shape = (n_elem,) diff --git a/tests/test_ext.py b/tests/test_ext.py index 1fcdad3..c7473ee 100644 --- a/tests/test_ext.py +++ b/tests/test_ext.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - import unittest import time @@ -49,7 +47,7 @@ def tearDown(self): reps = 1 delta_ts = [] try: - for ii in range(reps): + for _ in range(reps): t0 = time.time() out = self.fun(self.data) 
delta_ts.append(time.time() - t0) @@ -68,7 +66,7 @@ def tearDown(self): size = max([size_i, size_o]) speed = ext.REPEAT * size / delta_t / 1024**3 # GB/s if TIME: - print("%-20s: %5.2f s/GB, %5.2f GB/s" % (self.case, 1.0 / speed, speed)) + print(f"{self.case:<20}: {1.0 / speed:5.2f} s/GB, {speed:5.2f} GB/s") if self.check is not None: ans = self.check(self.data).view(np.uint8) self.assertTrue(np.all(ans == out.view(np.uint8))) @@ -126,9 +124,7 @@ def test_01g_trans_byte_elem_128(self): def test_01h_trans_byte_elem_96(self): self.case = "byte T elem SSE 96" n = self.data.size // 128 * 96 - dt = np.dtype( - [(str("a"), np.int32), (str("b"), np.int32), (str("c"), np.int32)] - ) + dt = np.dtype([("a", np.int32), ("b", np.int32), ("c", np.int32)]) self.data = self.data[:n].view(dt) self.fun = ext.trans_byte_elem_SSE self.check = trans_byte_elem @@ -138,11 +134,11 @@ def test_01i_trans_byte_elem_80(self): n = self.data.size // 128 * 80 dt = np.dtype( [ - (str("a"), np.int16), - (str("b"), np.int16), - (str("c"), np.int16), - (str("d"), np.int16), - (str("e"), np.int16), + ("a", np.int16), + ("b", np.int16), + ("c", np.int16), + ("d", np.int16), + ("e", np.int16), ] ) self.data = self.data[:n].view(dt) @@ -595,7 +591,7 @@ def tearDown(self): nbyte_max = self.nmax * itemsize dbuf = random.randint(0, 255, nbyte_max).astype(np.uint8) dbuf = dbuf.view(dtype) - for ii in range(self.reps): + for _ in range(self.reps): n = random.randint(0, self.nmax // 8, 1)[0] * 8 data = dbuf[:n] out = self.fun(data).view(np.uint8) @@ -624,7 +620,7 @@ def test_circle(self): nbyte_max = nmax * itemsize dbuf = random.randint(0, 255, nbyte_max).astype(np.uint8) dbuf = dbuf.view(dtype) - for ii in range(reps): + for _ in range(reps): n = random.randint(0, nmax, 1)[0] data = dbuf[:n] shuff = ext.bitshuffle(data) @@ -640,7 +636,7 @@ def test_circle_with_compression(self): nbyte_max = nmax * itemsize dbuf = random.randint(0, 255, nbyte_max).astype(np.uint8) dbuf = dbuf.view(dtype) - for ii in 
range(reps): + for _ in range(reps): n = random.randint(0, nmax, 1)[0] data = dbuf[:n] shuff = ext.compress_lz4(data) @@ -657,7 +653,7 @@ def test_circle_with_zstd_compression(self): nbyte_max = nmax * itemsize dbuf = random.randint(0, 255, nbyte_max).astype(np.uint8) dbuf = dbuf.view(dtype) - for ii in range(reps): + for _ in range(reps): n = random.randint(0, nmax, 1)[0] data = dbuf[:n] shuff = ext.compress_zstd(data) diff --git a/tests/test_h5filter.py b/tests/test_h5filter.py index 2dbb2c3..9990b1e 100644 --- a/tests/test_h5filter.py +++ b/tests/test_h5filter.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - import unittest import os import glob diff --git a/tests/test_h5plugin.py b/tests/test_h5plugin.py index 001fa9d..9e10fe0 100644 --- a/tests/test_h5plugin.py +++ b/tests/test_h5plugin.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import, division, print_function, unicode_literals import unittest import os import glob diff --git a/tests/test_regression.py b/tests/test_regression.py index bb9febc..3c386f3 100644 --- a/tests/test_regression.py +++ b/tests/test_regression.py @@ -3,8 +3,6 @@ """ -from __future__ import absolute_import, division, print_function - import pathlib import unittest @@ -29,14 +27,14 @@ def test_regression(self): g_orig = f["original"] g_comp = f["compressed"] - for dset_name in g_comp.keys(): + for dset_name in g_comp: self.assertTrue(np.all(g_comp[dset_name][:] == g_orig[dset_name][:])) # Only run ZSTD comparison on versions >= 0.4.0 and if ZSTD support # has been built into bitshuffle if version.parse(rev) >= version.parse("0.4.0") and __zstd__: g_comp_zstd = f["compressed_zstd"] - for dset_name in g_comp_zstd.keys(): + for dset_name in g_comp_zstd: self.assertTrue( np.all(g_comp_zstd[dset_name][:] == g_orig[dset_name][:]) )