diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2bfb1f3..5558c49 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,25 +12,24 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.10', '3.11', '3.12']
+        python-version: ['3.10', '3.11', '3.12', '3.13']
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-          python -m pip install pytest coverage
-          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+          pip install -e '.[dev]'
      - name: Test with pytest
        run: |
          coverage run -m pytest -v && coverage xml
      - name: Upload coverage to Codecov
-        if: ${{ matrix.python-version == '3.12' }}
-        uses: codecov/codecov-action@v3
+        if: ${{ matrix.python-version == '3.13' }}
+        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true
diff --git a/.gitignore b/.gitignore
index 0633d91..1f41eb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,6 @@ venv*/
 # Test coverage related files and directories
 .coverage
 coverage_html_report
+
+# Package build files and folders
+*.egg-info/
diff --git a/COPYING b/LICENSE
similarity index 100%
rename from COPYING
rename to LICENSE
diff --git a/README.md b/README.md
index 47bf983..cafe14b 100644
--- a/README.md
+++ b/README.md
@@ -5,30 +5,14 @@
 
 ## Introduction
 
-The nuclear mass tables produced by [NUBASE](http://amdc.in2p3.fr/web/nubase_en.html) and [AME](https://www-nds.iaea.org/amdc/) are parsed into pandas dataframes.
-These dataframes are then read with the [dash](https://plotly.com/dash/) module to create an interactive webpage to allow the user to interogate the data they are interested in.
+The nuclear mass tables produced by [NUBASE](http://amdc.in2p3.fr/web/nubase_en.html) and [AME](https://www-nds.iaea.org/amdc/) have unique formats.
+This package does the hard work for you and parses the files into a single pandas dataframe for simpler access.
 
 No guarantee is supplied with regards to the accuracy of the data presented.
 Estimated values are included, please always refer to the original sources.
 All data should, however, be accurate.
 
-Additional functionality and polish will be added as I learn more about [dash](https://plotly.com/dash/).
-In the meantime, suggestions are welcome via [issues](https://github.com/php1ic/pynch/issues) or a pull request.
-
-## Setup
-
-As is the standard, you can confirm you have the necessary modules using the [requirements.txt](./requirements.txt) file
-```bash
-pip install --user -r requirements.txt
-```
-
-## Running
-
-With all of the necessary requirements installed, the below will start the app
-```bash
-python3 app.py
-```
-The console will tell you where to point your browser - likely http://127.0.0.1:8050/.
+Suggestions are welcome via [issues](https://github.com/php1ic/pynch/issues) or a pull request.
 
 ## Mass tables
 
@@ -41,16 +25,23 @@ The data files released by the papers linked below are used to create the mass t
 The NUBASE files are read for all of the data values, with the AME files being used to populate an additional mass excess data field.
 No comparison or validation is done on common values.
 
-## Additional uses
+## Setup
+
+Until the package is added to the Python Package Index, the local clone of the repo must be installed from within the top level directory.
+```bash
+pip install -e .
+```
+
+## Usage
 
-If you want to do your own thing with the data, you could import this module, access `MassTable().full_data`, then sort, slice and filter the resultant dataframe to your heart's content.
+Import the module to access `MassTable().full_data`, then sort, slice and filter the resultant dataframe to your heart's content.
 
 For example, track how the accuracy of the mass excess of 18B changes once it is experimentally measured
 ```python
 >>> import pynch.mass_table as mt
 >>> df = mt.MassTable().full_data
->>> df[(df['A'] == 18) & (df['Z'] == 5)][['Experimental', 'NubaseMassExcess', 'NubaseMassExcessError', 'NubaseRelativeError', 'DiscoveryYear']]
-           Experimental  NubaseMassExcess  NubaseMassExcessError  NubaseRelativeError  DiscoveryYear
+>>> df[(df['A'] == 18) & (df['Z'] == 5)][['Experimental', 'NUBASEMassExcess', 'NUBASEMassExcessError', 'NUBASERelativeError', 'DiscoveryYear']]
+           Experimental  NUBASEMassExcess  NUBASEMassExcessError  NUBASERelativeError  DiscoveryYear
 TableYear
 2003              False           52320.0                  800.0             0.015291           1900
 2012               True           51850.0                  170.0             0.003279           2010
@@ -61,7 +52,7 @@ Or for all of the A=100 isotopes from the 2012 table that have a mass-excess err
 ```python
 >>> import pynch.mass_table as mt
 >>> df = mt.MassTable().full_data
->>> df.query('TableYear == 2012 and A == 100 and NubaseMassExcessError < 10.0')[['A', 'Z', 'Symbol', 'DiscoveryYear']]
+>>> df.query('TableYear == 2012 and A == 100 and NUBASEMassExcessError < 10.0')[['A', 'Z', 'Symbol', 'DiscoveryYear']]
              A   Z Symbol  DiscoveryYear
 TableYear
 2012       100  40     Zr           1970
@@ -77,12 +68,12 @@ Or how does the NUBASE mass-excess compare with the AME value for experimentally
 >>> import pynch.mass_table as mt
 >>> df = mt.MassTable().full_data
 >>> # Create a new column comparing the measured values
->>> df['NUBASE-AME'] = df['NubaseMassExcess'] - df['AMEMassExcess']
+>>> df['NUBASE-AME'] = df['NUBASEMassExcess'] - df['AMEMassExcess']
 >>> # Extract the data for measured isotopes and from the latest table
 >>> df_comparison = df.query('TableYear == 2020 and Experimental == True')
 >>> # Sort the difference in measured data by absolute value and print the columns we are interested in
->>> df_comparison.sort_values(by=['NUBASE-AME'], key=abs, ascending=False)[['A', 'Z', 'Symbol', 'NubaseMassExcess', 'AMEMassExcess', 'NUBASE-AME']].head(n=10)
-         A   Z Symbol  NubaseMassExcess  AMEMassExcess  NUBASE-AME
+>>> df_comparison.sort_values(by=['NUBASE-AME'], key=abs, ascending=False)[['A', 'Z', 'Symbol', 'NUBASEMassExcess', 'AMEMassExcess', 'NUBASE-AME']].head(n=10)
+         A   Z Symbol  NUBASEMassExcess  AMEMassExcess  NUBASE-AME
 TableYear
 2020   221  91     Pa           20370.0      20374.937      -4.937
 2020    57  23      V          -44440.0     -44435.063      -4.937
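The new Usage examples generalise: `full_data` is a plain pandas dataframe, so a selection can be written out for use elsewhere. A minimal sketch, not part of the patch itself; the tin selection (Z = 50) and the CSV filename are arbitrary choices, while the column and index names are the ones used in the README examples above:

```python
import pynch.mass_table as mt

df = mt.MassTable().full_data

# Experimentally measured tin (Z=50) isotopes from the most recent table
sn_2020 = df.query('TableYear == 2020 and Z == 50 and Experimental == True')

# Write the selection out as CSV for use in other tools
sn_2020[['A', 'Z', 'Symbol', 'NUBASEMassExcess', 'AMEMassExcess']].to_csv('sn_2020.csv')
```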
diff --git a/app.py b/app.py
deleted file mode 100755
index de9ddd8..0000000
--- a/app.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""dsfds."""
-import dash
-from dash.dependencies import Input, Output
-
-import dash_bootstrap_components as dbc
-
-import plotly.express as px
-
-import pynch.mass_table as mt
-
-app = dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])
-server = app.server
-
-df = mt.MassTable().full_data
-
-table_years = df.index.unique()
-variables = df.columns
-
-segre_colours = {
-    'stable': 'black',
-    'B-': 'red',
-    'B+': 'blue',
-    'A': 'yellow',
-    'n': 'limegreen',
-    '2n': 'darkgreen',
-    'p': 'magenta',
-    '2p': 'purple',
-    'EC': 'orange',
-    'SF': 'cyan'
-}
-
-
-@app.callback(
-    [
-        Output("graph-title", "children"),
-        Output("a-graph", "figure"),
-        Output("z-graph", "figure"),
-        Output("n-graph", "figure"),
-        Output("xval_slider", "min"),
-        Output("xval_slider", "max"),
-        Output("xval_slider", "marks"),
-    ],
-    [
-        Input("yaxis_dropdown", "value"),
-        Input("xval_slider", "value"),
-        Input("year_slider", "value"),
-    ],
-)
-def update_graph(y_var, x_value, year):
-    """The actions to do when the selected data is changed."""
-    df_f = df.loc[table_years[year]][["Symbol", "Decay", "A", "Z", "N", y_var]]
-    df_ff = df_f.loc[(df_f["A"] == x_value)]
-
-    logit = (
-        True
-        if y_var in ["HalfLife", "NubaseRelativeError", "AMERelativeError"]
-        else False
-    )
-
-    a_fig = px.scatter(
-        data_frame=df_f,
-        x="N",
-        y="Z",
-        hover_name="Symbol",
-        color="Decay",
-        color_discrete_map=segre_colours,
-        template="plotly_dark",
-    )
-
-    z_fig = px.scatter(
-        data_frame=df_ff,
-        x="Z",
-        y=y_var,
-        hover_name="Symbol",
-        hover_data=[y_var],
-        log_y=logit,
-        template="plotly_dark",
-    )
-
-    n_fig = px.scatter(
-        data_frame=df_ff,
-        x="N",
-        y=y_var,
-        hover_name="Symbol",
-        hover_data=[y_var],
-        log_y=logit,
-        template="plotly_dark",
-    )
-
-    title = dash.html.H2(f"A = {x_value}")
-
-    min_a = df_f["A"].min()
-    max_a = df_f["A"].max()
-
-    marks_a = {i: f"{i}" for i in range(20, max_a, 20)}
-
-    return (
-        title,
-        a_fig,
-        z_fig.update_traces(mode="lines+markers"),
-        n_fig.update_traces(mode="lines+markers"),
-        min_a,
-        max_a,
-        marks_a,
-    )
-
-
-year_and_variable = dbc.Row(
-    [
-        dbc.Col(
-            dash.html.Div(
-                [
-                    dash.html.H3("Year"),
-                    dash.dcc.Slider(
-                        id="year_slider",
-                        min=0,
-                        max=len(table_years) - 1,
-                        marks={i: f"{table_years[i]}" for i in range(len(table_years))},
-                        value=0,
-                    ),
-                ],
-            )
-        ),
-        dbc.Col(
-            dash.html.Div(
-                [
-                    dash.html.H3("Value to plot"),
-                    dash.dcc.Dropdown(
-                        id="yaxis_dropdown",
-                        options=[{"label": i, "value": i} for i in variables],
-                        value=variables[7],
-                    ),
-                ],
-            )
-        ),
-    ]
-)
-
-a_slider = dbc.Row(
-    dbc.Col(
-        [
-            dash.html.Div(id="graph-title", children=[]),
-            dash.html.Div(dash.dcc.Slider(id="xval_slider", value=50)),
-        ]
-    )
-)
-
-graphs = dbc.Row(
-    [
-        dbc.Col(dash.dcc.Graph(id="n-graph", figure={}), width=3),
-        dbc.Col(dash.dcc.Graph(id="a-graph", figure={}), width=6),
-        dbc.Col(dash.dcc.Graph(id="z-graph", figure={}), width=3),
-    ]
-)
-
-
-app.layout = dbc.Container(children=[year_and_variable, graphs, a_slider], fluid=True)
-
-
-def main():
-    """For testing."""
-
-    # df = MassData().full_data
-    # print(df)
-    # print(df.index.unique())
-    # print(df.columns)
-    # print(df.loc['2003', ['A', 'Symbol']])
-    # print(df.loc[(df['Z'] == 2) & (df['A'] == 3) & (df['TableYear'] == '2003')])
-    # print(df.loc['2003'][['A', 'Z']])
-    # df_f = df.loc["2003"][["A", "Z", "N", "NubaseRelativeError"]]
-    # df_ff = df_f.loc[(df_f["A"] == 12)]
-    # print(df_ff)
-    # print(df_ff["Z"].max())
-    # print(df_ff["Z"].min())
-    # print(df_f.loc[(df_f['A'] == 20)])
-    # filtered = df[df["TableYear"] == "2003"]
-    # print(filtered)
-    # print(type(filtered))
-
-    # print(df.loc[(25), :]['2012']['NubaseMassExcess'].index.get_level_values('N'))
-    # print("~~~~~~~~~~~~~~~~~~~~~~")
-    # print(df[df.index.get_level_values('A') == 40]['2016']['NubaseMassExcess'])
-    # print("~~~~~~~~~~~~~~~~~~~~~~")
-    # print(df.loc[(40), :]["2016"]["NubaseMassExcess"])
-
-
-if __name__ == "__main__":
-    # main()
-    app.run_server(debug=True)
diff --git a/data/2003/mass.mas03 b/pynch/data/2003/mass.mas03
similarity index 100%
rename from data/2003/mass.mas03
rename to pynch/data/2003/mass.mas03
diff --git a/data/2003/nubtab03.asc b/pynch/data/2003/nubtab03.asc
similarity index 100%
rename from data/2003/nubtab03.asc
rename to pynch/data/2003/nubtab03.asc
diff --git a/data/2003/rct1.mas03 b/pynch/data/2003/rct1.mas03
similarity index 100%
rename from data/2003/rct1.mas03
rename to pynch/data/2003/rct1.mas03
diff --git a/data/2003/rct2.mas03 b/pynch/data/2003/rct2.mas03
similarity index 100%
rename from data/2003/rct2.mas03
rename to pynch/data/2003/rct2.mas03
diff --git a/data/2012/mass.mas12 b/pynch/data/2012/mass.mas12
similarity index 100%
rename from data/2012/mass.mas12
rename to pynch/data/2012/mass.mas12
diff --git a/data/2012/nubtab12.asc b/pynch/data/2012/nubtab12.asc
similarity index 100%
rename from data/2012/nubtab12.asc
rename to pynch/data/2012/nubtab12.asc
diff --git a/data/2012/rct1.mas12 b/pynch/data/2012/rct1.mas12
similarity index 100%
rename from data/2012/rct1.mas12
rename to pynch/data/2012/rct1.mas12
diff --git a/data/2012/rct2.mas12 b/pynch/data/2012/rct2.mas12
similarity index 100%
rename from data/2012/rct2.mas12
rename to pynch/data/2012/rct2.mas12
diff --git a/data/2016/mass16.txt b/pynch/data/2016/mass16.txt
similarity index 100%
rename from data/2016/mass16.txt
rename to pynch/data/2016/mass16.txt
diff --git a/data/2016/nubase2016.txt b/pynch/data/2016/nubase2016.txt
similarity index 100%
rename from data/2016/nubase2016.txt
rename to pynch/data/2016/nubase2016.txt
diff --git a/data/2016/rct1-16.txt b/pynch/data/2016/rct1-16.txt
similarity index 100%
rename from data/2016/rct1-16.txt
rename to pynch/data/2016/rct1-16.txt
diff --git a/data/2016/rct2-16.txt b/pynch/data/2016/rct2-16.txt
similarity index 100%
rename from data/2016/rct2-16.txt
rename to pynch/data/2016/rct2-16.txt
diff --git a/data/2020/mass.mas20 b/pynch/data/2020/mass.mas20
similarity index 100%
rename from data/2020/mass.mas20
rename to pynch/data/2020/mass.mas20
diff --git a/data/2020/nubase_1.mas20 b/pynch/data/2020/nubase_1.mas20
similarity index 100%
rename from data/2020/nubase_1.mas20
rename to pynch/data/2020/nubase_1.mas20
diff --git a/data/2020/rct1.mas20 b/pynch/data/2020/rct1.mas20
similarity index 100%
rename from data/2020/rct1.mas20
rename to pynch/data/2020/rct1.mas20
diff --git a/data/2020/rct2.mas20 b/pynch/data/2020/rct2.mas20
similarity index 100%
rename from data/2020/rct2.mas20
rename to pynch/data/2020/rct2.mas20
diff --git a/pynch/mass_table.py b/pynch/mass_table.py
index 1763506..3f9ba90 100644
--- a/pynch/mass_table.py
+++ b/pynch/mass_table.py
@@ -1,4 +1,5 @@
 """Functionality to parse all data file into a single object."""
+import importlib.resources
 import logging
 import pathlib
 import typing
@@ -8,7 +9,7 @@
 from pynch.ame_mass_parse import AMEMassParser
 from pynch.ame_reaction_1_parse import AMEReactionParserOne
 from pynch.ame_reaction_2_parse import AMEReactionParserTwo
-from pynch.nubase_parse import NubaseParser
+from pynch.nubase_parse import NUBASEParser
 
 
 class MassTable:
@@ -19,8 +20,8 @@ class MassTable:
 
     def __init__(self):
         """Do all of the work at construction."""
-        # Assume this file is some/path/pynch/pynch/mass_table.py
-        self.data_path = pathlib.Path(__file__) / ".." / ".." / "data"
/ "data" + self.data_path = importlib.resources.files("pynch.data") + print(self.data_path) self.existing_years = [2003, 2012, 2016, 2020] self.nubase = pd.concat([self._parse_nubase_data(y) for y in self.existing_years], ignore_index=True) self.ame = pd.concat([self._parse_ame_data(y) for y in self.existing_years], ignore_index=True) @@ -78,7 +79,7 @@ def _validate_year(self, year: int) -> int: def _parse_nubase_data(self, year: int) -> pd.DataFrame: """Get the nubase for the given year as a pandas.DataFrame.""" year = self._validate_year(year) - return NubaseParser(self._get_nubase_datafile(year), year).read_file() + return NUBASEParser(self._get_nubase_datafile(year), year).read_file() def _parse_ame_data(self, year: int) -> pd.DataFrame: """Combine all the AME files from the given year into a pandas.DataFrame.""" @@ -97,14 +98,14 @@ def _combine_all_data(self) -> pd.DataFrame: common_columns = ['A', 'Z', 'N', 'TableYear', 'Symbol'] df = self.nubase.merge(self.ame, on=common_columns) - df["NubaseRelativeError"] = abs( - df["NubaseMassExcessError"] / df["NubaseMassExcess"] + df["NUBASERelativeError"] = abs( + df["NUBASEMassExcessError"] / df["NUBASEMassExcess"] ) df["AMERelativeError"] = abs(df["AMEMassExcessError"] / df["AMEMassExcess"]) # 12C has a 0.0 +/ 0.0 mass excess by definition so calculating relative error -> NaN # Set the value to 0.0 as that's what it is - df.loc[(df.Symbol == "C") & (df.A == 12), "NubaseRelativeError"] = 0.0 + df.loc[(df.Symbol == "C") & (df.A == 12), "NUBASERelativeError"] = 0.0 df.loc[(df.Symbol == "C") & (df.A == 12), "AMERelativeError"] = 0.0 # 198Au has a typo in it's decay mode in the 2012 table. It is recorded as '-' diff --git a/pynch/nubase_file.py b/pynch/nubase_file.py index cbf7888..de5b3ec 100644 --- a/pynch/nubase_file.py +++ b/pynch/nubase_file.py @@ -2,7 +2,7 @@ from pynch.parse import Parse -class NubaseFile(Parse): +class NUBASEFile(Parse): """Easy access to where variables are in the NUBASE file. The NUBASE data file is formatted by location in the line, values exist diff --git a/pynch/nubase_parse.py b/pynch/nubase_parse.py index 66bde04..e0bc4e2 100644 --- a/pynch/nubase_parse.py +++ b/pynch/nubase_parse.py @@ -6,10 +6,10 @@ import pandas as pd -from pynch.nubase_file import NubaseFile +from pynch.nubase_file import NUBASEFile -class NubaseParser(NubaseFile): +class NUBASEParser(NUBASEFile): """Parse the NUBASE data file. A collection of functions to parse the weird format of the NUBASE file. 
diff --git a/pynch/nubase_file.py b/pynch/nubase_file.py
index cbf7888..de5b3ec 100644
--- a/pynch/nubase_file.py
+++ b/pynch/nubase_file.py
@@ -2,7 +2,7 @@
 from pynch.parse import Parse
 
 
-class NubaseFile(Parse):
+class NUBASEFile(Parse):
     """Easy access to where variables are in the NUBASE file.
 
     The NUBASE data file is formatted by location in the line, values exist
diff --git a/pynch/nubase_parse.py b/pynch/nubase_parse.py
index 66bde04..e0bc4e2 100644
--- a/pynch/nubase_parse.py
+++ b/pynch/nubase_parse.py
@@ -6,10 +6,10 @@
 
 import pandas as pd
 
-from pynch.nubase_file import NubaseFile
+from pynch.nubase_file import NUBASEFile
 
 
-class NubaseParser(NubaseFile):
+class NUBASEParser(NUBASEFile):
     """Parse the NUBASE data file.
 
     A collection of functions to parse the weird format of the NUBASE file.
@@ -96,8 +96,8 @@ def _read_line(self, line: str) -> dict:
             "TableYear": self.year,
             "A": self._read_as_int(line, self.START_A, self.END_A),
             "Z": self._read_as_int(line, self.START_Z, self.END_Z),
-            "NubaseMassExcess": self._read_as_float(line, self.START_ME, self.END_ME),
-            "NubaseMassExcessError": self._read_as_float(line, self.START_DME, self.END_DME),
+            "NUBASEMassExcess": self._read_as_float(line, self.START_ME, self.END_ME),
+            "NUBASEMassExcessError": self._read_as_float(line, self.START_DME, self.END_DME),
             # "LevelEnergy" : self._read_as_float(,
             #     line, self.START_ISOMER, self.END_ISOMER
             # )
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..55a1126
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,64 @@
+[build-system]
+requires = ["setuptools", "setuptools-scm"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "pynch"
+authors = [
+    {name = "Ian Cullen"},
+    {name = "Soham Pal"},
+]
+description = "A python package to parse and store the various files published by AME and NUBASE"
+version = "0.0.1"
+readme = "README.md"
+requires-python = ">=3.10"
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Education",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3 :: Only",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
+    "Operating System :: OS Independent",
+    "Topic :: Education",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Physics",
+]
+dependencies = [
+    "pandas",
+    "pytest",
+]
+keywords = [
+    "nuclear",
+    "physics",
+    "decay",
+    "AME",
+    "NUBASE",
+]
+
+[project.optional-dependencies]
+dev = [
+    "coverage",
+]
+
+[project.urls]
+GitHub = "https://github.com/php1ic/pynch"
+
+[tool.setuptools]
+license-files = ["LICENSE"]
+
+[tool.setuptools.packages.find]
+include = ["pynch"]
+
+[tool.setuptools.package-data]
+pynch = [
+    "data/2003/*.*",
+    "data/2012/*.*",
+    "data/2016/*.*",
+    "data/2020/*.*",
+]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 803116c..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-dash
-dash_html_components
-dash_core_components
-dash_bootstrap_components
-plotly
-pandas
-pytest
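With the metadata now declared in pyproject.toml, `coverage` is pulled in through the `dev` extra, matching the `pip install -e '.[dev]'` step in the CI workflow above. A small standard-library sketch, not part of the patch, for checking that an editable install picked up the declared metadata; the expected values are the ones declared in `[project]`:

```python
from importlib.metadata import metadata, version

# Both values come from the [project] table in pyproject.toml
print(version("pynch"))                      # 0.0.1
print(metadata("pynch")["Requires-Python"])  # >=3.10
```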
diff --git a/tests/test_mass_table.py b/tests/test_mass_table.py
index d994939..04b2ca9 100644
--- a/tests/test_mass_table.py
+++ b/tests/test_mass_table.py
@@ -5,41 +5,41 @@ def test_get_nubase_datafile():
     mt = MassTable()
 
     year = 2003
-    assert mt._get_nubase_datafile(year) == mt.data_path.resolve() / str(year) / "nubtab03.asc"
+    assert mt._get_nubase_datafile(year) == mt.data_path / str(year) / "nubtab03.asc"
 
     year = 2012
-    assert mt._get_nubase_datafile(year) == mt.data_path.resolve() / str(year) / "nubtab12.asc"
+    assert mt._get_nubase_datafile(year) == mt.data_path / str(year) / "nubtab12.asc"
 
     year = 2016
-    assert mt._get_nubase_datafile(year) == mt.data_path.resolve() / str(year) / "nubase2016.txt"
+    assert mt._get_nubase_datafile(year) == mt.data_path / str(year) / "nubase2016.txt"
 
     year = 2020
-    assert mt._get_nubase_datafile(year) == mt.data_path.resolve() / str(year) / "nubase_1.mas20"
+    assert mt._get_nubase_datafile(year) == mt.data_path / str(year) / "nubase_1.mas20"
 
 
 def test_get_ame_datafiles():
     mt = MassTable()
 
     year = 2003
-    data_path = mt.data_path.resolve() / str(year)
+    data_path = mt.data_path / str(year)
     mass, reaction01, reaction02 = mt._get_ame_datafiles(2003)
     assert mass == data_path / "mass.mas03"
     assert reaction01 == data_path / "rct1.mas03"
     assert reaction02 == data_path / "rct2.mas03"
 
     year = 2012
-    data_path = mt.data_path.resolve() / str(year)
+    data_path = mt.data_path / str(year)
     mass, reaction01, reaction02 = mt._get_ame_datafiles(2012)
     assert mass == data_path / "mass.mas12"
     assert reaction01 == data_path / "rct1.mas12"
     assert reaction02 == data_path / "rct2.mas12"
 
     year = 2016
-    data_path = mt.data_path.resolve() / str(year)
+    data_path = mt.data_path / str(year)
     mass, reaction01, reaction02 = mt._get_ame_datafiles(2016)
     assert mass == data_path / "mass16.txt"
     assert reaction01 == data_path / "rct1-16.txt"
     assert reaction02 == data_path / "rct2-16.txt"
 
     year = 2020
-    data_path = mt.data_path.resolve() / str(year)
+    data_path = mt.data_path / str(year)
     mass, reaction01, reaction02 = mt._get_ame_datafiles(2020)
     assert mass == data_path / "mass.mas20"
     assert reaction01 == data_path / "rct1.mas20"
diff --git a/tests/test_nubase_parse.py b/tests/test_nubase_parse.py
index 461922f..13e9695 100644
--- a/tests/test_nubase_parse.py
+++ b/tests/test_nubase_parse.py
@@ -4,7 +4,7 @@
 
 
 def test_read_halflife_value():
-    parser = nbp.NubaseParser(pathlib.Path("."), 2003)
+    parser = nbp.NUBASEParser(pathlib.Path("."), 2003)
 
     line_01 = "232 0950 232Am 43400# 300# 1.31 m 0.04 91 B+=?;A=2#;B+SF=0.069 10"
     assert parser._read_halflife_value(line_01) == 1.31
@@ -14,7 +14,7 @@
 
 
 def test_read_halflife_error():
-    parser = nbp.NubaseParser(pathlib.Path("."), 2003)
+    parser = nbp.NUBASEParser(pathlib.Path("."), 2003)
 
     line_01 = "113 0500 113Sn -88333 4 115.09 d 0.03 1/2+ 00 B+=100"
     assert parser._read_halflife_error(line_01) == 0.03
@@ -24,14 +24,14 @@
 
 
 def test_no_decay_mode():
-    parser = nbp.NubaseParser(pathlib.Path("."), 2012)
+    parser = nbp.NUBASEParser(pathlib.Path("."), 2012)
 
     no_decay = "044 0212 44Scn -37669.9 1.8 146.224 0.022 50.4 us 0.7 0- 99"
     assert parser._read_decay_string(no_decay) == "UNKNOWN"
 
 
 def test_readable_line():
-    parser = nbp.NubaseParser(pathlib.Path("."), 2003)
+    parser = nbp.NUBASEParser(pathlib.Path("."), 2003)
 
     bad_line01 = "003 0030 3Li 28670# 2000# RN p-unst 98 p ?"
bad_line02 = "130 0556 130Csx -86873 17 27 15 R=.2~~~.1 fsmix" @@ -47,7 +47,7 @@ def test_readable_line(): def test_read_line(): - parser = nbp.NubaseParser(pathlib.Path("."), 2003) + parser = nbp.NUBASEParser(pathlib.Path("."), 2003) iso_line = "183 0791 183Aum -30114 10 73.3 0.4 >1 us (1/2)+ 99 IT=100" assert parser._read_line(iso_line) == dict() @@ -59,8 +59,8 @@ def test_read_line(): assert d['Z'] == 29 assert d['N'] == 28 assert d['Experimental'] is True - assert d['NubaseMassExcess'] == -47310.0 - assert d['NubaseMassExcessError'] == 16.0 + assert d['NUBASEMassExcess'] == -47310.0 + assert d['NUBASEMassExcessError'] == 16.0 assert d['HalfLifeValue'] == 196.3 assert d['HalfLifeUnit'] == "ms" assert d['HalfLifeError'] == 0.7 @@ -75,8 +75,8 @@ def test_read_line(): assert d['Z'] == 53 assert d['N'] == 57 assert d['Experimental'] is False - assert d['NubaseMassExcess'] == -60320 - assert d['NubaseMassExcessError'] == 310 + assert d['NUBASEMassExcess'] == -60320 + assert d['NUBASEMassExcessError'] == 310 assert d['HalfLifeValue'] == 650 assert d['HalfLifeUnit'] == "ms" assert d['HalfLifeError'] == 20 @@ -85,7 +85,7 @@ def test_read_line(): assert d['Decay'] == "B+" # 2020 table has a new format - parser = nbp.NubaseParser(pathlib.Path("."), 2020) + parser = nbp.NUBASEParser(pathlib.Path("."), 2020) # Use the same isotope as previously tested gs_line = "057 0290 57Cu -47309.0 0.5 196.4 ms 0.7 3/2-* 98 1976 B+=100" @@ -95,8 +95,8 @@ def test_read_line(): assert d['Z'] == 29 assert d['N'] == 28 assert d['Experimental'] is True - assert d['NubaseMassExcess'] == -47309.0 - assert d['NubaseMassExcessError'] == 0.5 + assert d['NUBASEMassExcess'] == -47309.0 + assert d['NUBASEMassExcessError'] == 0.5 assert d['HalfLifeValue'] == 196.4 assert d['HalfLifeUnit'] == "ms" assert d['HalfLifeError'] == 0.7