Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/examples/quickstart.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@

assert chd.data["head"][0, 0] == 1.0
assert chd.data.head.sel(per=0)[99] == 0.0
assert np.allclose(chd.data.head[:, 1:99], np.full(98, 1e30))
assert np.allclose(chd.data.head[:, 1:99], np.full(98, 3e30))

assert gwf.dis.data.botm.sel(lay=0, col=0, row=0) == 0.0

Expand Down
2 changes: 2 additions & 0 deletions flopy4/mf6/binding.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ def _get_binding_type(component: Component) -> str:
elif isinstance(component, Solution):
return f"{component.slntype}6"
else:
if len(cls_name) == 4 and (cls_name[3] == "g" or cls_name[3] == "a"):
return f"{cls_name[0:3]}6"
return f"{cls_name.upper()}6"

def _get_binding_terms(component: Component) -> tuple[str, ...] | None:
Expand Down
2 changes: 1 addition & 1 deletion flopy4/mf6/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@

MF6 = "mf6"
FILL_DEFAULT = np.nan
FILL_DNODATA = 1e30
FILL_DNODATA = 3e30
LENBOUNDNAME = 40
192 changes: 130 additions & 62 deletions flopy4/mf6/converter/unstructure.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
import xarray as xr
import xattree
from modflow_devtools.dfn.schema.block import block_sort_key
from xattree import XatSpec

from flopy4.mf6.binding import Binding
from flopy4.mf6.component import Component
from flopy4.mf6.constants import FILL_DNODATA
from flopy4.mf6.context import Context
from flopy4.mf6.spec import FileInOut

Expand Down Expand Up @@ -144,9 +146,89 @@ def oc_setting_data(rec):
return data


def _unstructure_block_param(
block_name: str,
field_name: str,
xatspec: XatSpec,
value: Component,
data: dict[str, Any],
blocks: dict,
period_data: dict,
) -> None:
# Skip child components that have been processed as bindings
if isinstance(value, Context) and field_name in xatspec.children:
child_spec = xatspec.children[field_name]
if hasattr(child_spec, "metadata") and "block" in child_spec.metadata: # type: ignore
if child_spec.metadata["block"] == block_name: # type: ignore
return

# filter out empty values and false keywords, and convert:
# - paths to records
# - datetimes to ISO format
# - filter out false keywords
# - 'auxiliary' fields to tuples
# - xarray DataArrays with 'nper' dim to dict of kper-sliced datasets
# - other values to their original form
# TODO: use cattrs converters for field unstructuring?
match field_value := data[field_name]:
case None:
return
case bool():
if field_value:
blocks[block_name][field_name] = field_value
case Path():
field_spec = xatspec.attrs[field_name]
field_meta = getattr(field_spec, "metadata", {})
t = _path_to_tuple(field_name, field_value, inout=field_meta.get("inout", "fileout"))
# name may have changed e.g dropping '_file' suffix
blocks[block_name][t[0]] = t
case datetime():
blocks[block_name][field_name] = field_value.isoformat()
case t if (
field_name == "auxiliary" and hasattr(field_value, "values") and field_value is not None
):
blocks[block_name][field_name] = tuple(field_value.values.tolist())
case xr.DataArray() if "nper" in field_value.dims:
has_spatial_dims = any(
dim in field_value.dims for dim in ["nlay", "nrow", "ncol", "nodes"]
)
if has_spatial_dims:
field_value = _hack_structured_grid_dims(
field_value,
structured_grid_dims=value.parent.data.dims, # type: ignore
)
if block_name == "period":
if not np.issubdtype(field_value.dtype, np.number):
dat = _hack_period_non_numeric(field_name, field_value)
for n, v in dat.items():
period_data[n] = v
else:
period_data[field_name] = {
kper: field_value.isel(nper=kper) # type: ignore
for kper in range(field_value.sizes["nper"])
}
else:
blocks[block_name][field_name] = field_value

case _:
blocks[block_name][field_name] = field_value


def unstructure_component(value: Component) -> dict[str, Any]:
    """Unstructure a component into a dict of MF6 input blocks.

    Dispatches on the component's spec: grid-based ("readarraygrid")
    array packages and layer-based ("readasarrays") array packages get
    specialized handling; everything else takes the generic path.
    """
    # NOTE: FILL_DNODATA is imported at module level; the previous
    # function-local re-import was unused here and has been removed.
    xatspec = xattree.get_xatspec(type(value))
    if "readarraygrid" in xatspec.attrs:
        return _unstructure_grid_component(value)
    elif "readasarrays" in xatspec.attrs:
        return _unstructure_layer_component(value)
    else:
        return _unstructure_component(value)


def _unstructure_layer_component(value: Component) -> dict[str, Any]:
    """Unstructure a layer-based ("readasarrays") array package.

    TODO(review): stub — currently returns no blocks, so any data on a
    "readasarrays" package is silently dropped when unstructuring.
    Confirm whether this is intentional pending a full implementation.
    """
    return {}


def _unstructure_grid_component(value: Component) -> dict[str, Any]:
blockspec = dict(sorted(value.dfn.blocks.items(), key=block_sort_key)) # type: ignore
blocks: dict[str, dict[str, Any]] = {}
xatspec = xattree.get_xatspec(type(value))
Expand All @@ -167,67 +249,53 @@ def unstructure_component(value: Component) -> dict[str, Any]:
blocks[block_name] = {}

for field_name in block.keys():
# Skip child components that have been processed as bindings
if isinstance(value, Context) and field_name in xatspec.children:
child_spec = xatspec.children[field_name]
if hasattr(child_spec, "metadata") and "block" in child_spec.metadata: # type: ignore
if child_spec.metadata["block"] == block_name: # type: ignore
continue

# filter out empty values and false keywords, and convert:
# - paths to records
# - datetimes to ISO format
# - filter out false keywords
# - 'auxiliary' fields to tuples
# - xarray DataArrays with 'nper' dim to dict of kper-sliced datasets
# - other values to their original form
# TODO: use cattrs converters for field unstructuring?
match field_value := data[field_name]:
case None:
continue
case bool():
if field_value:
blocks[block_name][field_name] = field_value
case Path():
field_spec = xatspec.attrs[field_name]
field_meta = getattr(field_spec, "metadata", {})
t = _path_to_tuple(
field_name, field_value, inout=field_meta.get("inout", "fileout")
)
# name may have changed e.g dropping '_file' suffix
blocks[block_name][t[0]] = t
case datetime():
blocks[block_name][field_name] = field_value.isoformat()
case t if (
field_name == "auxiliary"
and hasattr(field_value, "values")
and field_value is not None
):
blocks[block_name][field_name] = tuple(field_value.values.tolist())
case xr.DataArray() if "nper" in field_value.dims:
has_spatial_dims = any(
dim in field_value.dims for dim in ["nlay", "nrow", "ncol", "nodes"]
)
if has_spatial_dims:
field_value = _hack_structured_grid_dims(
field_value,
structured_grid_dims=value.parent.data.dims, # type: ignore
)
if block_name == "period":
if not np.issubdtype(field_value.dtype, np.number):
dat = _hack_period_non_numeric(field_name, field_value)
for n, v in dat.items():
period_data[n] = v
else:
period_data[field_name] = {
kper: field_value.isel(nper=kper) # type: ignore
for kper in range(field_value.sizes["nper"])
}
else:
blocks[block_name][field_name] = field_value

case _:
blocks[block_name][field_name] = field_value
_unstructure_block_param(
block_name, field_name, xatspec, value, data, blocks, period_data
)

# invert key order, (arr_name, kper) -> (kper, arr_name)
for arr_name, periods in period_data.items():
for kper, arr in periods.items():
if kper not in period_blocks:
period_blocks[kper] = {}
period_blocks[kper][arr_name] = arr

# setup indexed period blocks, combine arrays into datasets
for kper, block in period_blocks.items():
key = f"period {kper + 1}"
for arr_name, val in block.items():
if not np.all(val == FILL_DNODATA):
if key not in blocks:
blocks[key] = {}
blocks[key][arr_name] = val

return {name: block for name, block in blocks.items() if name != "period"}


def _unstructure_component(value: Component) -> dict[str, Any]:
blockspec = dict(sorted(value.dfn.blocks.items(), key=block_sort_key)) # type: ignore
blocks: dict[str, dict[str, Any]] = {}
xatspec = xattree.get_xatspec(type(value))
data = xattree.asdict(value)

# create child component binding blocks
blocks.update(_make_binding_blocks(value))

# process blocks in order, unstructuring fields as needed,
# then slice period data into separate kper-indexed blocks
# each of which contains a dataset indexed for that period.
for block_name, block in blockspec.items():
period_data = {} # type: ignore
period_blocks = {} # type: ignore
period_block_name = None

if block_name not in blocks:
blocks[block_name] = {}

for field_name in block.keys():
_unstructure_block_param(
block_name, field_name, xatspec, value, data, blocks, period_data
)

# invert key order, (arr_name, kper) -> (kper, arr_name)
for arr_name, periods in period_data.items():
Expand Down
8 changes: 6 additions & 2 deletions flopy4/mf6/gwf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@
from xattree import xattree

from flopy4.mf6.gwf.chd import Chd
from flopy4.mf6.gwf.chdg import Chdg
from flopy4.mf6.gwf.dis import Dis
from flopy4.mf6.gwf.drn import Drn
from flopy4.mf6.gwf.drng import Drng
from flopy4.mf6.gwf.ic import Ic
from flopy4.mf6.gwf.npf import Npf
from flopy4.mf6.gwf.oc import Oc
Expand All @@ -21,7 +23,7 @@
from flopy4.mf6.utils import open_cbc, open_hds
from flopy4.utils import to_path

__all__ = ["Gwf", "Chd", "Dis", "Drn", "Ic", "Npf", "Oc", "Sto", "Wel", "Rch"]
__all__ = ["Gwf", "Chd", "Chdg", "Dis", "Drn", "Drng", "Ic", "Npf", "Oc", "Rch", "Sto", "Wel"]


def convert_grid(value):
Expand Down Expand Up @@ -79,9 +81,11 @@ def budget(self):
npf: Npf | None = field(block="packages", default=None)
sto: Sto | None = field(block="packages", default=None)
chd: list[Chd] = field(block="packages")
wel: list[Wel] = field(block="packages")
chdg: list[Chdg] = field(block="packages")
drn: list[Drn] = field(block="packages")
drng: list[Drng] = field(block="packages")
rch: list[Rch] = field(block="packages")
wel: list[Wel] = field(block="packages")
output: Output = attrs.field(
default=attrs.Factory(lambda self: Gwf.Output(self), takes_self=True)
)
Expand Down
51 changes: 51 additions & 0 deletions flopy4/mf6/gwf/chdg.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
from pathlib import Path
from typing import ClassVar, Optional

import numpy as np

# from attrs import Converter
from numpy.typing import NDArray
from xattree import xattree

# from flopy4.mf6.converter import structure_array
from flopy4.mf6.package import Package
from flopy4.mf6.spec import array, field, path
from flopy4.mf6.utils.grid import update_maxbound
from flopy4.utils import to_path


@xattree
class Chdg(Package):
    """Constant-head (CHD) package, grid-based array input form.

    Array variant of the CHD package: period data (``head``, ``aux``)
    is supplied as node-indexed arrays with a leading ``nper`` dimension
    rather than as cell lists, as signaled by ``readarraygrid``.
    """

    # multiple instances of this package may be attached to one model
    multi_package: ClassVar[bool] = True
    auxiliary: Optional[list[str]] = array(block="options", default=None)
    auxmultname: Optional[str] = field(block="options", default=None)
    print_input: bool = field(block="options", default=False)
    print_flows: bool = field(block="options", default=False)
    # marks this package as grid-array ("readarraygrid") input
    readarraygrid: bool = field(block="options", default=True)
    save_flows: bool = field(block="options", default=False)
    obs_filerecord: Optional[Path] = path(
        block="options", default=None, converter=to_path, inout="fileout"
    )
    export_array_netcdf: bool = field(block="options", default=False)
    dev_no_newton: bool = field(default=False, block="options")
    # derived from period arrays via update_maxbound; not user-settable
    maxbound: Optional[int] = field(block="dimensions", default=None, init=False)
    head: Optional[NDArray[np.float64]] = array(
        block="period",
        dims=(
            "nper",
            "nodes",
        ),
        default=None,
        on_setattr=update_maxbound,
    )
    aux: Optional[NDArray[np.float64]] = array(
        block="period",
        dims=(
            "nper",
            "nodes",
        ),
        default=None,
        on_setattr=update_maxbound,
    )
62 changes: 62 additions & 0 deletions flopy4/mf6/gwf/drng.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from pathlib import Path
from typing import ClassVar, Optional

import numpy as np

# from attrs import Converter
from numpy.typing import NDArray
from xattree import xattree

# from flopy4.mf6.converter import structure_array
from flopy4.mf6.package import Package
from flopy4.mf6.spec import array, field, path
from flopy4.mf6.utils.grid import update_maxbound
from flopy4.utils import to_path


@xattree
class Drng(Package):
    """Drain (DRN) package, grid-based array input form.

    Array variant of the DRN package: period data (``elev``, ``cond``,
    ``aux``) is supplied as node-indexed arrays with a leading ``nper``
    dimension rather than as cell lists, as signaled by ``readarraygrid``.
    """

    # multiple instances of this package may be attached to one model
    multi_package: ClassVar[bool] = True
    auxiliary: Optional[list[str]] = array(block="options", default=None)
    auxmultname: Optional[str] = field(block="options", default=None)
    print_input: bool = field(block="options", default=False)
    print_flows: bool = field(block="options", default=False)
    # marks this package as grid-array ("readarraygrid") input
    readarraygrid: bool = field(block="options", default=True)
    save_flows: bool = field(block="options", default=False)
    obs_filerecord: Optional[Path] = path(
        block="options", default=None, converter=to_path, inout="fileout"
    )
    export_array_netcdf: bool = field(block="options", default=False)
    mover: bool = field(block="options", default=False)
    dev_cubic_scaling: bool = field(default=False, block="options")
    # derived from period arrays via update_maxbound; not user-settable
    maxbound: Optional[int] = field(block="dimensions", default=None, init=False)
    elev: Optional[NDArray[np.float64]] = array(
        block="period",
        dims=(
            "nper",
            "nodes",
        ),
        default=None,
        on_setattr=update_maxbound,
    )
    cond: Optional[NDArray[np.float64]] = array(
        block="period",
        dims=(
            "nper",
            "nodes",
        ),
        default=None,
        on_setattr=update_maxbound,
    )
    aux: Optional[NDArray[np.float64]] = array(
        block="period",
        dims=(
            "nper",
            "nodes",
        ),
        default=None,
        on_setattr=update_maxbound,
    )
Loading
Loading