
Commit 2b92c11

more reader tests and fixes (#250)

1 parent 07d5255

File tree

6 files changed: +200 −19 lines


flopy4/mf6/codec/reader/grammar/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -17,6 +17,7 @@ def _get_template_env():
     )
     env.filters["field_type"] = filters.field_type
     env.filters["record_child_type"] = filters.record_child_type
+    env.filters["to_rule_name"] = filters.to_rule_name
     return env

flopy4/mf6/codec/reader/grammar/filters.py

Lines changed: 8 additions & 0 deletions
@@ -77,3 +77,11 @@ def get_recarray_name(block_name: str) -> str:
     if block_name == "period":
         return "stress_period_data"
     return f"{block_name}data"
+
+
+def to_rule_name(name: str) -> str:
+    """Convert a field name to a valid Lark rule name.
+
+    Lark rule names must not contain hyphens, so we replace them with underscores.
+    """
+    return name.replace("-", "_")
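
For context, the new filter is a plain string transform; the check below is an illustrative sketch (not part of the commit), assuming the module path mirrors the file path above.

# Illustrative sketch, not part of the diff: hyphens are the only characters
# rewritten, so ordinary snake_case names pass through untouched.
from flopy4.mf6.codec.reader.grammar.filters import to_rule_name

assert to_rule_name("steady-state") == "steady_state"  # now a valid Lark rule name
assert to_rule_name("save_flows") == "save_flows"      # unchanged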

flopy4/mf6/codec/reader/grammar/generated/gwf-sto.lark

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@ period_block: "begin"i "period"i block_index period_fields "end"i "period"i bloc
 block_index: integer
 options_fields: (save_flows | storagecoefficient | ss_confined_only | tvs_filerecord | export_array_ascii | export_array_netcdf | dev_original_specific_storage | dev_oldstorageformulation)*
 griddata_fields: (iconvert | ss | sy)*
-period_fields: (steady-state | transient)*
+period_fields: (steady_state | transient)*
 save_flows: "save_flows"i
 storagecoefficient: "storagecoefficient"i
 ss_confined_only: "ss_confined_only"i
@@ -34,5 +34,5 @@ dev_oldstorageformulation: "dev_oldstorageformulation"i
 iconvert: "iconvert"i array
 ss: "ss"i array
 sy: "sy"i array
-steady-state: "steady-state"i
+steady_state: "steady-state"i
 transient: "transient"i
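
Why the rename matters, as an illustrative sketch (not part of the commit, assuming lark is installed): Lark rejects hyphens in rule names when the grammar is loaded, while the renamed rule can still match the literal "steady-state" keyword case-insensitively.

# Minimal sketch: a hyphenated rule name fails at grammar-load time, while
# the underscored rule still matches the original keyword text.
from lark import Lark
from lark.exceptions import LarkError

bad = 'start: steady-state\nsteady-state: "steady-state"i'
good = 'start: steady_state\nsteady_state: "steady-state"i'

try:
    Lark(bad)
except LarkError as err:
    print("rejected:", type(err).__name__)

parser = Lark(good)
print(parser.parse("STEADY-STATE").pretty())  # start -> steady_state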

flopy4/mf6/codec/reader/grammar/templates/macros.jinja

Lines changed: 11 additions & 11 deletions
@@ -1,12 +1,12 @@
 {# Field rendering macros #}

 {% macro record_field(field_name, field) -%}
-{{ field_name }}: {% for child_name, child in field.children.items() -%}
+{{ field_name|to_rule_name }}: {% for child_name, child in field.children.items() -%}
 {%- set child_type = child|record_child_type -%}
 {%- if child.type == 'keyword' -%}
 "{{ child.name }}"i
 {%- elif child.type == 'union' -%}
-{{ child_name }}
+{{ child_name|to_rule_name }}
 {%- else -%}
 {{ child_type }}
 {%- endif -%}
@@ -15,12 +15,12 @@
 {%- endmacro %}

 {% macro union_field(field_name, field) -%}
-{{ field_name }}: {% for child_name, child in field.children.items() -%}
-{{ field_name }}_{{ child_name }}
+{{ field_name|to_rule_name }}: {% for child_name, child in field.children.items() -%}
+{{ field_name|to_rule_name }}_{{ child_name|to_rule_name }}
 {%- if not loop.last %} | {% endif -%}
 {%- endfor %}
 {% for child_name, child in field.children.items() -%}
-{{ field_name }}_{{ child_name }}: {% if child.type == 'keyword' -%}
+{{ field_name|to_rule_name }}_{{ child_name|to_rule_name }}: {% if child.type == 'keyword' -%}
 "{{ child.name }}"i
 {%- else -%}
 "{{ child.name }}"i {{ child.type }}
@@ -29,17 +29,17 @@
 {%- endmacro %}

 {% macro simple_field(field_name, field, field_type) -%}
-{{ field_name }}: "{{ field.name }}"i {{ field_type }}
+{{ field_name|to_rule_name }}: "{{ field.name }}"i {{ field_type }}
 {%- endmacro %}

 {% macro nested_union(child_name, child) -%}
-{{ child_name }}: {% for opt_name, opt in child.children.items() -%}
-{{ child_name }}_{{ opt_name }}
+{{ child_name|to_rule_name }}: {% for opt_name, opt in child.children.items() -%}
+{{ child_name|to_rule_name }}_{{ opt_name|to_rule_name }}
 {%- if not loop.last %} | {% endif -%}
 {%- endfor %}

 {% for opt_name, opt in child.children.items() -%}
-{{ child_name }}_{{ opt_name }}: {% if opt.type == 'keyword' -%}
+{{ child_name|to_rule_name }}_{{ opt_name|to_rule_name }}: {% if opt.type == 'keyword' -%}
 "{{ opt.name }}"i
 {%- else -%}
 "{{ opt.name }}"i {{ opt.type }}{% if opt.shape %}+{% endif %}
@@ -61,12 +61,12 @@
 {%- macro field_list(block_name, fields, recarray_name) -%}
 {%- if recarray_name -%}
 {{ block_name }}_fields: (
-{%- for field_name in fields %}{{ field_name }} | {% endfor -%}
+{%- for field_name in fields %}{{ field_name|to_rule_name }} | {% endfor -%}
 {{ recarray_name }})*
 {%- else -%}
 {{ block_name }}_fields: (
 {%- for field_name in fields %}
-{{- field_name }}{% if not loop.last %} | {% endif %}
+{{- field_name|to_rule_name }}{% if not loop.last %} | {% endif %}
 {%- endfor -%}
 )*
 {%- endif -%}
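
To see what the |to_rule_name changes buy at render time, here is an illustrative sketch (not part of the commit): the inline template only mimics the simple_field macro rather than loading the real template files, and the import path is assumed to mirror the file paths above.

# Illustrative sketch: register the filter and render a simple_field-style
# snippet; the rule name and the quoted keyword deliberately diverge.
from jinja2 import Environment

from flopy4.mf6.codec.reader.grammar.filters import to_rule_name

env = Environment()
env.filters["to_rule_name"] = to_rule_name
snippet = '{{ field_name|to_rule_name }}: "{{ field_name }}"i {{ field_type }}'
print(env.from_string(snippet).render(field_name="steady-state", field_type="array"))
# -> steady_state: "steady-state"i array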

flopy4/mf6/codec/reader/transformer.py

Lines changed: 29 additions & 2 deletions
@@ -24,6 +24,14 @@ class BasicTransformer(Transformer):
     grammar. Yields blocks simply as collections of lines of tokens.
     """

+    def __getattr__(self, name):
+        """Handle typed__ prefixed methods by delegating to the unprefixed version."""
+        if name.startswith("typed__"):
+            unprefixed = name[7:]  # Remove "typed__" prefix
+            if hasattr(self, unprefixed):
+                return getattr(self, unprefixed)
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
     def start(self, items: list[Any]) -> dict[str, Any]:
         blocks = {}
         for item in items:
@@ -72,6 +80,14 @@ def __init__(self, visit_tokens=False, dfn: Dfn = None):
         # Create a flattened fields dict that includes nested fields
         self._flat_fields = self._flatten_fields(self.fields) if self.fields else None

+    def __getattr__(self, name):
+        """Handle typed__ prefixed methods by delegating to the unprefixed version."""
+        if name.startswith("typed__"):
+            unprefixed = name[7:]  # Remove "typed__" prefix
+            if hasattr(self, unprefixed):
+                return getattr(self, unprefixed)
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
     def _flatten_fields(self, fields: dict) -> dict:
         """Recursively flatten fields dict to include children of records and unions."""
         flat = dict(fields)  # Start with top-level fields
@@ -181,7 +197,13 @@ def filename(self, items: list[Any]) -> Path:
         return Path(items[0].strip("\"'"))

     def string(self, items: list[Any]) -> str:
-        return items[0].strip("\"'")
+        # String can be either a token or a tree (word)
+        value = items[0]
+        if hasattr(value, "strip"):
+            return value.strip("\"'")
+        else:
+            # It's a tree, extract the token value
+            return str(value.children[0]) if hasattr(value, "children") else str(value)

     def simple_string(self, items: list[Any]) -> str:
         """Handle simple string (unquoted word or escaped string)."""
@@ -321,7 +343,12 @@ def __default__(self, data, children, meta):
             else:
                 # Non-keyword alternatives return the transformed children
                 return children[0] if len(children) == 1 else children
-        if (field := self._flat_fields.get(data, None)) is not None:
+        # Try to find the field, checking with underscore replacement for hyphens
+        field = self._flat_fields.get(data, None)
+        if field is None and "_" in data:
+            # Try with hyphens instead of underscores (reverse of to_rule_name)
+            field = self._flat_fields.get(data.replace("_", "-"), None)
+        if field is not None:
             if field.type == "keyword":
                 return data, True
             elif field.type == "record" and hasattr(field, "children") and field.children:
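
The typed__ delegation is easiest to see in isolation. A self-contained sketch follows (not the project's transformer; the Delegating class and its string method are made up for illustration). The point, per the docstring above, is that rules reaching the transformer under a typed__ prefix resolve to the plain-named handlers; since __getattr__ only fires when normal attribute lookup fails, an explicitly defined typed__ method would still take precedence.

# Self-contained sketch of the delegation pattern used above: a method written
# for the plain rule name also serves the "typed__"-prefixed variant.
class Delegating:
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails.
        if name.startswith("typed__"):
            unprefixed = name[len("typed__"):]
            if hasattr(self, unprefixed):
                return getattr(self, unprefixed)
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

    def string(self, items):
        return items[0].strip("\"'")


d = Delegating()
print(d.typed__string(['"hello"']))  # hello, resolved via string()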

test/test_mf6_reader.py

Lines changed: 149 additions & 4 deletions
@@ -7,7 +7,7 @@
 import pytest
 import xarray as xr
 from lark import Lark
-from modflow_devtools.dfn import Dfn, load_flat
+from modflow_devtools.dfn import Dfn, MapV1To2, load_flat
 from modflow_devtools.models import get_models
 from packaging.version import Version

@@ -417,9 +417,15 @@ def test_transform_gwf_ic_file(model_workspace, dfn_path):
     assert "griddata" in result  # IC has griddata block
     assert "strt" in result["griddata"]  # Starting heads

-    # Check strt field exists (array transformation not fully implemented yet)
-    strt_data = result["griddata"]["strt"]
-    assert strt_data is not None
+    # Check strt array structure
+    strt = result["griddata"]["strt"]
+    assert "control" in strt
+    assert "data" in strt
+    assert strt["control"]["type"] in ["constant", "internal", "external"]
+
+    # If internal or constant, should have data
+    if strt["control"]["type"] in ["constant", "internal"]:
+        assert strt["data"] is not None


 @pytest.mark.parametrize("model_workspace", ["mf6/example/ex-gwf-bcf2ss-p01a"], indirect=True)
@@ -568,3 +574,142 @@ def test_transform_gwf_oc_file(model_workspace, dfn_path):
     for rec in save_records:
         assert "ocsetting" in rec
         assert rec["ocsetting"] == "all"
+
+
+@pytest.mark.parametrize("model_workspace", ["mf6/example/ex-gwf-csub-p01"], indirect=True)
+def test_transform_gwf_dis_file(model_workspace, dfn_path):
+    """Test transforming a parsed GWF DIS file into structured data."""
+
+    # Load the DFN for DIS and convert to V2
+    v1_dfns = load_flat(dfn_path)
+    mapper = MapV1To2()
+    dis_dfn = mapper.map(v1_dfns["gwf-dis"])
+
+    # Find the DIS file
+    dis_files = list(model_workspace.rglob("*.dis"))
+    assert len(dis_files) > 0
+
+    dis_file = dis_files[0]
+    parser = get_typed_parser("gwf-dis")
+    transformer = TypedTransformer(dfn=dis_dfn)
+
+    # Read, parse, and transform
+    with open(dis_file, "r") as f:
+        content = f.read()
+
+    tree = parser.parse(content)
+    result = transformer.transform(tree)
+
+    # Check structure
+    assert isinstance(result, dict)
+
+    # Check dimensions block
+    assert "dimensions" in result
+    assert "nlay" in result["dimensions"]
+    assert "nrow" in result["dimensions"]
+    assert "ncol" in result["dimensions"]
+    assert result["dimensions"]["nlay"] > 0
+    assert result["dimensions"]["nrow"] > 0
+    assert result["dimensions"]["ncol"] > 0
+
+    # Check griddata block
+    assert "griddata" in result
+    griddata = result["griddata"]
+    assert "delr" in griddata
+    assert "delc" in griddata
+    assert "top" in griddata
+    assert "botm" in griddata
+
+    # Each array should have control and data
+    assert "control" in griddata["delr"]
+    assert "data" in griddata["delr"]
+
+
+@pytest.mark.parametrize("model_workspace", ["mf6/example/ex-gwf-csub-p01"], indirect=True)
+def test_transform_gwf_npf_file(model_workspace, dfn_path):
+    """Test transforming a parsed GWF NPF file into structured data."""
+
+    # Load the DFN for NPF and convert to V2
+    v1_dfns = load_flat(dfn_path)
+    mapper = MapV1To2()
+    npf_dfn = mapper.map(v1_dfns["gwf-npf"])
+
+    # Find the NPF file
+    npf_files = list(model_workspace.rglob("*.npf"))
+    assert len(npf_files) > 0
+
+    npf_file = npf_files[0]
+    parser = get_typed_parser("gwf-npf")
+    transformer = TypedTransformer(dfn=npf_dfn)
+
+    # Read, parse, and transform
+    with open(npf_file, "r") as f:
+        content = f.read()
+
+    tree = parser.parse(content)
+    result = transformer.transform(tree)
+
+    # Check structure
+    assert isinstance(result, dict)
+
+    # Check options block
+    assert "options" in result
+    options = result["options"]
+
+    # Should have save_specific_discharge option
+    assert "save_specific_discharge" in options
+    assert options["save_specific_discharge"] is True
+
+    # Check griddata block
+    assert "griddata" in result
+    griddata = result["griddata"]
+
+    # NPF should have at least icelltype and k
+    assert "icelltype" in griddata
+    assert "k" in griddata
+
+    # Each array should have control and data
+    assert "control" in griddata["icelltype"]
+    assert "data" in griddata["icelltype"]
+    assert "control" in griddata["k"]
+    assert "data" in griddata["k"]
+
+
+@pytest.mark.parametrize("model_workspace", ["mf6/example/ex-gwf-csub-p01"], indirect=True)
+def test_transform_gwf_sto_file(model_workspace, dfn_path):
+    """Test transforming a parsed GWF STO file into structured data."""
+
+    # Load the DFN for STO and convert to V2
+    v1_dfns = load_flat(dfn_path)
+    mapper = MapV1To2()
+    sto_dfn = mapper.map(v1_dfns["gwf-sto"])
+
+    # Find the STO file
+    sto_files = list(model_workspace.rglob("*.sto"))
+
+    # Skip if no STO files (not all models have storage)
+    if len(sto_files) == 0:
+        pytest.skip("No STO files found in this model")
+
+    sto_file = sto_files[0]
+    parser = get_typed_parser("gwf-sto")
+    transformer = TypedTransformer(dfn=sto_dfn)
+
+    # Read, parse, and transform
+    with open(sto_file, "r") as f:
+        content = f.read()
+
+    tree = parser.parse(content)
+    result = transformer.transform(tree)
+
+    # Check structure
+    assert isinstance(result, dict)
+
+    # Check griddata block
+    assert "griddata" in result
+    griddata = result["griddata"]
+
+    # STO should have iconvert
+    assert "iconvert" in griddata
+    assert "control" in griddata["iconvert"]
+    assert "data" in griddata["iconvert"]
