# Description
# - Modifies the geometry of a .feb file based on the equivalently
#   named physical groups of a .geo file (each .geo physical group has
#   the same name as a geometry subtree in the .feb)
import re
import itertools as itls
import numpy as np
import pandas as pd
from pandas import DataFrame as DF
# To run commands in GNU/Linux (or other platforms)
import subprocess as sp
# To process XML (febio files)
import xml.etree.ElementTree as et
# To check that files exist
from os import path
def check_path(fname, fname_type=None):
    """Raise ValueError when ~fname~ does not exist on disk.

    Arguments
    ---------
    fname -- (str) path to check; environment variables in the
        path are expanded before checking
    fname_type -- (str; optional) short label inserted into the
        error message (e.g. "Gmsh")

    Raises
    ------
    ValueError -- when fname_type is not a string, or when the
        (expanded) path does not exist

    Requires
    --------
    from os import path
    """
    if fname_type is None:
        fname_type = " "
    # Guard: fname_type must be a string before it is normalized
    if not isinstance(fname_type, str):
        raise ValueError("fname_type must be a character string")
    # Normalize to exactly one blank space before and after
    fname_type = " " + fname_type.strip() + " "
    if not path.exists(path.expandvars(fname)):
        raise ValueError("The" + fname_type + "file does not exist: " + fname)
def gmsh_geo2inp_3d(geofname):
    """Mesh a Gmsh .geo file into a 3D Abaqus .inp file (same base
    name, .inp extension) by invoking the ~gmsh~ executable; return
    the .inp file name.

    Requires
    --------
    import subprocess as sp
    """
    inp_fname = geofname.replace(".geo", ".inp")
    gmsh_cmd = ["gmsh", "-v", "0", "-3", "-optimize_ho",
                "-format", "inp", "-o", inp_fname, geofname, "."]
    sp.run(gmsh_cmd)
    return inp_fname
def inp2feb_elem(inp_type):
    """Translate an .inp element type (e.g. "c3d8") into the
    equivalent .feb element type (e.g. "hex8").
    Raises KeyError for unknown types.
    """
    # Mapping taken from pyFEBio
    return {"c3d8": "hex8",
            "c3d4": "tet4",
            "c3d6": "penta6",
            "cpe3": "tri3",
            "cpe4": "quad4"}[inp_type]
def inp_default_tags():
    """Section keywords recognized in an .inp file."""
    tags = ["node", "element", "elset", "nset", "surface"]
    return tags
def regex_comp(text, regex_tags):
    """Check whether any of a list of regular expressions (Python
    style) matches at the start of a line of text.

    Arguments
    ---------
    text -- (str) some text to be compared to the regexes
    regex_tags -- (list(str)) strings used as the regular
        expressions for the comparison

    Output
    ------
    True if any of the regular expressions match the text
    """
    # re.match anchors at the beginning of the string; the re
    # module caches compiled patterns, so pre-compiling every tag
    # into an intermediate list on each call was wasted work.
    # https://stackoverflow.com/a/3040745
    return any(re.match(regex, text) for regex in regex_tags)
def line2inp_tag(line, **kwargs):
    """Parse one line of an .inp file and, when it is a section tag
    (e.g. '*ELEMENT,type=C3D8,ELSET=Volume1'), extract its parts.

    Arguments
    ---------
    line -- (str) a line of an .inp file
    **kwargs -- (dict) extra arguments
        - regex_tags -- (list(str) or tuple(str); optional)
          Python regular expressions used to decide whether the
          line is a known .inp tag. Defaults to the tags of
          inp_default_tags().

    Output
    ------
    (ttag, subtype, name, line, iselset) where
    ttag -- (str) lower-case tag without the asterisk
        ("" when the line is not a recognized tag)
    subtype -- (str) lower-case value of the first "type=" field
    name -- (str) original-case value of the first "set="/"name="
        field
    line -- (str) the input line with all spaces removed
    iselset -- (bool) True when a "set=" field is present

    Raises
    ------
    ValueError -- when regex_tags is not a list/tuple of strings
    """
    # Remove the spaces (the .inp format allows arbitrary spacing)
    line = line.replace(" ", "")
    # Lower-case copy, used for case-insensitive comparisons
    line_low = line.lower()
    if "regex_tags" in kwargs:
        regex_tags = kwargs["regex_tags"]
        # BUGFIX: the previous check called str_listortuple, which
        # is not defined anywhere in this file (NameError)
        if not (isinstance(regex_tags, (list, tuple))
                and all(isinstance(tag, str) for tag in regex_tags)):
            raise ValueError(
                "The tags need to be regexes like ['regex1', 'regex2', ...].")
    else:
        # Lines starting with any amount of space, followed by an
        # asterisk and the tag (mirrors inp_default_tags(); raw
        # strings avoid the invalid "\s"/"\*" escape warnings)
        regex_tags = [r"^\s*\*" + tag
                      for tag in ("node", "element", "elset",
                                  "nset", "surface")]
    # Defaults, so every return value is always bound and iselset
    # is always a bool (it used to be "" when no tag matched)
    ttag = ""
    subtype = ""
    name = ""
    iselset = False
    # Check if the line has any .inp tags (equivalent of
    # regex_comp(line_low, regex_tags))
    if any(re.match(regex, line_low) for regex in regex_tags):
        # Comma-separated fields; fields_low for the comparisons,
        # fields to keep the original case of the names
        fields = line.split(",")
        fields_low = line_low.split(",")
        ran_fields = range(len(fields))
        # The tag is the first keyword without the asterisk
        # (lower case)
        ttag = fields_low[0].lstrip("*")
        # First "type=" field; "" when absent (lower case)
        subtype = next((fields_low[idxfld].partition("=")[2]
                        for idxfld in ran_fields
                        if "type=" in fields_low[idxfld]), "")
        # First "set=" or "name=" field; "" when absent
        # (original case)
        name = next((fields[idxfld].partition("=")[2]
                     for idxfld in ran_fields
                     if "set=" in fields_low[idxfld]
                     or "name=" in fields_low[idxfld]), "")
        # Mark .inp element sets ("elset=" present in the line)
        iselset = next((True
                        for idxfld in ran_fields
                        if "set=" in fields_low[idxfld]), False)
    elif line_low.startswith("*"):
        # startswith avoids an IndexError on empty lines
        warnstr = "WARNING: This line looks like a tag," + \
            " but was not processed: " + line
        print(warnstr)
    return ttag, subtype, name, line, iselset
def feb_tag_types():
    """Geometry tag types recognized in a .feb file."""
    types = ["nodes", "elements", "nodeset", "elementset"]
    return types
def csvline2lst(text):
    """Split one CSV line into its fields, after dropping the
    trailing newline/whitespace."""
    stripped = text.rstrip()
    return stripped.split(",")
def csvlst2df(text_lst):
    """Convert an iterable of CSV lines into a pandas DataFrame of
    string fields (ragged rows are padded by ~from_records~).

    Requires
    --------
    csvline2lst
    from pandas import DataFrame as DF
    """
    records = [csvline2lst(line) for line in text_lst]
    return DF.from_records(records)
def csvlst2arr(text_lst):
    """Convert an iterable of CSV lines into a numpy array of
    string fields (rows must have equal length for a 2-D array).

    Requires
    --------
    csvline2lst
    import numpy as np
    """
    records = [csvline2lst(line) for line in text_lst]
    return np.array(records)
def inp_tags2sec(tags, fname):
    """Read the body of every .inp section into a list of
    ~SectionInp~ (one per tag), with the contents of each section
    as a pandas DataFrame.

    Arguments
    ---------
    tags -- (list) output of ~inp_tags()~: items are
        (line_number, (ttag, subtype, name, text, iselset))
    fname -- (str) path of the .inp file (opened and read again
        here)

    Output
    ------
    list(SectionInp) -- same length and order as ~tags~

    Raises
    ------
    ValueError -- when ~tags~ is empty
    """
    # # Create container for the sections
    # # (Use arrays instead of lists to be able to have
    # # numpy.view to access the elements without creating new
    # # lists; save memory and processing time)
    # sections = np.array([SectionInp() for I in tags])
    # # The above is not needed:
    # >>> def test(arr):
    # ...:     arr[0] = None
    # ...:     return arr
    # >>> arr = [[1, 2, 3], [45]]
    # >>> [test(I) for I in arr]
    # [[None, 2, 3], [None]]
    # >>> arr
    # [[None, 2, 3], [None]]
    sections = [SectionInp() for I in tags]
    with open(fname, "r", newline=None) as inp_fd:
        # Iterate in-between tags to extract the values of each
        # section
        # TODO: consider pandas, numpy, h5f (my files are not
        # that big)
        # https://stackoverflow.com/a/45796754
        # https://stackoverflow.com/a/40469450
        # From the last tag to the end of the file (if there is
        # only one tag, it is the same as the last one--see
        # below)
        # If there is more than one tag
        if len(tags) > 1:
            # Iterate until the penultimate tag
            for idxtag, curtag in enumerate(tags[:-1]):
                # First/last line numbers of this section's body
                sec_ini = curtag[0]
                sec_fin = tags[idxtag + 1][0] - 1
                # -1 for 0 indexing
                sec_len = sec_fin - sec_ini
                if sec_len >= 0:
                    # Read one line (to move the file cursor)
                    # If the .inp file is well formed,
                    # sec_len >= 0 (always), but let's make sure
                    inp_fd.readline()
                if sec_len > 0:
                    # Iterate between lines
                    # https://stackoverflow.com/a/36854340
                    sec_itr = itls.islice(inp_fd, 0, sec_len)
                    # Convert the lines into a DataFrame
                    contents = csvlst2df(sec_itr)
                    # Populate the sections container; the tag
                    # tuple order comes from line2inp_tag()
                    ttag = curtag[1][0]
                    subtype = curtag[1][1]
                    name = curtag[1][2]
                    text = curtag[1][3]
                    iselset = curtag[1][4]
                    sections[idxtag].fill(curtag[0], ttag,
                                          subtype, contents,
                                          name, text, iselset)
        if len(tags) > 0:
            # The conditional above should not be necessary,
            # but tags[-1] will fail if someone does strange
            # things here
            curtag = tags[-1]
            # Whether it's the last or the only tag, do this:
            # Read one line (to move the file cursor)
            inp_fd.readline()
            # The last section runs to the end of the file
            # TODO Consider iitertools or map
            contents = csvlst2df(inp_fd)
            # Populate the sections container
            ttag = curtag[1][0]
            subtype = curtag[1][1]
            name = curtag[1][2]
            text = curtag[1][3]
            iselset = curtag[1][4]
            sections[-1].fill(curtag[0], ttag, subtype,
                              contents, name, text, iselset)
        else:
            raise ValueError(
                "The length of the tags is lower than 1." +
                " Check your inputs.")
    return sections
def df_remove_emptystr(data):
    """Turn empty-string cells of a pandas DataFrame into NaN and
    drop the columns which then contain nothing but NaN.

    Requires
    --------
    import numpy as np
    """
    # http://stackoverflow.com/questions/10857924/ddg#10859883
    cleaned = data.replace("", np.nan)
    return cleaned.dropna(axis=1, how="all")
def inp_sec_test_elemelset(section):
    """True when ~section~ is an "element" section flagged as a
    set (~iselset~)."""
    is_element = section.ttag == "element"
    return is_element and section.iselset
def inp_sec_test_tag(section, tag):
    """True when the section's ~ttag~ equals ~tag~."""
    return tag == section.ttag
def inp_sec_test_elem(section):
    """True when the section's ~ttag~ is "element".
    TODO: replace direct uses by inp_sec_test_tag(section, tag)
    """
    return inp_sec_test_tag(section, "element")
def inp_seclst_elem_elsets_filt(sections):
    """Lazy iterator over the sections whose ~ttag~ is "element".
    NOTE(review): despite the name, ~iselset~ is not tested here.
    """
    return filter(inp_sec_test_elem, sections)
def inp_seclst_elem_elsets(sections):
    """List of the sections whose ~ttag~ is "element" and which
    are flagged as sets through ~iselset~; the equivalent of

        [s for s in sections
         if s.ttag == "element" and s.iselset]

    See also
    --------
    SectionInp (class)
    """
    return list(filter(inp_sec_test_elemelset, sections))
def inp_seclst_elem_elsets_inds(sections):
    """Element indices (first column of the original .inp file) of
    the sections selected by ~inp_seclst_elem_elsets_filt~.
    NOTE(review): the underlying filter tests the tag only, so
    ~iselset~ is NOT checked here although the name and the
    original docstring suggested it -- confirm intent.

    See also
    --------
    SectionInp (class)
    """
    selected = inp_seclst_elem_elsets_filt(sections)
    return [sec.contents.iloc[:, 0] for sec in selected]
def inp_seclst_elem_elsets2nodes(elemset, elset,
                                 slow=False):
    """Return the node indices (numpy flat iterator) of the
    elements of ~elemset~ whose element index appears in
    ~elset.contents~.

    ~elemset.contents~ is a pandas.DataFrame whose first column
    holds the element indices (one element per row) and whose
    remaining columns hold the node indices which make up each
    element, e.g.:

    >>> elemset.contents.head()
         0    1    2    3    4    5    6    7    8
    0  873    1   25  195   21  119  350  619  299
    1  874  119  350  619  299  120  351  620  300

    Arguments
    ---------
    elemset -- (SectionInp) an "element" section
    elset -- (SectionInp) an "elset" section whose ~contents~
        values are element indices
    slow -- (bool; default=False) use the step-by-step (more
        readable) implementation; both paths give the same result

    Output
    ------
    numpy flatiter over the selected node indices
    """
    if slow:
        # Shorter variable
        elemcont = elemset.contents
        # Indices of the elements (first column in the original
        # .inp file)
        elem_inds = elemcont.iloc[:, 0]
        # All element indices of the set, as a flat vector
        # BUGFIX: was ~elset.values.flat~, but ~elset~ is a
        # SectionInp, so the DataFrame is in ~elset.contents~
        # (the previous code raised AttributeError)
        all_elem = elset.contents.values.flat
        # Elements of ~elset~ which are defined in ~elemset~
        elms_mask = elem_inds.isin(all_elem)
        # Element indices and nodes of the selected rows
        subelmset = elemcont[elms_mask]
        # Nodes only (drop the first, index, column)
        subelmnodes = subelmset.iloc[:, 1:]
    else:
        # Same as the block above, in one (faster) expression
        subelmnodes = \
            elemset.contents[
                elemset.contents.iloc[:, 0].isin(
                    elset.contents.values.flat)].iloc[:, 1:]
    return subelmnodes.values.flat
def inp_seclst_elset_elements2nodes(sections, safer_code=True):
    """For every section whose ~ttag~ is "elset", collect the
    unique node indices of its elements and store them in the
    section's ~nodes~ attribute (modifies ~sections~ in place).

    The element definitions are looked up in the sections whose
    ~ttag~ is "element" (see inp_seclst_elem_elsets2nodes).

    Arguments
    ---------
    sections -- (list(SectionInp)) parsed .inp sections
    safer_code -- (bool; default=True) kept for backward
        compatibility; both values now run the same clear, safe
        implementation. BUGFIX: previously False selected a "fast"
        path that was entirely commented out, so the call raised
        NameError.

    Requires
    --------
    import numpy as np
    """
    # Function which extracts the nodes of one element section
    # restricted to one element set
    get_nodes_func = inp_seclst_elem_elsets2nodes
    # Sections whose tag is "element", materialized once and
    # reused for every elset inside the loop
    elemset_itr = list(
        inp_seclst_elem_elsets_filt(sections))
    # Iterate (lazily) over the sections whose tag is "elset"
    for elset in filter(
            lambda s:
            s.ttag == "elset",
            sections):
        subelmnodes = [get_nodes_func(elemset, elset)
                       for elemset in elemset_itr]
        # Join all the sub-element nodes into the final, unique,
        # nodal set for this elset
        # TODO: is the unique really necessary?
        if len(subelmnodes) > 0:
            elset.nodes = np.unique(np.hstack(subelmnodes))
def inp_tags(inpfname):
    """Collect every tag line (lines starting with "*") of an .inp
    file, together with its 1-based line number.

    Output
    ------
    list of (line_number, line2inp_tag(line)) tuples
    """
    # On input, newline=None enables universal newlines mode
    with open(inpfname, "r", newline=None) as inp_fd:
        # Add 1 to the end of ~open()~ to read in buffered mode
        # (I guess it's the same for enumerate):
        # https://stackoverflow.com/a/620494
        tags = [(num, line2inp_tag(text.rstrip("\n")))
                for num, text in enumerate(inp_fd, 1)
                if text[0] == "*"]
    return tags
class SectionInp:
    """One section of an .inp file: its tag line, position, and
    contents (as a pandas DataFrame)."""
    def __init__(self):
        """
        tagtext  -- str() the whole tag, unprocessed, e.g.
                    '*ELEMENT,type=C3D8,ELSET=Volume1'
        pos      -- int() line number of the tag in the
                    original file
        subtype  -- str() type of tag, e.g. "c3d8"
        name     -- str() name, if provided
        ttag     -- str() type of tag, e.g. 'element'
        nodes    -- numpy.array() nodal indices of the section
                    (filled later; see
                    inp_seclst_elset_elements2nodes)
        contents -- pandas.DataFrame() contents of the section
                    1, 2, 3, NaN
                    4, 5, NaN, NaN
                    6, 7, 8, 9
        iselset  -- bool() whether the tag declared an element set
        """
        # Initialize with empty values
        # No type declaration in Python
        self.tagtext = ""
        self.pos = int()
        self.name = ""
        self.subtype = ""
        self.ttag = ""
        self.nodes = np.array([])
        # I use Pandas DataFrame and not Numpy arrays,
        # because a section in the .inp file could very well
        # look like this:
        # 1, 2, 3,
        # 4, 5,
        # 6, 7, 8, 9,
        # and reading this as a numpy array would take extra
        # procesing. By using DataFrame, I get this:
        # 1, 2, 3, NaN, NaN
        # 4, 5, NaN, NaN, NaN
        # 6, 7, 8, 9, NaN
        self.contents = DF([[0]])
        self.iselset = False
    def fill(self, pos, ttag, subtype, contents, name="",
             tagtext="", iselset=False, nodes=np.array([])):
        """Populates a SectionInp. Turns empty strings into
        NaN from ~contents~, removes columns whose only
        values are NaN, and converts the remaining cells to
        numbers (NaN on conversion errors).
        NOTE(review): the ~nodes~ default is a shared mutable
        numpy array -- safe only while callers never mutate it
        in place.
        """
        self.tagtext = tagtext
        self.pos = pos
        self.name = name
        self.subtype = subtype
        self.ttag = ttag
        # Remove empty strings (from ending lines with
        # comma) and assign to contents
        contents = df_remove_emptystr(contents)
        # Convert the data into numbers, and replace any
        # error to NaN
        self.contents = contents.apply(pd.to_numeric,
                                       errors="coerce")
        self.iselset = iselset
        self.nodes = nodes
def feb_geom_childtag_from_elsubtype(subtype):
    """Map a FEBio geometry subtree tag ("Nodes", "NodeSet",
    "Elements") to the tag of its children ("node"/"elem").
    Raises KeyError for unknown subtypes."""
    return {"Nodes": "node",
            "NodeSet": "node",
            "Elements": "elem"}[subtype]
def feb_childtag_attr_format(childtag, childnum):
    """Attribute dict ({"id": "<childnum>"}) for an "elem"/"node"
    child of a FEBio geometry subtree; ~childnum~ is rendered with
    "%d". Raises KeyError for other child tags."""
    attr_by_tag = {tag: {"id": "%d" % childnum}
                   for tag in ("elem", "node")}
    return attr_by_tag[childtag]
def feb_childtag_text_format(childtag, vals,
                             def_fmt="{},"):
    """Format ~vals~ as the text of an "elem"/"node" child: each
    value rendered with ~def_fmt~ and the trailing separator
    stripped (e.g. [1, 2, 3] -> "1,2,3").
    Raises KeyError for other child tags.
    """
    # def_fmt=" {: 0.6e},"
    # BUGFIX: format once and share the result; the previous
    # version iterated ~vals~ separately for the "elem" and
    # "node" dict entries, so a generator passed as ~vals~ was
    # exhausted by the first entry and the second one got ""
    joined = "".join(def_fmt.format(VAL)
                     for VAL in vals)[:-1]
    return {"elem": joined,
            "node": joined}[childtag]
def inp_feb_match_children_by_tag(febsubset, elmtag,
                                  inpnames):
    """Find which children of ~febsubset~
    (i.e. ~febsubset[I].get("name")~) have a pair in ~inpnames~.

    Arguments
    ---------
    febsubset -- ElementTree element with some substructure of a
        FEBio file
    elmtag -- (str) tag of a node (a.k.a. branch, subelement,
        child, tag, etc.) of FEBio (e.g. "NodeSet")
    inpnames -- (list(str)) names (e.g. elset=name) of the
        sections in an .inp file

    Output
    ------
    list(int) -- one entry PER CHILD of ~febsubset~ (same
        indexing): the index into ~inpnames~ of the first match,
        or -1 when there is no match

    Requires
    --------
    import numpy as np
    """
    # *** Container for the matches
    # BUGFIX: one slot per child of ~febsubset~; the previous
    # version allocated one slot per *named* child only, although
    # the result is indexed by child position (below and by the
    # caller), giving an IndexError / misaligned results whenever
    # unnamed children precede named ones
    matchind = [-1 for I in febsubset]
    # *** Find those `elmtag' (e.g. NodeSet) in the .feb geometry
    # which are named in the .inp
    # The filter: iterate over the indices of the named children
    # only
    for xmlind in filter(lambda I:
                         "name" in febsubset[I].keys(),
                         range(len(febsubset))):
        xmlset = febsubset[xmlind]
        xmlname = xmlset.get("name")
        ind = np.where(np.isin(inpnames, xmlname))[0]
        if len(ind) > 1 and xmlset.tag == elmtag:
            # TODO: check for multiple occurences
            print("WARN: Found a repeated section in the .inp")
            print("WARN: " + elmtag)
            print("WARN: Using the first occurrence")
        # np.where returns an array; use the first match only
        if len(ind) > 0 and xmlset.tag == elmtag:
            matchind[xmlind] = ind[0]
    return matchind
def inp_sec_node_coord(sections, inpsec, nodaltag="node"):
    """Get the indices and coordinates of the nodes of a
    SectionInp.

    Arguments
    ---------
    sections -- (list(SectionInp)) one of these sections must
        have ~ttag == nodaltag~, with the nodal indices and
        coordinates in its ~contents~
    inpsec -- (SectionInp) section whose ~nodes~ (numpy.array of
        nodal indices) we want the coordinates of
    nodaltag -- (str; default="node") ttag of the section which
        holds the nodal table

    Output
    ------
    numpy.array -- rows of the nodal table for ~inpsec.nodes~

    Requires
    --------
    import numpy as np

    See also
    --------
    inp_feb_replace_children_by_tag
    """
    # First section carrying the nodal coordinates (raises
    # StopIteration when there is none)
    nodsec = next(sec for sec in sections
                  if sec.ttag == nodaltag)
    # In the .inp, node numbering starts at 1; DataFrame rows
    # start at 0, hence the -1
    # NOTE(review): this assumes node ids are contiguous from 1
    # and the table rows are sorted by id -- confirm for general
    # meshes
    rows = nodsec.contents.iloc[inpsec.nodes - 1]
    return rows.values
def inp_feb_add_indexed_vals(xmlset,
                             febchildtag,
                             childval,
                             childfmt,
                             txtfmt,
                             colini,
                             colidx=0):
    """Append one child to ~xmlset~ built from one row of .inp
    values. Used in inp_feb_replace_children_by_tag.

    For .inp sections shaped like
        index1 val11 val12 ...
    ~childval~ is one such row: ~childval[colidx]~ is the index
    (usually column 0) and ~childval[colini:]~ are the values
    (usually column 1 onwards).

    Arguments
    ---------
    xmlset -- ElementTree element the child is appended to
    febchildtag -- (str) tag of the new child (e.g. "elem")
    childval -- one row of values, index included
    childfmt -- function(tag, index) -> attribute dict for the
        child
    txtfmt -- function(tag, values) -> str formatting the values
        as the child's text
    colini -- (int) first column of the values
    colidx -- (int; default=0) column holding the index
    """
    # The element/node number used for the "id" attribute
    childnum = childval[colidx]
    # Create the child with its attributes already set
    child = et.SubElement(xmlset,  # parent
                          febchildtag,  # tag
                          attrib=childfmt(febchildtag, childnum))
    # Values (without the index column), formatted as the text
    child.text = txtfmt(febchildtag, childval[colini:])
def inp_feb_replace_children_by_tag(febsubset, sections,
                                    elmtag):
    """Replace the contents of each child of ~febsubset~
    (i.e. ~febsubset[I].get("name")~) whose name matches the name
    of a section in ~sections~.

    Arguments
    ---------
    febsubset -- ElementTree element with some substructure of a
        FEBio file with (possibly) named subtrees
    sections -- (list(SectionInp)) presumably has some elements
        with names matching those in ~febsubset~
    elmtag -- (str) tag of the FEBio children to process:
        "Nodes", "Elements" or "NodeSet"

    Requires
    --------
    import numpy as np
    """
    # *** Names of the sections in the .inp
    inpnames = [VAL.name for VAL in sections]
    # *** ~matchind~ uses the same indexing as ~febsubset~; each
    # item points into ~sections~ (-1 when there is no match)
    matchind = inp_feb_match_children_by_tag(febsubset,
                                             elmtag,
                                             inpnames)
    # ** Modify the matched items in the .feb with the
    # corresponding items from the .inp
    febchildtag = feb_geom_childtag_from_elsubtype(elmtag)
    # *** Functions to format the children's attributes and text
    childfmt = feb_childtag_attr_format
    txtfmt = feb_childtag_text_format
    for febidx, inpidx in enumerate(matchind):
        # inpidx < 0 means: no match for this child
        if inpidx < 0:
            continue
        # Convenient names for the matching subtree and section
        # BUGFIX: index ~febsubset~ (the parameter); the previous
        # code indexed the global ~febgeo~, which only worked
        # because the caller happened to pass that same object
        xmlset = febsubset[febidx]
        inpsec = sections[inpidx]
        # Remove all children (xmlset.clear would also delete the
        # attributes of the set, not just its children)
        for old_child in list(xmlset):
            xmlset.remove(old_child)
        if elmtag in ("Elements", "Nodes"):
            # Both have rows shaped like:
            # index1 val11 val12 ...
            if elmtag == "Elements":
                # Element connectivity straight from the section
                values = inpsec.contents.values
            else:
                # Nodal coordinates looked up in the nodal table
                values = inp_sec_node_coord(sections, inpsec)
            # Values start at the second column; the index is in
            # the first one
            colini = 1
            colidx = 0
            # TODO This could use a map(..., zip(...)) ??
            for childval in iter(values):
                # Add values to each child
                inp_feb_add_indexed_vals(xmlset,
                                         febchildtag,
                                         childval,
                                         childfmt,
                                         txtfmt,
                                         colini,
                                         colidx)
        elif elmtag == "NodeSet":
            # TODO This could use a map(..., zip(...)) ??
            for childnum in inpsec.nodes:
                # NOTE(review): the .inp strings were converted
                # to numbers in SectionInp and are converted back
                # to strings here -- is that desirable?
                et.SubElement(xmlset,  # parent
                              febchildtag,  # tag
                              attrib=childfmt(febchildtag,
                                              childnum))
# * Driver script: .geo -> .inp -> patched .feb
# NOTE(review): ~geofname~, ~infebfname~, ~outfebfname~ (and the
# optional ~DEBUG~ flag) are not defined anywhere in this file;
# they must be injected before this point -- confirm how the
# script is driven (exec/IDE session?)
# Gmsh .geo file
check_path(geofname, "Gmsh")
# Convert geo to .inp
inpfname = gmsh_geo2inp_3d(geofname)
# # Extract nodes from .inp
# nodes = get_inp_nodes_awk(inpFname)
# Get the position and headings (tags) of the sections
tags = inp_tags(inpfname)
# Extract the values in-between tags
# (opens and reads the file again)
sections = inp_tags2sec(tags, inpfname)
# Set nodes to each ~elset~ section from the .inp
inp_seclst_elset_elements2nodes(sections)
# * Give it a name which clearly relates it to .inp
inpsecs = sections
# Open the input .feb
febtree = et.parse(infebfname)
# Get the geometry section
febgeo = febtree.find("./Geometry")
if "DEBUG" in dir() and DEBUG:
    for I in list(febgeo):
        print(I.tag, I.attrib)
# * Copy contents of parsed .inp into parsed .feb
for subtree in ["NodeSet", "Elements", "Nodes"]:
    inp_feb_replace_children_by_tag(febgeo, inpsecs,
                                    subtree)
    if "DEBUG" in dir() and DEBUG: print(subtree)
# * Save the result
if "DEBUG" in dir() and DEBUG:
    print("Saving as " + outfebfname)
# TODO: pretty print
febtree.write(outfebfname, encoding='UTF-8', xml_declaration=True)
def feb_geom_childtag_from_elsubtype(subtype):
    """Map a FEBio geometry subtree tag ("Nodes", "NodeSet",
    "Elements") to the tag of its children ("node"/"elem").
    NOTE(review): this file repeats its whole contents; this later
    duplicate definition is the one in effect at runtime.
    """
    subel = {
        "Nodes": "node",
        "NodeSet": "node",
        "Elements": "elem",
    }
    return subel[subtype]
def feb_childtag_attr_format(childtag, childnum):
    """Attribute dict ({"id": "<childnum>"}) for an "elem"/"node"
    child of a FEBio geometry subtree.
    NOTE(review): duplicate of the earlier definition (this later
    copy is the one in effect).
    """
    attr = {
        "elem": {"id": "%d" % childnum},
        "node": {"id": "%d" % childnum},
    }
    return attr[childtag]
def feb_childtag_text_format(childtag, vals,
                             def_fmt="{},"):
    """Comma-separated text for the values of an "elem"/"node"
    child (the trailing separator is stripped).
    NOTE(review): duplicate of the earlier definition. Also note
    that a generator passed as ~vals~ is consumed while building
    the first dict entry, leaving the second one empty.
    """
    # def_fmt=" {: 0.6e},"
    text = {
        # Comma separated values
        "elem": "".join(def_fmt.format(VAL)
                        for VAL in vals)[:-1],
        "node": "".join(def_fmt.format(VAL)
                        for VAL in vals)[:-1],
    }
    return text[childtag]
def inp_feb_match_children_by_tag(febsubset, elmtag,
                                  inpnames):
    """Find which children of ~febsubset~
    (i.e. ~febsubset[I].get("name")~) have a pair in ~inpnames~.
    NOTE(review): duplicate definition (the file repeats its whole
    contents); this later copy is the one in effect at runtime.

    Arguments
    ---------
    febsubset -- ElementTree element with some substructure of a
        FEBio file
    elmtag -- (str) tag of a node (a.k.a. branch, subelement,
        child, tag, etc.) of FEBio (e.g. "NodeSet")
    inpnames -- (list(str)) names (e.g. elset=name) of the
        sections in an .inp file

    Output
    ------
    list(int) -- one entry PER CHILD of ~febsubset~ (same
        indexing): the index into ~inpnames~ of the first match,
        or -1 when there is no match

    Requires
    --------
    import numpy as np
    """
    # *** Container for the matches
    # BUGFIX: one slot per child of ~febsubset~; the previous
    # version allocated one slot per *named* child only, although
    # the result is indexed by child position, giving an
    # IndexError / misaligned results whenever unnamed children
    # precede named ones
    matchind = [-1 for I in febsubset]
    # *** Find those `elmtag' (e.g. NodeSet) in the .feb geometry
    # which are named in the .inp
    # The filter: iterate over the indices of the named children
    # only
    for xmlind in filter(lambda I:
                         "name" in febsubset[I].keys(),
                         range(len(febsubset))):
        xmlset = febsubset[xmlind]
        xmlname = xmlset.get("name")
        ind = np.where(np.isin(inpnames, xmlname))[0]
        if len(ind) > 1 and xmlset.tag == elmtag:
            # TODO: check for multiple occurences
            print("WARN: Found a repeated section in the .inp")
            print("WARN: " + elmtag)
            print("WARN: Using the first occurrence")
        # np.where returns an array; use the first match only
        if len(ind) > 0 and xmlset.tag == elmtag:
            matchind[xmlind] = ind[0]
    return matchind
def inp_sec_node_coord(sections, inpsec, nodaltag="node"):
    """Get the coordinates of the nodes of a SectionInp.
    NOTE(review): duplicate of the earlier definition (this later
    copy is the one in effect).

    Arguments
    ---------
    sections -- (list(SectionInp())) One of these sections
        has a ~SectionInp.ttag = nodaltag~ with the indices
        and coordinates of the nodes
    inpsec -- (SectionInp()) A section with a ~numpy.array~
        of nodes in ~SectionInp.nodes~. The nodal indices
        and coordinates are needed for this section.
    inpsec.nodes -- (numpy.array()) nodal indices which
        make up the SectionInp
    nodaltag -- (str(); default="node") Used to find the
        first section from ~sections~ which has the nodal
        indices and coordinates

    Output
    ------
    numpy.array

    Requires
    --------
    import numpy as np

    See also
    --------
    inp_feb_replace_children_by_tag
    """
    # Get the .inp section with the nodal coordinates (the first
    # matching one; raises StopIteration if there is none)
    nodsec = next(filter(lambda s:
                         s.ttag == nodaltag,
                         sections))
    # The section has a numerical list of nodes.
    secnodes = inpsec.nodes
    # The values that we want are the
    # indices and coordinates of those nodes
    # In the .inp, the indexing starts with 1, in the
    # DataFrame, it starts with 0, thus -1
    # NOTE(review): assumes node ids are contiguous from 1 and
    # the rows are sorted by id -- confirm for general meshes
    res = nodsec.contents.iloc[secnodes - 1]
    return res.values
def inp_feb_add_indexed_vals(xmlset,
                             febchildtag,
                             childval,
                             childfmt,
                             txtfmt,
                             colini,
                             colidx=0):
    """Append one child to ~xmlset~ built from one row of .inp
    values. Used in inp_feb_replace_children_by_tag.
    NOTE(review): duplicate of the earlier definition (this later
    copy is the one in effect).

    For .inp sections like
        index1 val11 val12 ...
        index2 val21 val23 ...
    ~childval~ is one such row, indices included:
    colidx = 0 (usually the first column) holds the index and
    colini = 1 (usually the second column) is where the values
    start.
    febchildtag can be something like "Elements" or "Nodes"
    (could be something else).
    childfmt is a function to format the attributes
    (ElementTree) of a subtree child.
    txtfmt is a function to format the values as a string
    (it must return a string and take one line of values,
    i.e. [index3, val31, val33] -> "index3, val31, val33").
    """
    # Indices are in the first column
    childnum = childval[colidx]
    # Parse the required attributes for the given tag
    attr = childfmt(febchildtag, childnum)
    # Add the main attributes of the
    # children
    child = et.SubElement(xmlset,  # parent
                          febchildtag,  # tag
                          attrib=attr)
    # Values from a specific column onwards (usually the
    # second: colini = 1)
    valcrop = childval[colini:]
    # Format values according to the tag
    childtxt = txtfmt(febchildtag, valcrop)
    # Add the values as text
    child.text = childtxt
def inp_feb_replace_children_by_tag(febsubset, sections,
                                    elmtag):
    """Replace the contents of each child of ~febsubset~
    (i.e. ~febsubset[I].get("name")~) whose name matches the name
    of a section in ~sections~.
    NOTE(review): duplicate definition (the file repeats its whole
    contents); this later copy is the one in effect at runtime.

    Arguments
    ---------
    febsubset -- ElementTree element with some substructure of a
        FEBio file with (possibly) named subtrees
    sections -- (list(SectionInp)) presumably has some elements
        with names matching those in ~febsubset~
    elmtag -- (str) tag of the FEBio children to process:
        "Nodes", "Elements" or "NodeSet"

    Requires
    --------
    import numpy as np
    """
    # *** Names of the sections in the .inp
    inpnames = [VAL.name for VAL in sections]
    # *** ~matchind~ uses the same indexing as ~febsubset~; each
    # item points into ~sections~ (-1 when there is no match)
    matchind = inp_feb_match_children_by_tag(febsubset,
                                             elmtag,
                                             inpnames)
    # ** Modify the matched items in the .feb with the
    # corresponding items from the .inp
    febchildtag = feb_geom_childtag_from_elsubtype(elmtag)
    # *** Functions to format the children's attributes and text
    childfmt = feb_childtag_attr_format
    txtfmt = feb_childtag_text_format
    for febidx, inpidx in enumerate(matchind):
        # inpidx < 0 means: no match for this child
        if inpidx < 0:
            continue
        # Convenient names for the matching subtree and section
        # BUGFIX: index ~febsubset~ (the parameter); the previous
        # code indexed the global ~febgeo~, which only worked
        # because the caller happened to pass that same object
        xmlset = febsubset[febidx]
        inpsec = sections[inpidx]
        # Remove all children (xmlset.clear would also delete the
        # attributes of the set, not just its children)
        for old_child in list(xmlset):
            xmlset.remove(old_child)
        if elmtag in ("Elements", "Nodes"):
            # Both have rows shaped like:
            # index1 val11 val12 ...
            if elmtag == "Elements":
                # Element connectivity straight from the section
                values = inpsec.contents.values
            else:
                # Nodal coordinates looked up in the nodal table
                values = inp_sec_node_coord(sections, inpsec)
            # Values start at the second column; the index is in
            # the first one
            colini = 1
            colidx = 0
            # TODO This could use a map(..., zip(...)) ??
            for childval in iter(values):
                # Add values to each child
                inp_feb_add_indexed_vals(xmlset,
                                         febchildtag,
                                         childval,
                                         childfmt,
                                         txtfmt,
                                         colini,
                                         colidx)
        elif elmtag == "NodeSet":
            # TODO This could use a map(..., zip(...)) ??
            for childnum in inpsec.nodes:
                # NOTE(review): the .inp strings were converted
                # to numbers in SectionInp and are converted back
                # to strings here -- is that desirable?
                et.SubElement(xmlset,  # parent
                              febchildtag,  # tag
                              attrib=childfmt(febchildtag,
                                              childnum))
# * Driver script: .geo -> .inp -> patched .feb
# NOTE(review): this entire driver (and every definition above it)
# is a verbatim duplicate of the first half of the file, so the
# pipeline runs twice; ~geofname~, ~infebfname~, ~outfebfname~ and
# the optional ~DEBUG~ flag are not defined in this file and must
# be injected before this point
# Gmsh .geo file
check_path(geofname, "Gmsh")
# Convert geo to .inp
inpfname = gmsh_geo2inp_3d(geofname)
# # Extract nodes from .inp
# nodes = get_inp_nodes_awk(inpFname)
# Get the position and headings (tags) of the sections
tags = inp_tags(inpfname)
# Extract the values in-between tags
# (opens and reads the file again)
sections = inp_tags2sec(tags, inpfname)
# Set nodes to each ~elset~ section from the .inp
inp_seclst_elset_elements2nodes(sections)
# * Give it a name which clearly relates it to .inp
inpsecs = sections
# Open the input .feb
febtree = et.parse(infebfname)
# Get the geometry section
febgeo = febtree.find("./Geometry")
if "DEBUG" in dir() and DEBUG:
    for I in list(febgeo):
        print(I.tag, I.attrib)
# * Copy contents of parsed .inp into parsed .feb
for subtree in ["NodeSet", "Elements", "Nodes"]:
    inp_feb_replace_children_by_tag(febgeo, inpsecs,
                                    subtree)
    if "DEBUG" in dir() and DEBUG: print(subtree)
# * Save the result
if "DEBUG" in dir() and DEBUG:
    print("Saving as " + outfebfname)
# TODO: pretty print
febtree.write(outfebfname, encoding='UTF-8', xml_declaration=True)