geo2secinp.org


  # Regular expressions (to match .inp tags)
  import re
  # To slice file iterators between tags
  import itertools as itls
  import numpy as np
  import pandas as pd
  from pandas import DataFrame as DF
  # To run commands in GNU/Linux (or other platforms)
  import subprocess as sp
  # To process XML (febio files)
  import xml.etree.ElementTree as et
  # To check that files exist
  from os import path

  def check_path(fname, fname_type=None):
      """
      Requires
      --------
      from os import path
      """
      # Default value
      if fname_type is None:
          fname_type = ""
      # Make sure that fname_type is a string
      if isinstance(fname_type, str):
          # set blank space before and after
          fname_type = " " + fname_type.strip() + " "
      else:
          raise ValueError("fname_type must be a character string")

      if not path.exists(path.expandvars(fname)):
          raise ValueError("The" + fname_type + "file does not exist:  " + fname)

  def gmsh_geo2inp_3d(geoFname):
      inpFname = geoFname.replace(".geo", ".inp")
      sp.run(["gmsh", "-v", "0", "-3", "-optimize_ho", "-format",
              "inp", "-o", inpFname, geoFname, "."])
      return inpFname
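
  # Usage sketch (hypothetical file name): assuming ~gmsh~ is on
  # the PATH, this meshes the geometry in 3D and writes the .inp
  # file next to the .geo file:
  # >>> gmsh_geo2inp_3d("model.geo")
  # 'model.inp'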


  def inp2feb_elem(inp_type):
      """Takes a type of element from .inp and translates it to .feb"""
      # From pyFEBio:
      feb_type = {"c3d8": "hex8",
                  'c3d4': 'tet4',
                  'c3d6': 'penta6',
                  'cpe3': 'tri3',
                  'cpe4': 'quad4'}
      return feb_type[inp_type]
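
  # For example, an 8-node hexahedron in .inp notation maps to
  # the FEBio element name:
  # >>> inp2feb_elem("c3d8")
  # 'hex8'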


  def inp_default_tags():
      return ["node", "element", "elset", "nset", "surface"]


  def regex_comp(text, regex_tags):
      """Checks if a list of regular expressions (Python style)
      match a line of text

      Arguments
      ---------
      text -- (str) some text to be compared to the regexes
      regex_tags -- (list(str)) list of strings to be used to
          do the regular expression comparison

      Output
      ------
      True if any of the regular expressions match the text
      """
      # Compile every tag into a Python regular expression
      regexes = list(map(re.compile, regex_tags))
      # Check if the line has the tag
      # https://stackoverflow.com/a/3040745
      return any(regex.match(text) for regex in regexes)
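
  # A minimal example with one of the default tag regexes:
  # >>> regex_comp("*element,type=c3d8", [r"^\s*\*element"])
  # True
  # >>> regex_comp("1, 0.0, 0.0, 0.0", [r"^\s*\*element"])
  # False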


  def line2inp_tag(line, **kwargs):
      """Checks whether a line matches a series of regular
      expressions and returns the line, the position (for
      ~fd.seek()~, type and name (if available))

      Arguments
      ---------
      line -- (str) a string character (a line of a file)
      fd -- (file object: open(fname)) a file descriptor from
          where the line comes. The current position of ~fd~
          will be used as the returned position of the tag.
      ,**kwargs -- (dict()) a dictionary with extra arguments
          - regex_tags -- (list(str()); optional) a list of
              strings, each representing a regular expression
              (Python) to test the line to see if it has any
              known tags in the .inp file. If not provided,
              the defaults are used.
      """
      # Remove the spaces (the newline was stripped by the caller)
      line = line.replace(" ", "")
      # Store the lower case equivalent of the line;
      # (faster instead of lower-casing every time?)
      line_low = line.lower()

      if "regex_tags" in kwargs:
          regex_tags = kwargs["regex_tags"]
          if not (isinstance(regex_tags, (list, tuple))
                  and all(isinstance(tag, str) for tag in regex_tags)):
              raise ValueError(
                  "The tags need to be regexes like ['regex1', 'regex2', ...].")
      else:
          # Lines starting with any amount of space, followed
          # by an asterisk and the tag
          regex_tags = ["^\s*\*" + tag
                        for tag in inp_default_tags()]
      # Check if the line has any .inp tags
      if regex_comp(line_low, regex_tags):
          # If any tags are found, extract the type and name
          # Get a list from the comma-separated text
          fields = line.split(",")
          # In the following, use the fields_low to compare
          fields_low = line_low.split(",")
          # Create an iterator for the fields' indices
          ran_fields = range(len(fields))
          # The tag is the first keyword without the asterisk
          # Return lower case
          ttag = fields_low[0].lstrip("*")
          # Get the type by finding the first "type=" in the
          # list. If not found return empty string.
          # Return lower case
          subtype = next((fields_low[idxfld].partition("=")[2]
                          for idxfld in ran_fields
                          if "type=" in fields_low[idxfld]), "")
          # Get the name by finding the first "set=" or "name=" in
          # the list. If not found return empty string
          # Return original case (upper- or lowercase)
          name = next((fields[idxfld].partition("=")[2]
                       for idxfld in ran_fields
                       if "set=" in fields_low[idxfld]
                       or "name=" in fields_low[idxfld]), "")
          # Establish if this is an .inp elset
          iselset = next((True
                          for idxfld in ran_fields
                          if "set=" in fields_low[idxfld]), False)
      else:
          if line_low.startswith("*"):
              warnstr = "WARNING: This line looks like a tag," + \
                  " but was not processed: " + line
              print(warnstr)
          # Set empty values
          ttag = ""
          subtype = ""
          name = ""
          iselset = False
      return ttag, subtype, name, line, iselset
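
  # A sketch of what the parsing yields for a typical tag line:
  # spaces are removed, the tag and type are lower-cased, and
  # the set name keeps its original case:
  # >>> line2inp_tag("*ELEMENT, type=C3D8, ELSET=Volume1")
  # ('element', 'c3d8', 'Volume1',
  #  '*ELEMENT,type=C3D8,ELSET=Volume1', True)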


  def feb_tag_types():
      return ["nodes", "elements", "nodeset", "elementset"]


  def csvline2lst(text):
      return text.rstrip().split(",")


  def csvlst2df(text_lst):
      """
      Requires
      --------
      csvline2lst
      from pandas import DataFrame as DF
      """
      return DF.from_records([csvline2lst(VAL)
                              for VAL in text_lst])
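
  # Ragged comma-separated lines become a DataFrame padded with
  # None (the values are still strings at this point; they are
  # turned into numbers or NaN later by ~SectionInp.fill~):
  # >>> csvlst2df(["1,2,3", "4,5"])
  #    0  1     2
  # 0  1  2     3
  # 1  4  5  None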


  def csvlst2arr(text_lst):
      """
      Requires
      --------
      csvline2lst
      import numpy as np
      """
      return np.array([csvline2lst(VAL)
                       for VAL in text_lst])


  def inp_tags2sec(tags, fname):
      """.inp sections to Pandas dataframe"""
      # # Create container for the sections
      # # (Use arrays instead of lists to be able to have
      # # numpy.view to access the elements without creating new
      # # lists; save memory and processing time)
      # sections = np.array([SectionInp() for I in tags])
      # # The above is not needed:
      # >>> def test(arr):
      # ...:     arr[0] = None
      # ...:     return arr
      # >>> arr = [[1, 2, 3], [45]]
      # >>> [test(I) for I in arr]
      # [[None, 2, 3], [None]]
      # >>> arr
      # [[None, 2, 3], [None]]
      sections = [SectionInp() for I in tags]

      with open(fname, "r", newline=None) as inp_fd:
          # Iterate in-between tags to extract the values of each
          # section
          # TODO: consider pandas, numpy, h5f (my files are not
          # that big)
          #       https://stackoverflow.com/a/45796754
          #       https://stackoverflow.com/a/40469450

          # From the last tag to the end of the file (if there is
          # only one tag, it is the same as the last one--see
          # below)

          # If there is more than one tag
          if len(tags) > 1:
              # Iterate until the penultimate tag
              for idxtag, curtag in enumerate(tags[:-1]):
                  sec_ini = curtag[0]
                  sec_fin = tags[idxtag + 1][0] - 1
                  # -1 for 0 indexing
                  sec_len = sec_fin - sec_ini
                  if sec_len >= 0:
                      # Read one line (to move the file cursor)
                      # If the .inp file is well formed,
                      # sec_len >= 0 (always), but let's make sure
                      inp_fd.readline()
                  if sec_len > 0:
                      # Iterate between lines
                      # https://stackoverflow.com/a/36854340
                      sec_itr = itls.islice(inp_fd, 0, sec_len)
                      # Convert the lines into a DataFrame
                      contents = csvlst2df(sec_itr)
                      # Populate the sections container
                      ttag = curtag[1][0]
                      subtype = curtag[1][1]
                      name = curtag[1][2]
                      text = curtag[1][3]
                      iselset = curtag[1][4]
                      sections[idxtag].fill(curtag[0], ttag,
                                            subtype, contents,
                                            name, text, iselset)
          if len(tags) > 0:
              # The conditional above should not be necessary,
              # but tags[-1] will fail if someone does strange
              # things here
              curtag = tags[-1]

              # Whether it's the last or the only tag, do this:
              # Read one line (to move the file cursor)
              inp_fd.readline()
              # TODO Consider itertools or map
              contents = csvlst2df(inp_fd)
              # Populate the sections container
              ttag = curtag[1][0]
              subtype = curtag[1][1]
              name = curtag[1][2]
              text = curtag[1][3]
              iselset = curtag[1][4]
              sections[-1].fill(curtag[0], ttag, subtype,
                                contents, name, text, iselset)
          else:
              raise ValueError(
                  "The list of tags is empty." +
                  " Check your inputs.")
          return sections
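
  # A sketch of the intended pipeline (hypothetical file name):
  # first locate the tags, then cut the file into sections:
  # >>> tags = inp_tags("mesh.inp")
  # >>> sections = inp_tags2sec(tags, "mesh.inp")
  # >>> sections[1].ttag, sections[1].contents.shape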


  def df_remove_emptystr(data):
      """Remove cells with empty strings in a pandas DataFrame
      Requires
      --------
      import numpy as np
      """
      # Replace every empty cell with NaN and remove any
      # column with NaN only
      # http://stackoverflow.com/questions/10857924/ddg#10859883
      return data.replace("", np.nan).dropna(axis=1,
                                             how="all")


  def inp_sec_test_elemelset(section):
      return section.ttag == "element" and section.iselset


  def inp_sec_test_tag(section, tag):
      return section.ttag == tag


  def inp_sec_test_elem(section):
      """TODO: replace by inp_sec_test_tag(section, tag)"""
      return inp_sec_test_tag(section, "element")


  def inp_seclst_elem_elsets_filt(sections):
      return filter(inp_sec_test_elem, sections)


  def inp_seclst_elem_elsets(sections):
      """Uses a filter to get those ~SectionInp~ in a list
      whose ~ttag~ is "element" and are marked as being sets
      through ~iselset~. It uses a filter to get the
      equivalent of
          for s in sections:
              if s.ttag == "element" and s.iselset:
                  s

      See also
      --------
      SectionInp (class)
      """
      test_fun = inp_sec_test_elemelset
      return [s for s in
              filter(test_fun, sections)]


  def inp_seclst_elem_elsets_inds(sections):
      """Uses a filter to get the element indices (first
      column in the original .inp file) those ~SectionInp~ in
      a list whose ~ttag~ is "element" and are marked as being
      sets through ~iselset~. It uses a filter to get the
      equivalent of
          for s in sections:
              if s.ttag == "element" and s.iselset:
                  s.contentss.iloc[:, 0]

      See also
      --------
      SectionInp (class)
      """
      test_fun = inp_seclst_elem_elsets_filt
      return [section.contents.iloc[:, 0]
              for section in test_fun(sections)]


  def inp_seclst_elem_elsets2nodes(elemset, elset,
                                   slow=False):
      """Returns the unique nodes of an .inp elemental node
      set, where ~elemset.contents~ is a pandas.DataFrame
      where the first column has the indices of the elements
      (one element per row), and the rest are tne node indices
      which make each element. In the following example,
      pandas prints the column number above the data and the
      row number to the left. The data is given from the
      second row and second column.
      >>> elemset.contents.head()
           0    1    2    3    4    5    6    7    8
      0  873    1   25  195   21  119  350  619  299
      1  874  119  350  619  299  120  351  620  300
      2  875  120  351  620  300  121  352  621  301
      3  876  121  352  621  301   11  118  377   93
      4  877   21  195  196   22  299  619  622  302
      >>> elem_inds = elemcont.iloc[:,0]
      >>> elem_inds.head().values
      array([873, 874, 875, 876, 877])
      """
      if slow:
          # Shorter variable
          elemcont = elemset.contents
          # Extract the indices of the element (first
          # column in the original .inp file)
          elem_inds = elemcont.iloc[:, 0]
          # All values of the list of elements (~elset~) in
          # a vector
          all_elem = elset.contents.values.flat
          # Find those elements in ~elset~ which are in
          # ~elemset~
          elms_mask = elem_inds.isin(all_elem)
          # Extract the list of element indices and nodes
          subelmset = elemcont[elms_mask]
          # Get the nodes only (get rid of first column)
          subelmnodes = subelmset.iloc[:, 1:]
      else:
          # Same as the block above in a single expression
          # (faster): mask the rows of ~elemset.contents~ whose
          # element index appears in ~elset~, then drop the
          # first column
          subelmnodes = \
              elemset.contents[
                  elemset.contents.iloc[:, 0].isin(
                      elset.contents.values.flat)].iloc[:, 1:]
      return subelmnodes.values.flat
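
  # For instance, with ~elemset~ and ~elset~ as in the docstring
  # above, the caller can reduce the flat result to the unique
  # node indices of the set:
  # >>> nodes = inp_seclst_elem_elsets2nodes(elemset, elset)
  # >>> np.unique(nodes)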


  def inp_seclst_elset_elements2nodes(sections, safer_code=True):
      """For all the sections whose tag is ~elset~, find the
      corresponding nodes in the sections whose ~ttag~ is
      "element" and are marked as being sets through ~iselset~.

      Explanation
      -----------
      >>> for I, section in enumerate(sections):
      ...:     print(I, section.ttag, section.name)
      0
      1 node
      2
      3 element Surface6
      4 element Surface21
      5 element Surface30
      6 element Surface42
      7 element Surface64
      8 element Surface86
      9 element Surface100
      10 element Surface104
      11 element Volume1
      12 element Volume2
      13 element Volume3
      14 element Volume4
      15 elset zFix
      16 elset yFix
      17 elset xFix
      18 elset interfaceRigid
      19 elset dbWide_vol
      >>> # Get the index of the first section whose name is
      >>> # "volume" (for example)
      >>> idxelms = next((I
      ...:                 for I in range(len(sections))
      ...:                 if "volume" in sections[I].name.lower()
      ...:                 and sections[I].ttag == "element"
      ...:                 and sections[I].iselset))
      >>> elms = sections[idxelms]
      >>> elms.ttag
      'element'
      >>> elms.iselset
      True
      >>> elms.name
      'Volume1'
      >>> # The first column has the indices of the elements
      >>> # The rest are the node indices which make each element
      >>> elms.contents.head()
           0    1    2    3    4    5    6    7    8
      0  873    1   25  195   21  119  350  619  299
      1  874  119  350  619  299  120  351  620  300
      2  875  120  351  620  300  121  352  621  301
      3  876  121  352  621  301   11  118  377   93
      4  877   21  195  196   22  299  619  622  302
      >>>
      >>> # Get the indices
      >>> elms_indxs = elms.contents.iloc[:,0]
      >>> elms_indxs.head().values
      array([873, 874, 875, 876, 877])
      >>> elms_indxs.tail().values
      array([1068, 1069, 1070, 1071, 1072])
      >>>
      >>>
      >>> # Use last elset (I know it has values from "Volume1")
      >>> elset = sections[-1]
      >>> elset.name
      'dbWide_vol'
      >>> con = elset.contents
      >>> # These are the indices of elements which belong to
      >>> # ~dbWide_vol~
      >>> con.head()
           0    1    2    3    4    5    6    7    8    9
      0  873  874  875  876  877  878  879  880  881  882
      1  883  884  885  886  887  888  889  890  891  892
      2  893  894  895  896  897  898  899  900  901  902
      3  903  904  905  906  907  908  909  910  911  912
      4  913  914  915  916  917  918  919  920  921  922


      >>> # Check if a list of values is in a DataFrame
      >>> # (with Numpy)
      >>> list(con.iloc[:2].values.flat)
      [873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883,
      884, 885, 886, 887, 888, 889, 890, 891, 892]
      >>> # Use test instead, to make it more evident
      >>> test = [873, 900, 901, 902, 903, 904, 999]
      >>> res = np.isin(elms_indxs.values, test)
      >>> # Node numbers of the sought elements
      >>> # Compare this result with elms.contents.head()
      >>> elms.contents.iloc[res, 1:]
             1    2    3    4    5    6    7    8
      0      1   25  195   21  119  350  619  299
      27   621  633  636  624  377  381  382  378
      28   196  200  201  197  622  634  637  625
      29   622  634  637  625  623  635  638  626
      30   623  635  638  626  624  636  639  627
      31   624  636  639  627  378  382  383  379
      126  680  692  695  683  681  693  696  684
      >>>
      >>> # Check if a list of values is in a DataFrame
      >>> # (with Pandas)
      >>> # Node numbers of the sought elements
      >>> # Compare this result with elms.contents.head()
      >>> elms.contents[elms_indxs.isin(test)].iloc[:, 1:]
             1    2    3    4    5    6    7    8
      0      1   25  195   21  119  350  619  299
      27   621  633  636  624  377  381  382  378
      28   196  200  201  197  622  634  637  625
      29   622  634  637  625  623  635  638  626
      30   623  635  638  626  624  636  639  627
      31   624  636  639  627  378  382  383  379
      126  680  692  695  683  681  693  696  684
      >>> # This would give the whole result
      >>> elms.contents[elms_indxs.isin(con.values.flat)].iloc[:, 1:]
      """
      # The ~safer_code~ branch exists to keep the code
      # understandable: both paths produce the same result,
      # but the slower one is easier to read and maintain

      # Work with the sections whose tag is "elset"
      # (a set of elements)

      # The iterator can be created as
      # 1.
      #     def check_elset(section):
      #         return section.ttag == "elset"
      #     elset_itr = list(filter(check_elset, sections))
      # or 2.
      #     elset_itr = [s for s in sections if s.ttag == "elset"]
      if safer_code:
          # Slower calculation, clearer and safer code

          get_nodes_func = inp_seclst_elem_elsets2nodes
          # Iterator for sections whose tag is "element" and
          # which are sets (reused inside the loop)
          elemset_itr = list(
              inp_seclst_elem_elsets_filt(sections))
          # The filter below is equivalent to the two
          # alternatives above, but it creates an iterator
          # instead of a list (faster)
          for elset in filter(
                  lambda s:
                      s.ttag == "elset",
                  sections):
              subelmnodes = [get_nodes_func(elemset, elset)
                             for elemset in elemset_itr]
              # Join all the sub-element nodes for the final
              # nodal set for each elset
              # TODO: is the unique really necessary?
              if len(subelmnodes) > 0:
                  elset.nodes = np.unique(np.hstack(subelmnodes))
      # else:
      #     # Same result as above
      #     # Faster calculation for larger data ??
      #     # testnodes = np.unique(
      #     nodes = [np.unique(np.hstack(
      #         [
      #             get_nodes_func(elemset, slow=0)
      #             for elemset in filter(
      #                 lambda s:
      #                     s.ttag == "element"
      #                     and s.iselset,
      #                     sections)]))
      #         for elset in filter(
      #             lambda s:
      #                 s.ttag == "elset",
      #             sections)
      #         ]
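
  # End-to-end sketch: ~sections~ is modified in place, so after
  # the call each "elset" section carries its nodal set:
  # >>> inp_seclst_elset_elements2nodes(sections)
  # >>> sections[-1].nodes  # e.g. the nodes of 'dbWide_vol'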

  def inp_tags(inpfname):
      # On input, if newline is None, universal newlines mode is
      # enabled
      with open(inpfname, "r", newline=None) as inp_fd:
          # Read all lines, take away the newline character
          #   Add 1 to the end of ~open()~ to read in buffered mode
          #   (I guess it's the same for enumerate):
          #   https://stackoverflow.com/a/620494
          tags_pos = ((I, line.rstrip("\n"))
                      for I, line in enumerate(inp_fd, 1)
                      if line.startswith("*"))
          tags = [(tag[0], line2inp_tag(tag[1])) for tag in tags_pos]

      return tags
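
  # Each entry pairs the 1-based line number of a tag with the
  # tuple returned by ~line2inp_tag~; a sketch with hypothetical
  # line numbers:
  # >>> inp_tags("mesh.inp")[:2]
  # [(3, ('node', '', '', '*NODE', False)),
  #  (721, ('element', 'c3d8', 'Volume1',
  #         '*ELEMENT,type=C3D8,ELSET=Volume1', True))]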


  class SectionInp:
      def __init__(self):
          """
          tagtext -- str() the whole tag, unprocessed, e.g.
              '*ELEMENT,type=C3D8,ELSET=Volume1'
          pos -- int() line number of the tag in the
              original file
          subtype -- str() type of tag, e.g. "c3d8"
          name -- str() name, if provided
          ttag -- str() type of tag, e.g.'element'
          contents -- pandas.DataFrame() contents of the section
                  1, 2,   3, NaN
                  4, 5, NaN, NaN
                  6, 7,   8,   9
          """
          # Initialize with empty values
          # No type declaration in Python
          self.tagtext = ""
          self.pos = int()
          self.name = ""
          self.subtype = ""
          self.ttag = ""
          self.nodes = np.array([])
          # I use Pandas DataFrame and not Numpy arrays,
          # because a section in the .inp file could very well
          # look like this:
          #     1, 2, 3,
          #     4, 5,
          #     6, 7, 8, 9,
          # and reading this as a numpy array would take extra
          # processing. By using DataFrame, I get this:
          #     1, 2,   3, NaN, NaN
          #     4, 5, NaN, NaN, NaN
          #     6, 7,   8,   9, NaN
          self.contents = DF([[0]])
          self.iselset = False

      def fill(self, pos, ttag, subtype, contents, name="",
               tagtext="", iselset=False, nodes=np.array([])):
          """Populates a SectionInp. Turns empty strings into
          NaN from ~contents~, and removes columns whose only
          values are NaN.
          """
          self.tagtext = tagtext
          self.pos = pos
          self.name = name
          self.subtype = subtype
          self.ttag = ttag
          # Remove empty strings (from ending lines with
          # comma) and assign to contents
          contents = df_remove_emptystr(contents)
          # Convert the data into numbers, and replace any
          # error to NaN
          self.contents = contents.apply(pd.to_numeric,
                                         errors="coerce")
          self.iselset = iselset
          self.nodes = nodes
