diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index be60ae2..c043652 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -21,17 +21,17 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
- python-version: [3.7, 3.8]
+ python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Cache pip
- uses: actions/cache@v2
+ uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('requirements-dev.txt') }}
@@ -41,7 +41,7 @@ jobs:
- name: Install dependencies
run: |
if [ "$RUNNER_OS" == "Linux" ]; then
- sudo apt-get -y install libncurses5-dev libncursesw5-dev libncurses5
+ sudo apt-get update && sudo apt-get -y install libncurses-dev
fi
python -m pip install --upgrade pip
pip install -r requirements-dev.txt
@@ -73,7 +73,7 @@ jobs:
uses: docker/login-action@v2
with:
registry: ghcr.io
- username: ${{ secrets.GHCR_USER }}
+ username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
@@ -86,7 +86,7 @@ jobs:
uses: docker/build-push-action@v4.0.0
with:
context: .
- push: true
+ push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
diff --git a/Dockerfile b/Dockerfile
index 93ba14b..7571e98 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,9 +6,7 @@ LABEL org.opencontainers.image.source=https://github.com/RuleWorld/PyBioNetGen
LABEL org.opencontainers.image.description="PyBNG container"
LABEL org.opencontainers.image.licenses=MIT
RUN apt-get update && apt-get install -y \
- libncurses5-dev \
- libncursesw5-dev \
- libncurses5
+ libncurses-dev
WORKDIR /src
COPY . /src
RUN pip install --no-cache-dir -r requirements.txt
diff --git a/Issues/parameter_init/parameter_init.py b/Issues/parameter_init/parameter_init.py
index 253062f..670b331 100644
--- a/Issues/parameter_init/parameter_init.py
+++ b/Issues/parameter_init/parameter_init.py
@@ -1,4 +1,4 @@
-import bionetgen
+import bionetgen
parameter = bionetgen.modelapi.structs.Parameter("A0", "10")
-print(parameter.gen_string())
\ No newline at end of file
+print(parameter.gen_string())
diff --git a/Issues/rule_keywords/run_pybng.py b/Issues/rule_keywords/run_pybng.py
index e9d7ec8..03a61fa 100644
--- a/Issues/rule_keywords/run_pybng.py
+++ b/Issues/rule_keywords/run_pybng.py
@@ -1,5 +1,5 @@
import bionetgen
-mname="test_deleteMolecules"
-model= bionetgen.bngmodel(mname+".bngl")
+mname = "test_deleteMolecules"
+model = bionetgen.bngmodel(mname + ".bngl")
print(model)
diff --git a/bionetgen/__init__.py b/bionetgen/__init__.py
index 579538a..c826ee7 100644
--- a/bionetgen/__init__.py
+++ b/bionetgen/__init__.py
@@ -2,3 +2,23 @@
from .modelapi import bngmodel
from .modelapi.runner import run
from .simulator import sim_getter
+
+# sympy is an expensive dependency to import. We delay importing the
+# SympyOdes helpers until they are actually accessed.
+
+__all__ = [
+ "defaults",
+ "bngmodel",
+ "run",
+ "sim_getter",
+ "SympyOdes",
+ "export_sympy_odes",
+]
+
+
+def __getattr__(name):
+ if name in {"SympyOdes", "export_sympy_odes"}:
+ from .modelapi.sympy_odes import SympyOdes, export_sympy_odes
+
+ return SympyOdes if name == "SympyOdes" else export_sympy_odes
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/bionetgen/atomizer/atomizeTool.py b/bionetgen/atomizer/atomizeTool.py
index 1f53182..d8b7d51 100644
--- a/bionetgen/atomizer/atomizeTool.py
+++ b/bionetgen/atomizer/atomizeTool.py
@@ -4,7 +4,6 @@
from bionetgen.core.utils.logging import BNGLogger, log_level
-
d = BNGDefaults()
diff --git a/bionetgen/atomizer/atomizer/analyzeSBML.py b/bionetgen/atomizer/atomizer/analyzeSBML.py
index 60dc263..928d3bc 100644
--- a/bionetgen/atomizer/atomizer/analyzeSBML.py
+++ b/bionetgen/atomizer/atomizer/analyzeSBML.py
@@ -6,7 +6,6 @@
"""
import enum
-import imp
from pyparsing import Word, Suppress, Optional, alphanums, Group, ZeroOrMore
import numpy as np
import json
@@ -820,9 +819,9 @@ def loadConfigFiles(self, fileName):
# deal with modifications
if "modificationDefinition" in reactionDefinition_new:
# TODO: Change file format to be nicer?
- reactionDefinition[
- "modificationDefinition"
- ] = reactionDefinition_new["modificationDefinition"]
+ reactionDefinition["modificationDefinition"] = (
+ reactionDefinition_new["modificationDefinition"]
+ )
# convert new JSON format to old data format
else:
reactionDefinition["modificationDefinition"] = {}
diff --git a/bionetgen/atomizer/atomizer/atomizationAux.py b/bionetgen/atomizer/atomizer/atomizationAux.py
index 1b81410..e5d373a 100644
--- a/bionetgen/atomizer/atomizer/atomizationAux.py
+++ b/bionetgen/atomizer/atomizer/atomizationAux.py
@@ -3,7 +3,6 @@
class CycleError(Exception):
-
"""Exception raised for errors in the input.
Attributes:
diff --git a/bionetgen/atomizer/atomizer/detectOntology.py b/bionetgen/atomizer/atomizer/detectOntology.py
index 2626b94..d4f6cbb 100644
--- a/bionetgen/atomizer/atomizer/detectOntology.py
+++ b/bionetgen/atomizer/atomizer/detectOntology.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
import pprint
import difflib
from collections import Counter
diff --git a/bionetgen/atomizer/atomizer/resolveSCT.py b/bionetgen/atomizer/atomizer/resolveSCT.py
index b722330..d1a5f36 100644
--- a/bionetgen/atomizer/atomizer/resolveSCT.py
+++ b/bionetgen/atomizer/atomizer/resolveSCT.py
@@ -113,9 +113,9 @@ def createSpeciesCompositionGraph(
# lexicalDependencyGraph[element], oldDependency))
"""
if self.database.dependencyGraph[element] != []:
- self.database.alternativeDependencyGraph[
- element
- ] = lexicalDependencyGraph[element]
+ self.database.alternativeDependencyGraph[element] = (
+ lexicalDependencyGraph[element]
+ )
else:
logMess(
"INFO:LAE009",
@@ -1464,9 +1464,9 @@ def selectBestCandidate(
tmpCandidates = namingTmpCandidates
if loginformation:
- self.database.alternativeDependencyGraph[
- reactant
- ] = tmpCandidates
+ self.database.alternativeDependencyGraph[reactant] = (
+ tmpCandidates
+ )
elif all(
sorted(x) == sorted(originalTmpCandidates[0])
for x in originalTmpCandidates
@@ -1568,9 +1568,9 @@ def selectBestCandidate(
namingTmpCandidates = tmpCandidates
else:
- self.database.alternativeDependencyGraph[
- reactant
- ] = namingtmpCandidates
+ self.database.alternativeDependencyGraph[reactant] = (
+ namingtmpCandidates
+ )
logMess(
"WARNING:SCT111",
"{0}:stoichiometry analysis:{1}:conflicts with and naming conventions:{2}:Selecting lexical analysis".format(
diff --git a/bionetgen/atomizer/contactMap.py b/bionetgen/atomizer/contactMap.py
index b41964f..a3b5f9b 100644
--- a/bionetgen/atomizer/contactMap.py
+++ b/bionetgen/atomizer/contactMap.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
# import sys
# sys.path.insert(0, '../utils/')
import utils.consoleCommands as console
diff --git a/bionetgen/atomizer/libsbml2bngl.py b/bionetgen/atomizer/libsbml2bngl.py
index 14d47f1..a65a61d 100644
--- a/bionetgen/atomizer/libsbml2bngl.py
+++ b/bionetgen/atomizer/libsbml2bngl.py
@@ -7,7 +7,6 @@
#!/usr/bin/env python
from collections import OrderedDict
-from telnetlib import IP
import time
import libsbml
import bionetgen.atomizer.writer.bnglWriter as writer
@@ -438,9 +437,9 @@ def extractCompartmentStatistics(
for element in compartmentPairs:
if element[0][0] not in finalCompartmentPairs:
finalCompartmentPairs[element[0][0]] = {}
- finalCompartmentPairs[element[0][0]][
- tuple([element[0][1], element[1][1]])
- ] = compartmentPairs[element]
+ finalCompartmentPairs[element[0][0]][tuple([element[0][1], element[1][1]])] = (
+ compartmentPairs[element]
+ )
return finalCompartmentPairs
@@ -1457,16 +1456,16 @@ def analyzeHelper(
param = ["__epsilon__ 1e-100"] + param
if atomize:
- commentDictionary[
- "notes"
- ] = "'This is an atomized translation of an SBML model created on {0}.".format(
- time.strftime("%d/%m/%Y")
+ commentDictionary["notes"] = (
+ "'This is an atomized translation of an SBML model created on {0}.".format(
+ time.strftime("%d/%m/%Y")
+ )
)
else:
- commentDictionary[
- "notes"
- ] = "'This is a plain translation of an SBML model created on {0}.".format(
- time.strftime("%d/%m/%Y")
+ commentDictionary["notes"] = (
+ "'This is a plain translation of an SBML model created on {0}.".format(
+ time.strftime("%d/%m/%Y")
+ )
)
commentDictionary[
"notes"
@@ -1652,7 +1651,7 @@ def main():
metavar="FILE",
)
- (options, _) = parser.parse_args()
+ options, _ = parser.parse_args()
# 144
rdfArray = []
# classificationArray = []
diff --git a/bionetgen/atomizer/rulifier/componentGroups.py b/bionetgen/atomizer/rulifier/componentGroups.py
index 982e521..f3152ba 100644
--- a/bionetgen/atomizer/rulifier/componentGroups.py
+++ b/bionetgen/atomizer/rulifier/componentGroups.py
@@ -681,9 +681,9 @@ def getContextRequirements(
requirementDependencies[molecule][
"doubleActivation"
].append(relationship)
- processNodes[molecule]["doubleActivation"][
- relationship
- ] = "{0}_{1}".format(molecule, "_".join(label))
+ processNodes[molecule]["doubleActivation"][relationship] = (
+ "{0}_{1}".format(molecule, "_".join(label))
+ )
elif not combination[0] and combination[1]:
if motif in ["ordering"]:
requirementDependencies[molecule][motif].remove(
@@ -700,14 +700,14 @@ def getContextRequirements(
requirementDependencies[molecule]["reprordering"].append(
relationship
)
- processNodes[molecule]["reprordering"][
- relationship
- ] = "{0}_{1}".format(molecule, "_".join(label))
+ processNodes[molecule]["reprordering"][relationship] = (
+ "{0}_{1}".format(molecule, "_".join(label))
+ )
elif not combination[0] and not combination[1]:
- processNodes[molecule]["doubleRepression"][
- relationship
- ] = "{0}_{1}".format(molecule, "_".join(label))
+ processNodes[molecule]["doubleRepression"][relationship] = (
+ "{0}_{1}".format(molecule, "_".join(label))
+ )
if motif == "repression":
requirementDependencies[molecule][motif].remove(
relationship
diff --git a/bionetgen/atomizer/rulifier/postAnalysis.py b/bionetgen/atomizer/rulifier/postAnalysis.py
index 3756a16..c670837 100644
--- a/bionetgen/atomizer/rulifier/postAnalysis.py
+++ b/bionetgen/atomizer/rulifier/postAnalysis.py
@@ -128,11 +128,11 @@ def getParticipatingReactions(self, molecule, componentPair, reactionDictionary)
for x in reactionDictionary[moleculeName][component]
if x in componentPair
]:
- correlationList[
- (component[0], componentComplement)
- ] = reactionDictionary[moleculeName][component][
- componentComplement
- ]
+ correlationList[(component[0], componentComplement)] = (
+ reactionDictionary[moleculeName][component][
+ componentComplement
+ ]
+ )
return correlationList
def getPairsFromMotif(self, motif1, motif2, excludedComponents):
@@ -146,10 +146,10 @@ def getPairsFromMotif(self, motif1, motif2, excludedComponents):
if len(self.motifMoleculeDict[element][molecule]) > 0:
for componentPair in self.motifMoleculeDict[element][molecule]:
if not any(x in excludedComponents for x in componentPair):
- correlationList[
- componentPair
- ] = self.getParticipatingReactions(
- molecule, componentPair, self.patternXreactions
+ correlationList[componentPair] = (
+ self.getParticipatingReactions(
+ molecule, componentPair, self.patternXreactions
+ )
)
moleculeCorrelationList[molecule].update(correlationList)
return dict(moleculeCorrelationList)
@@ -283,10 +283,13 @@ def getClassification(keys, translator):
localAnalysisFlag = True
if not any(
[
- molecule
- in database.prunnedDependencyGraph[x][0]
- if len(database.prunnedDependencyGraph[x]) > 0
- else molecule in x
+ (
+ molecule
+ in database.prunnedDependencyGraph[x][0]
+ if len(database.prunnedDependencyGraph[x])
+ > 0
+ else molecule in x
+ )
for x in difference
]
):
@@ -372,9 +375,9 @@ def getContextMotifInformation(self):
"nullrequirement",
"exclusion",
]:
- motifDictionary[
- frozenset([requirementClass, requirementClass])
- ] = self.getPairsFromMotif(requirementClass, requirementClass, [])
+ motifDictionary[frozenset([requirementClass, requirementClass])] = (
+ self.getPairsFromMotif(requirementClass, requirementClass, [])
+ )
return motifDictionary
def getComplexReactions(self, threshold=2):
@@ -548,10 +551,10 @@ def runTests():
"nullrequirement",
"exclusion",
]:
- motifDictionary[
- (requirementClass, requirementClass)
- ] = modelLearning.getPairsFromMotif(
- requirementClass, requirementClass, ["imod"]
+ motifDictionary[(requirementClass, requirementClass)] = (
+ modelLearning.getPairsFromMotif(
+ requirementClass, requirementClass, ["imod"]
+ )
)
if len(motifDictionary[(requirementClass, requirementClass)]) > 0:
print(
diff --git a/bionetgen/atomizer/rulifier/stateTransitionDiagram.py b/bionetgen/atomizer/rulifier/stateTransitionDiagram.py
index 5468177..3050192 100644
--- a/bionetgen/atomizer/rulifier/stateTransitionDiagram.py
+++ b/bionetgen/atomizer/rulifier/stateTransitionDiagram.py
@@ -138,9 +138,9 @@ def isActive(state):
for species in centerUnit:
for element in species.split("."):
if element.split("(")[0].split("%")[0] not in sourceCounter:
- sourceCounter[
- element.split("(")[0].split("%")[0]
- ] = Counter()
+ sourceCounter[element.split("(")[0].split("%")[0]] = (
+ Counter()
+ )
for component in moleculeDict[
element.split("(")[0].split("%")[0]
]:
@@ -158,9 +158,9 @@ def isActive(state):
for species in centerUnit:
for element in species.split("."):
if element.split("(")[0].split("%")[0] not in sourceCounter:
- sourceCounter[
- element.split("(")[0].split("%")[0]
- ] = Counter()
+ sourceCounter[element.split("(")[0].split("%")[0]] = (
+ Counter()
+ )
for component in moleculeDict[
element.split("(")[0].split("%")[0]
]:
@@ -179,9 +179,9 @@ def isActive(state):
for species in productUnit:
for element in species.split("."):
if element.split("(")[0].split("%")[0] not in destinationCounter:
- destinationCounter[
- element.split("(")[0].split("%")[0]
- ] = Counter()
+ destinationCounter[element.split("(")[0].split("%")[0]] = (
+ Counter()
+ )
for component in moleculeDict[
element.split("(")[0].split("%")[0]
]:
diff --git a/bionetgen/atomizer/rulifier/stdgraph.py b/bionetgen/atomizer/rulifier/stdgraph.py
index 91215a8..08e55ce 100644
--- a/bionetgen/atomizer/rulifier/stdgraph.py
+++ b/bionetgen/atomizer/rulifier/stdgraph.py
@@ -130,13 +130,13 @@ def createBitNode(graph, molecule, nodeList, simplifiedText):
if simplifiedText:
nodeName += "o"
else:
- nodeName += "\u25CF "
+ nodeName += "\u25cf "
nodeId.append(bit[0])
else:
if simplifiedText:
nodeName += "x"
else:
- nodeName += "\u25CB "
+ nodeName += "\u25cb "
# nodeName += u"\u00B7 "
if (idx + 1) % gridDict[len(node)] == 0 and idx + 1 != len(node):
nodeName.strip(" ")
diff --git a/bionetgen/atomizer/sbml2bngl.py b/bionetgen/atomizer/sbml2bngl.py
index e3fb81b..4ffd11a 100755
--- a/bionetgen/atomizer/sbml2bngl.py
+++ b/bionetgen/atomizer/sbml2bngl.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
from copy import deepcopy, copy
from bionetgen.atomizer.writer import bnglWriter as writer
@@ -12,7 +13,12 @@
from collections import Counter
from collections import defaultdict
import math as pymath
-from bionetgen.atomizer.utils.util import logMess, TranslationException
+from bionetgen.atomizer.utils.util import (
+ logMess,
+ TranslationException,
+ get_size,
+ get_item,
+)
import libsbml
from bionetgen.atomizer.bngModel import bngModel
@@ -220,7 +226,7 @@ def extractModelAnnotation(self):
annotation = self.model.getAnnotation()
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotation, lista)
- for idx in range(lista.getSize()):
+ for idx in range(get_size(lista)):
# biol,qual = lista.get(idx).getBiologicalQualifierType(), lista.get(idx).getModelQualifierType()
qualifierType = lista.get(idx).getQualifierType()
qualifierDescription = (
@@ -230,8 +236,8 @@ def extractModelAnnotation(self):
)
if qualifierDescription not in metaInformation:
metaInformation[qualifierDescription] = set([])
- for idx2 in range(0, lista.get(idx).getResources().getLength()):
- resource = lista.get(idx).getResources().getValue(idx2)
+ for idx2 in range(0, get_size(get_item(lista, idx).getResources())):
+ resource = get_item(lista, idx).getResources().getValue(idx2)
metaInformation[qualifierDescription].add(resource)
return metaInformation
@@ -338,7 +344,7 @@ def getRawSpecies(self, species, parameters=[], logEntries=True):
# by compartment
if logEntries and standardizedName != "0":
if standardizedName in self.speciesMemory:
- if len(list(self.model.getListOfCompartments())) == 1:
+ if get_size(self.model.getListOfCompartments()) == 1:
standardizedName += "_" + species.getId()
else:
# we can differentiate by compartment tag, no need to attach it to the name
@@ -649,8 +655,9 @@ def find_all_symbols(self, math, reactionID):
)
l = math.getListOfNodes()
replace_dict = {}
- for inode in range(l.getSize()):
- node = l.get(inode)
+ size = get_size(l)
+ for inode in range(size):
+ node = get_item(l, inode)
# Sympy doesn't like "def" in our string
name = node.getName()
if name == "def":
@@ -1278,9 +1285,9 @@ def __getRawRules(
for compartment in (self.model.getListOfCompartments()):
if compartment.getId() not in compartmentList:
if len(rReactant) != 2:
- rateL = '{0} * {1}'.format(rateL,compartment.getSize())
+ rateL = '{0} * {1}'.format(rateL, get_size(compartment))
if len(rProduct) != 2:
- rateR = '{0} * {1}'.format(rateR,compartment.getSize())
+ rateR = '{0} * {1}'.format(rateR, get_size(compartment))
"""
return {
"reactants": reactant,
@@ -1447,7 +1454,7 @@ def reduceComponentSymmetryFactors(self, reaction, translator, functions):
for molecule in translator[element[0]].molecules:
for component in molecule.components:
molecule.sort()
- componentList = Counter([(molecule.signature(freactionCenter))])
+ componentList = Counter([molecule.signature(freactionCenter)])
for _ in range(0, int(element[1])):
rcomponent[
(
@@ -1463,7 +1470,7 @@ def reduceComponentSymmetryFactors(self, reaction, translator, functions):
for molecule in translator[element[0]].molecules:
molecule.sort()
for component in molecule.components:
- componentList = Counter([(molecule.signature(breactionCenter))])
+ componentList = Counter([molecule.signature(breactionCenter)])
for _ in range(0, int(element[1])):
pcomponent[
(
@@ -1606,7 +1613,7 @@ def __getRawCompartments(self, compartment):
"""
idid = compartment.getId()
name = compartment.getName()
- size = compartment.getSize()
+ size = get_size(compartment)
# volume messes up the reactions
# size = 1.0
dimensions = compartment.getSpatialDimensions()
@@ -1959,9 +1966,9 @@ def getReactions(
)
fobj_2.local_dict = currParamConv
self.bngModel.add_function(fobj_2)
- self.reactionDictionary[
- rawRules["reactionID"]
- ] = "({0} - {1})".format(functionName, functionName2)
+ self.reactionDictionary[rawRules["reactionID"]] = (
+ "({0} - {1})".format(functionName, functionName2)
+ )
finalRateStr = "{0},{1}".format(functionName, functionName2)
rule_obj.rate_cts = (functionName, functionName2)
else:
@@ -2039,9 +2046,9 @@ def getReactions(
% functionName,
)
defn = self.bngModel.functions[rule_obj.rate_cts[0]].definition
- self.bngModel.functions[
- rule_obj.rate_cts[0]
- ].definition = f"({defn})/({rule_obj.symm_factors[0]})"
+ self.bngModel.functions[rule_obj.rate_cts[0]].definition = (
+ f"({defn})/({rule_obj.symm_factors[0]})"
+ )
if rule_obj.reversible:
logMess(
"ERROR:SIM205",
@@ -2602,14 +2609,14 @@ def getAssignmentRules(
if matches:
if matches[0]["isBoundary"]:
- artificialObservables[
- rawArule[0] + "_ar"
- ] = writer.bnglFunction(
- rawArule[1][0],
- rawArule[0] + "_ar()",
- [],
- compartments=compartmentList,
- reactionDict=self.reactionDictionary,
+ artificialObservables[rawArule[0] + "_ar"] = (
+ writer.bnglFunction(
+ rawArule[1][0],
+ rawArule[0] + "_ar()",
+ [],
+ compartments=compartmentList,
+ reactionDict=self.reactionDictionary,
+ )
)
self.arule_map[rawArule[0]] = rawArule[0] + "_ar"
if rawArule[0] in observablesDict:
@@ -2623,28 +2630,28 @@ def getAssignmentRules(
rawArule[0]
),
)
- artificialObservables[
- rawArule[0] + "_ar"
- ] = writer.bnglFunction(
- rawArule[1][0],
- rawArule[0] + "_ar()",
- [],
- compartments=compartmentList,
- reactionDict=self.reactionDictionary,
+ artificialObservables[rawArule[0] + "_ar"] = (
+ writer.bnglFunction(
+ rawArule[1][0],
+ rawArule[0] + "_ar()",
+ [],
+ compartments=compartmentList,
+ reactionDict=self.reactionDictionary,
+ )
)
self.arule_map[rawArule[0]] = rawArule[0] + "_ar"
if rawArule[0] in observablesDict:
observablesDict[rawArule[0]] = rawArule[0] + "_ar"
continue
elif rawArule[0] in [observablesDict[x] for x in observablesDict]:
- artificialObservables[
- rawArule[0] + "_ar"
- ] = writer.bnglFunction(
- rawArule[1][0],
- rawArule[0] + "_ar()",
- [],
- compartments=compartmentList,
- reactionDict=self.reactionDictionary,
+ artificialObservables[rawArule[0] + "_ar"] = (
+ writer.bnglFunction(
+ rawArule[1][0],
+ rawArule[0] + "_ar()",
+ [],
+ compartments=compartmentList,
+ reactionDict=self.reactionDictionary,
+ )
)
self.arule_map[rawArule[0]] = rawArule[0] + "_ar"
if rawArule[0] in observablesDict:
@@ -2700,14 +2707,14 @@ def getAssignmentRules(
assigObsFlag = False
for idx in candidates:
# if re.search('\s{0}\s'.format(rawArule[0]),observables[idx]):
- artificialObservables[
- rawArule[0] + "_ar"
- ] = writer.bnglFunction(
- rawArule[1][0],
- rawArule[0] + "_ar()",
- [],
- compartments=compartmentList,
- reactionDict=self.reactionDictionary,
+ artificialObservables[rawArule[0] + "_ar"] = (
+ writer.bnglFunction(
+ rawArule[1][0],
+ rawArule[0] + "_ar()",
+ [],
+ compartments=compartmentList,
+ reactionDict=self.reactionDictionary,
+ )
)
self.arule_map[rawArule[0]] = rawArule[0] + "_ar"
assigObsFlag = True
@@ -2856,7 +2863,7 @@ def check_noCompartment(self, parameters=[]):
self.bngModel.noCompartment = True
return
for compartment in self.model.getListOfCompartments():
- self.compartmentDict[compartment.getId()] = compartment.getSize()
+ self.compartmentDict[compartment.getId()] = get_size(compartment)
self.noCompartment = False
self.bngModel.noCompartment = False
# Get all rawSpecies
@@ -2930,7 +2937,7 @@ def default_to_regular(d):
speciesAnnotationInfo = default_to_regular(self.getFullAnnotation())
annotationInfo = {"moleculeTypes": {}, "species": {}}
for compartment in self.model.getListOfCompartments():
- compartmentDict[compartment.getId()] = compartment.getSize()
+ compartmentDict[compartment.getId()] = get_size(compartment)
unitFlag = True
for species in self.model.getListOfSpecies():
# making molecule and seed species objs for
@@ -2956,7 +2963,7 @@ def default_to_regular(d):
if rawSpecies["returnID"] in rawSpeciesName:
rawSpeciesName.remove(rawSpecies["returnID"])
if (
- translator[rawSpecies["returnID"]].getSize() == 1
+ get_size(translator[rawSpecies["returnID"]]) == 1
and translator[rawSpecies["returnID"]].molecules[0].name
not in names
and translator[rawSpecies["returnID"]].molecules[0].name
@@ -3005,9 +3012,9 @@ def default_to_regular(d):
moleculesText.append(mtext)
if rawSpecies["returnID"] in speciesAnnotationInfo:
- annotationInfo["moleculeTypes"][
- rawSpecies["returnID"]
- ] = speciesAnnotationInfo[rawSpecies["returnID"]]
+ annotationInfo["moleculeTypes"][rawSpecies["returnID"]] = (
+ speciesAnnotationInfo[rawSpecies["returnID"]]
+ )
del speciesAnnotationInfo[rawSpecies["returnID"]]
# if rawSpecies['identifier'] == 'glx' and len(translator) > 0:
@@ -3071,9 +3078,9 @@ def default_to_regular(d):
if self.noCompartment:
compartmentSize = 1.0
else:
- compartmentSize = self.model.getCompartment(
- rawSpecies["compartment"]
- ).getSize()
+ compartmentSize = get_size(
+ self.model.getCompartment(rawSpecies["compartment"])
+ )
newParameter = compartmentSize * newParameter
# temp testing AS
spec_obj.val = newParameter
@@ -3246,7 +3253,7 @@ def default_to_regular(d):
sorted(rawSpeciesName, key=len)
for species in rawSpeciesName:
if (
- translator[species].getSize() == 1
+ get_size(translator[species]) == 1
and translator[species].molecules[0].name not in names
):
names.append(translator[species].molecules[0].name)
@@ -3386,10 +3393,10 @@ def getSpeciesAnnotation(self):
annotationXML = species.getAnnotation()
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotationXML, lista)
- if lista.getSize() == 0:
+ if get_size(lista) == 0:
self.speciesAnnotation[rawSpecies["returnID"]] = []
else:
- for idx in range(lista.getSize()):
+ for idx in range(get_size(lista)):
self.speciesAnnotation[rawSpecies["returnID"]].append(
lista.get(idx).getResources()
)
@@ -3406,11 +3413,11 @@ def getFullAnnotation(self):
annotationXML = species.getAnnotation()
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotationXML, lista)
- if lista.getSize() == 0:
+ if get_size(lista) == 0:
continue
else:
- for idx in range(lista.getSize()):
- for idx2 in range(0, lista.get(idx).getResources().getLength()):
+ for idx in range(get_size(lista)):
+ for idx2 in range(0, get_size(get_item(lista, idx).getResources())):
resource = lista.get(idx).getResources().getValue(idx2)
qualifierType = lista.get(idx).getQualifierType()
qualifierDescription = (
@@ -3429,7 +3436,7 @@ def getModelAnnotation(self):
annotationXML = self.model.getAnnotation()
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotationXML, lista)
- if lista.getSize() == 0:
+ if get_size(lista) == 0:
modelAnnotations = []
else:
tempDict = {}
diff --git a/bionetgen/atomizer/sbml2json.py b/bionetgen/atomizer/sbml2json.py
index 4118f6e..30d34fc 100644
--- a/bionetgen/atomizer/sbml2json.py
+++ b/bionetgen/atomizer/sbml2json.py
@@ -8,6 +8,7 @@
import libsbml
import json
from optparse import OptionParser
+from bionetgen.atomizer.utils.util import get_size
def factorial(x):
@@ -114,7 +115,7 @@ def __getRawCompartments(self):
compartmentList = {}
for compartment in self.model.getListOfCompartments():
name = compartment.getId()
- size = compartment.getSize()
+ size = get_size(compartment)
outside = compartment.getOutside()
dimensions = compartment.getSpatialDimensions()
compartmentList[name] = [dimensions, size, outside]
@@ -404,7 +405,7 @@ def main():
help="the output JSON file. Default = .py",
metavar="FILE",
)
- (options, args) = parser.parse_args()
+ options, args = parser.parse_args()
reader = libsbml.SBMLReader()
nameStr = options.input
if options.output == None:
diff --git a/bionetgen/atomizer/utils/annotationDeletion.py b/bionetgen/atomizer/utils/annotationDeletion.py
index 1861a85..2242a86 100644
--- a/bionetgen/atomizer/utils/annotationDeletion.py
+++ b/bionetgen/atomizer/utils/annotationDeletion.py
@@ -8,7 +8,7 @@
import progressbar
import libsbml
-from util import logMess
+from util import logMess, get_size, get_item
from sbml2bngl import SBML2BNGL as SBML2BNGL
import structures
import atomizer.moleculeCreation as mc
@@ -125,15 +125,15 @@ def parseAnnotation(annotation):
speciesAnnotationDict = defaultdict(list)
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotation, lista)
- for idx in range(0, lista.getSize()):
- for idx2 in range(0, lista.get(idx).getResources().getLength()):
- resource = lista.get(idx).getResources().getValue(idx2)
+ for idx in range(0, get_size(lista)):
+ for idx2 in range(0, get_size(get_item(lista, idx).getResources())):
+ resource = get_item(lista, idx).getResources().getValue(idx2)
- qualifierType = lista.get(idx).getQualifierType()
+ qualifierType = get_item(lista, idx).getQualifierType()
qualifierDescription = (
- bioqual[lista.get(idx).getBiologicalQualifierType()]
+ bioqual[get_item(lista, idx).getBiologicalQualifierType()]
if qualifierType
- else modqual[lista.get(idx).getModelQualifierType()]
+ else modqual[get_item(lista, idx).getModelQualifierType()]
)
speciesAnnotationDict[qualifierDescription].append(resource)
return speciesAnnotationDict
@@ -190,9 +190,9 @@ def updateFromComplex(complexMolecule, sct, annotationDict, annotationToSpeciesD
localSpeciesDict[constituentElement] = annotationToSpeciesDict[
constituentElement
]
- localSpeciesDict[
- annotationToSpeciesDict[constituentElement]
- ] = constituentElement
+ localSpeciesDict[annotationToSpeciesDict[constituentElement]] = (
+ constituentElement
+ )
else:
unmatchedReactants.append(constituentElement)
diff --git a/bionetgen/atomizer/utils/annotationExtender.py b/bionetgen/atomizer/utils/annotationExtender.py
index ec1e4bd..ee8a182 100644
--- a/bionetgen/atomizer/utils/annotationExtender.py
+++ b/bionetgen/atomizer/utils/annotationExtender.py
@@ -6,7 +6,7 @@
"""
import libsbml
-from util import logMess
+from util import logMess, get_size, get_item
from sbml2bngl import SBML2BNGL as SBML2BNGL
import structures
import atomizer.resolveSCT as mc
@@ -128,15 +128,15 @@ def parseAnnotation(annotation):
speciesAnnotationDict = defaultdict(list)
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotation, lista)
- for idx in range(0, lista.getSize()):
- for idx2 in range(0, lista.get(idx).getResources().getLength()):
- resource = lista.get(idx).getResources().getValue(idx2)
+ for idx in range(0, get_size(lista)):
+ for idx2 in range(0, get_size(get_item(lista, idx).getResources())):
+ resource = get_item(lista, idx).getResources().getValue(idx2)
- qualifierType = lista.get(idx).getQualifierType()
+ qualifierType = get_item(lista, idx).getQualifierType()
qualifierDescription = (
- bioqual[lista.get(idx).getBiologicalQualifierType()]
+ bioqual[get_item(lista, idx).getBiologicalQualifierType()]
if qualifierType
- else modqual[lista.get(idx).getModelQualifierType()]
+ else modqual[get_item(lista, idx).getModelQualifierType()]
)
speciesAnnotationDict[qualifierDescription].append(resource)
return speciesAnnotationDict
@@ -214,9 +214,9 @@ def updateFromComplex(complexMolecule, sct, annotationDict, annotationToSpeciesD
localSpeciesDict[constituentElement] = annotationToSpeciesDict[
constituentElement
]
- localSpeciesDict[
- annotationToSpeciesDict[constituentElement]
- ] = constituentElement
+ localSpeciesDict[annotationToSpeciesDict[constituentElement]] = (
+ constituentElement
+ )
else:
unmatchedReactants.append(constituentElement)
diff --git a/bionetgen/atomizer/utils/annotationExtractor.py b/bionetgen/atomizer/utils/annotationExtractor.py
index dacf1a0..10046f9 100644
--- a/bionetgen/atomizer/utils/annotationExtractor.py
+++ b/bionetgen/atomizer/utils/annotationExtractor.py
@@ -7,6 +7,7 @@
import libsbml
from sbml2bngl import SBML2BNGL as SBML2BNGL
+from util import get_size, get_item
import structures
import atomizer.moleculeCreation as mc
import os
@@ -96,14 +97,14 @@ def parseAnnotation(self, annotation):
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotation, lista)
# print '----',species.getName()
- for idx in range(0, lista.getSize()):
- for idx2 in range(0, lista.get(idx).getResources().getLength()):
- resource = lista.get(idx).getResources().getValue(idx2)
- qualifierType = lista.get(idx).getQualifierType()
+ for idx in range(0, get_size(lista)):
+ for idx2 in range(0, get_size(get_item(lista, idx).getResources())):
+ resource = get_item(lista, idx).getResources().getValue(idx2)
+ qualifierType = get_item(lista, idx).getQualifierType()
qualifierDescription = (
- bioqual[lista.get(idx).getBiologicalQualifierType()]
+ bioqual[get_item(lista, idx).getBiologicalQualifierType()]
if qualifierType
- else modqual[lista.get(idx).getModelQualifierType()]
+ else modqual[get_item(lista, idx).getModelQualifierType()]
)
speciesAnnotationDict[qualifierDescription].append(resource)
return speciesAnnotationDict
@@ -158,9 +159,9 @@ def updateFromComplex(
localSpeciesDict[constituentElement] = annotationToSpeciesDict[
constituentElement
]
- localSpeciesDict[
- annotationToSpeciesDict[constituentElement]
- ] = constituentElement
+ localSpeciesDict[annotationToSpeciesDict[constituentElement]] = (
+ constituentElement
+ )
else:
unmatchedReactants.append(constituentElement)
@@ -294,20 +295,20 @@ def getModelAnnotations(self):
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotationXML, lista)
modelAnnotations = []
- for idx in range(lista.getSize()):
- for idx2 in range(lista.get(idx).getResources().getLength()):
- if lista.get(idx).getQualifierType():
+ for idx in range(get_size(lista)):
+ for idx2 in range(get_size(get_item(lista, idx).getResources())):
+ if get_item(lista, idx).getQualifierType():
modelAnnotations.append(
[
- bioqual[lista.get(idx).getBiologicalQualifierType()],
- lista.get(idx).getResources().getValue(idx2),
+ bioqual[get_item(lista, idx).getBiologicalQualifierType()],
+ get_item(lista, idx).getResources().getValue(idx2),
]
)
else:
modelAnnotations.append(
[
- modqual[lista.get(idx).getModelQualifierType()],
- lista.get(idx).getResources().getValue(idx2),
+ modqual[get_item(lista, idx).getModelQualifierType()],
+ get_item(lista, idx).getResources().getValue(idx2),
]
)
diff --git a/bionetgen/atomizer/utils/annotationResolver.py b/bionetgen/atomizer/utils/annotationResolver.py
index 80156b2..1f2121b 100644
--- a/bionetgen/atomizer/utils/annotationResolver.py
+++ b/bionetgen/atomizer/utils/annotationResolver.py
@@ -38,15 +38,15 @@ def resolveAnnotationHelper(annotation):
resolveAnnotation.k = bioservices.kegg.KEGG(verbose=False)
resolveAnnotation.qg = bioservices.QuickGO(verbose=False)
resolveAnnotation.t = bioservices.Taxon()
- resolveAnnotation.db[
+ resolveAnnotation.db["http://identifiers.org/uniprot/P62988"] = (
"http://identifiers.org/uniprot/P62988"
- ] = "http://identifiers.org/uniprot/P62988"
- resolveAnnotation.db[
+ )
+ resolveAnnotation.db["http://identifiers.org/uniprot/P06842"] = (
"http://identifiers.org/uniprot/P06842"
- ] = "http://identifiers.org/uniprot/P06842"
- resolveAnnotation.db[
- "http://identifiers.org/uniprot/P07006"
- ] = "http://identifiers.org/uniprot/P06842"
+ )
+ resolveAnnotation.db["http://identifiers.org/uniprot/P07006"] = (
+ "http://identifiers.org/uniprot/P06842"
+ )
if annotation in resolveAnnotation.db:
return annotation, resolveAnnotation.db[annotation]
diff --git a/bionetgen/atomizer/utils/consoleCommands.py b/bionetgen/atomizer/utils/consoleCommands.py
index 77e22e9..e2f4978 100644
--- a/bionetgen/atomizer/utils/consoleCommands.py
+++ b/bionetgen/atomizer/utils/consoleCommands.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
import bionetgen
diff --git a/bionetgen/atomizer/utils/extractAtomic.py b/bionetgen/atomizer/utils/extractAtomic.py
index 89dd06e..791be2d 100644
--- a/bionetgen/atomizer/utils/extractAtomic.py
+++ b/bionetgen/atomizer/utils/extractAtomic.py
@@ -5,6 +5,7 @@
@author: proto
"""
+
from collections import Counter
diff --git a/bionetgen/atomizer/utils/readBNGXML.py b/bionetgen/atomizer/utils/readBNGXML.py
index 0c80788..ab48395 100644
--- a/bionetgen/atomizer/utils/readBNGXML.py
+++ b/bionetgen/atomizer/utils/readBNGXML.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
from lxml import etree
from . import smallStructures as st
from io import StringIO
diff --git a/bionetgen/atomizer/utils/smallStructures.py b/bionetgen/atomizer/utils/smallStructures.py
index dfc89f3..9b0b390 100644
--- a/bionetgen/atomizer/utils/smallStructures.py
+++ b/bionetgen/atomizer/utils/smallStructures.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
from copy import deepcopy
from lxml import etree
import re
diff --git a/bionetgen/atomizer/utils/structures.py b/bionetgen/atomizer/utils/structures.py
index e0607ae..f93105a 100644
--- a/bionetgen/atomizer/utils/structures.py
+++ b/bionetgen/atomizer/utils/structures.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
from copy import deepcopy
import difflib
import hashlib
diff --git a/bionetgen/atomizer/utils/util.py b/bionetgen/atomizer/utils/util.py
index 630e67d..0832081 100644
--- a/bionetgen/atomizer/utils/util.py
+++ b/bionetgen/atomizer/utils/util.py
@@ -4,6 +4,7 @@
@author: proto
"""
+
from __future__ import division
import json
from functools import partial
@@ -151,7 +152,6 @@ def __str__(self):
class NumericStringParser(object):
-
"""
Most of this code comes from the fourFn.py pyparsing example
@@ -321,6 +321,27 @@ def logMess(logType, logMessage):
logger.error(logMessage, loc=f"{__file__} : {module}.logMess()")
+def get_size(obj):
+ if hasattr(obj, "getSize"):
+ return obj.getSize()
+ elif hasattr(obj, "size"):
+ return obj.size()
+ elif hasattr(obj, "getLength"):
+ return obj.getLength()
+ else:
+ try:
+ return len(obj)
+ except:
+ return 0
+
+
+def get_item(obj, idx):
+ if hasattr(obj, "get"):
+ return obj.get(idx)
+ else:
+ return obj[idx]
+
+
def testBNGFailure(fileName):
with open(os.devnull, "w") as f:
result = call(["bngdev", fileName], stdout=f)
diff --git a/bionetgen/core/tools/cli.py b/bionetgen/core/tools/cli.py
index e40a1c6..c900106 100644
--- a/bionetgen/core/tools/cli.py
+++ b/bionetgen/core/tools/cli.py
@@ -50,23 +50,39 @@ def __init__(
# ensure correct path to the input file
self.inp_path = os.path.abspath(self.inp_file)
# pull other arugments out
+ if log_file is not None:
+ self.log_file = os.path.abspath(log_file)
+ else:
+ self.log_file = None
self._set_output(output)
# sedml_file = sedml
- self.bngpath = bngpath
- # setting up bng2.pl
- self.bng_exec = os.path.join(self.bngpath, "BNG2.pl")
- # TODO: Transition to BNGErrors and logging
- assert os.path.exists(self.bng_exec), "BNG2.pl is not found!"
+ # Resolve BioNetGen executable path. Historically this code assumed
+ # `bngpath` was a directory containing BNG2.pl, but on Windows installs
+ # and some deployments we may need to honor $BNGPATH or accept a direct
+ # path to BNG2.pl.
+ from bionetgen.core.utils.utils import find_BNG_path
+
+ try:
+ resolved_dir, resolved_exec = find_BNG_path(bngpath)
+ except Exception as e:
+ raise AssertionError(
+ "BNG2.pl is not found! "
+ "Set the BNGPATH environment variable to the BioNetGen folder containing BNG2.pl. "
+ f"Details: {e}"
+ ) from e
+
+ self.bngpath = resolved_dir
+ self.bng_exec = resolved_exec
if "BNGPATH" in os.environ:
self.old_bngpath = os.environ["BNGPATH"]
else:
self.old_bngpath = None
- os.environ["BNGPATH"] = self.bngpath
+ if self.bngpath is not None:
+ os.environ["BNGPATH"] = self.bngpath
self.result = None
self.stdout = "PIPE"
self.stderr = "STDOUT"
self.suppress = suppress
- self.log_file = log_file
self.timeout = timeout
def _set_output(self, output):
@@ -74,16 +90,28 @@ def _set_output(self, output):
"Setting up output path", loc=f"{__file__} : BNGCLI._set_output()"
)
# setting up output area
- self.output = output
- if os.path.isdir(output):
- # path exists, let's go there
- os.chdir(output)
- else:
- os.mkdir(output)
- os.chdir(output)
+ self.output = os.path.abspath(output)
+ if not os.path.isdir(self.output):
+ os.makedirs(self.output, exist_ok=True)
def run(self):
self.logger.debug("Running", loc=f"{__file__} : BNGCLI.run()")
+ # If BNG2.pl is not available, fall back to an empty result so that
+ # library users can still instantiate and inspect models without a
+ # full BioNetGen install.
+ if self.bng_exec is None:
+ from bionetgen.core.tools import BNGResult
+
+ self.result = BNGResult(self.output)
+ self.result.process_return = 0
+ self.result.output = []
+ if self.old_bngpath is not None:
+ os.environ["BNGPATH"] = self.old_bngpath
+ else:
+ if "BNGPATH" in os.environ:
+ del os.environ["BNGPATH"]
+ return
+
from bionetgen.core.utils.utils import run_command
try:
@@ -102,7 +130,7 @@ def run(self):
self.logger.debug(
"Writing the model to a file", loc=f"{__file__} : BNGCLI.run()"
)
- write_to = self.inp_file.model_name + ".bngl"
+ write_to = os.path.join(self.output, self.inp_file.model_name + ".bngl")
write_to = os.path.abspath(write_to)
if os.path.isfile(write_to):
self.logger.warning(
@@ -119,7 +147,9 @@ def run(self):
fname = fname.replace(".bngl", "")
command = ["perl", self.bng_exec, self.inp_path]
self.logger.debug("Running command", loc=f"{__file__} : BNGCLI.run()")
- rc, out = run_command(command, suppress=self.suppress, timeout=self.timeout)
+ rc, out = run_command(
+ command, suppress=self.suppress, timeout=self.timeout, cwd=self.output
+ )
if self.log_file is not None:
self.logger.debug("Setting up log file", loc=f"{__file__} : BNGCLI.run()")
# test if we were given a path
@@ -141,6 +171,9 @@ def run(self):
# and we keep it as is
full_log_path = self.log_file
self.logger.debug("Writing log file", loc=f"{__file__} : BNGCLI.run()")
+ log_parent = os.path.dirname(os.path.abspath(full_log_path))
+ if not os.path.exists(log_parent):
+ os.makedirs(log_parent, exist_ok=True)
with open(full_log_path, "w") as f:
f.write("\n".join(out))
if rc == 0:
@@ -150,18 +183,24 @@ def run(self):
from bionetgen.core.tools import BNGResult
# load in the result
- self.result = BNGResult(os.getcwd())
+ self.result = BNGResult(self.output)
self.result.process_return = rc
self.result.output = out
# set BNGPATH back
if self.old_bngpath is not None:
os.environ["BNGPATH"] = self.old_bngpath
+ else:
+ if "BNGPATH" in os.environ:
+ del os.environ["BNGPATH"]
else:
self.logger.error("Command failed to run", loc=f"{__file__} : BNGCLI.run()")
self.result = None
# set BNGPATH back
if self.old_bngpath is not None:
os.environ["BNGPATH"] = self.old_bngpath
+ else:
+ if "BNGPATH" in os.environ:
+ del os.environ["BNGPATH"]
if hasattr(out, "stdout"):
if out.stdout is not None:
stdout_str = out.stdout.decode("utf-8")
diff --git a/bionetgen/core/utils/utils.py b/bionetgen/core/utils/utils.py
index 90e28a6..7d19fd2 100644
--- a/bionetgen/core/utils/utils.py
+++ b/bionetgen/core/utils/utils.py
@@ -558,25 +558,49 @@ def find_BNG_path(BNGPATH=None):
# in the PATH variable. Solution: set os.environ BNGPATH
# and make everything use that route
- # Let's keep up the idea we pull this path from the environment
- if BNGPATH is None:
- try:
- BNGPATH = os.environ["BNGPATH"]
- except:
- pass
- # if still none, try pulling it from cmd line
- if BNGPATH is None:
- bngexec = "BNG2.pl"
- if test_bngexec(bngexec):
- # print("BNG2.pl seems to be working")
- # get the source of BNG2.pl
- BNGPATH = spawn.find_executable("BNG2.pl")
- BNGPATH, _ = os.path.split(BNGPATH)
- else:
- bngexec = os.path.join(BNGPATH, "BNG2.pl")
- if not test_bngexec(bngexec):
- RuntimeError("BNG2.pl is not working")
- return BNGPATH, bngexec
+ def _try_path(candidate_path):
+ if candidate_path is None:
+ return None
+ # candidate can be either a directory or a direct path to BNG2.pl
+ if os.path.basename(candidate_path).lower() == "bng2.pl":
+ candidate_dir = os.path.dirname(candidate_path)
+ candidate_exec = candidate_path
+ else:
+ candidate_dir = candidate_path
+ candidate_exec = os.path.join(candidate_path, "BNG2.pl")
+ if test_bngexec(candidate_exec):
+ return candidate_dir, candidate_exec
+ return None
+
+ # 1) Prefer explicit argument
+ tried = []
+ if BNGPATH is not None:
+ tried.append(BNGPATH)
+ hit = _try_path(BNGPATH)
+ if hit is not None:
+ return hit
+
+ # 2) Environment variable
+ env_path = os.environ.get("BNGPATH")
+ if env_path:
+ tried.append(env_path)
+ hit = _try_path(env_path)
+ if hit is not None:
+ return hit
+
+ # 3) On PATH
+ bng_on_path = spawn.find_executable("BNG2.pl")
+ if bng_on_path:
+ tried.append(bng_on_path)
+ hit = _try_path(bng_on_path)
+ if hit is not None:
+ return hit
+
+ # If we get here, BNG2.pl is not available. Some users may only need
+ # basic BNGL parsing behavior and may not have BioNetGen installed.
+ # Return (None, None) so callers can either raise a clearer error or
+ # fall back to a minimal in-Python parse.
+ return None, None
def test_perl(app=None, perl_path=None):
@@ -621,7 +645,7 @@ def test_bngexec(bngexec):
return False
-def run_command(command, suppress=True, timeout=None):
+def run_command(command, suppress=True, timeout=None, cwd=None):
"""
A convenience function to run a given command. The command should be
given as a list of values e.g. ['command', 'arg1', 'arg2'] etc.
@@ -638,11 +662,12 @@ def run_command(command, suppress=True, timeout=None):
timeout=timeout,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
+ cwd=cwd,
)
return rc.returncode, rc
else:
# I am unsure how to do both timeout and the live polling of stdo
- rc = subprocess.run(command, timeout=timeout, capture_output=True)
+ rc = subprocess.run(command, timeout=timeout, capture_output=True, cwd=cwd)
return rc.returncode, rc
else:
if suppress:
@@ -651,11 +676,14 @@ def run_command(command, suppress=True, timeout=None):
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
bufsize=-1,
+ cwd=cwd,
)
rc = process.wait()
return rc, process
else:
- process = subprocess.Popen(command, stdout=subprocess.PIPE, encoding="utf8")
+ process = subprocess.Popen(
+ command, stdout=subprocess.PIPE, encoding="utf8", cwd=cwd
+ )
out = []
while True:
output = process.stdout.readline()
@@ -664,6 +692,6 @@ def run_command(command, suppress=True, timeout=None):
if output:
o = output.strip()
out.append(o)
- print(o)
+ # print(o) # Removed to avoid bottleneck in tests
rc = process.wait()
return rc, out
diff --git a/bionetgen/main.py b/bionetgen/main.py
index 386aff7..2d8be05 100644
--- a/bionetgen/main.py
+++ b/bionetgen/main.py
@@ -18,7 +18,7 @@
# require version argparse action
import argparse, sys
-from pkg_resources import packaging
+from packaging import version as packaging_version
class requireAction(argparse.Action):
@@ -30,9 +30,9 @@ def __init__(self, option_strings, dest, nargs=None, **kwargs):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
if values is not None:
- req_version = packaging.version.parse(values)
+ req_version = packaging_version.parse(values)
cver = bng.core.version.get_version()
- cur_version = packaging.version.parse(cver)
+ cur_version = packaging_version.parse(cver)
# if we don't meet requirement, warn user
sys.tracebacklimit = 0
if not (cur_version >= req_version):
diff --git a/bionetgen/modelapi/__init__.py b/bionetgen/modelapi/__init__.py
index b605c0b..b547bc6 100644
--- a/bionetgen/modelapi/__init__.py
+++ b/bionetgen/modelapi/__init__.py
@@ -1 +1,11 @@
from .model import bngmodel
+
+__all__ = ["bngmodel", "SympyOdes", "export_sympy_odes", "extract_odes_from_mexfile"]
+
+
+def __getattr__(name):
+ if name in {"SympyOdes", "export_sympy_odes", "extract_odes_from_mexfile"}:
+ from .sympy_odes import SympyOdes, export_sympy_odes, extract_odes_from_mexfile
+
+ return locals()[name]
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/bionetgen/modelapi/bngfile.py b/bionetgen/modelapi/bngfile.py
index 7bad42b..a601a37 100644
--- a/bionetgen/modelapi/bngfile.py
+++ b/bionetgen/modelapi/bngfile.py
@@ -1,9 +1,11 @@
+import glob
import os, re
+import shutil
+import tempfile
from bionetgen.main import BioNetGen
from bionetgen.core.exc import BNGFileError
from bionetgen.core.utils.utils import find_BNG_path, run_command, ActionList
-from tempfile import TemporaryDirectory
# This allows access to the CLIs config setup
app = BioNetGen()
@@ -62,40 +64,80 @@ def generate_xml(self, xml_file, model_file=None) -> bool:
model_file = self.path
cur_dir = os.getcwd()
# temporary folder to work in
- with TemporaryDirectory() as temp_folder:
+ temp_folder = tempfile.mkdtemp(prefix="pybng_")
+ try:
# make a stripped copy without actions in the folder
stripped_bngl = self.strip_actions(model_file, temp_folder)
# run with --xml
os.chdir(temp_folder)
+ # If BNG2.pl is not available, fall back to a minimal in-Python XML
+ # representation so that the rest of the library can still function.
+ if self.bngexec is None:
+ return self._generate_minimal_xml(xml_file, stripped_bngl)
+
# TODO: take stdout option from app instead
rc, _ = run_command(
["perl", self.bngexec, "--xml", stripped_bngl], suppress=self.suppress
)
- if rc == 1:
- # if we fail, print out what we have to
- # let the user know what BNG2.pl says
- # if rc.stdout is not None:
- # print(rc.stdout.decode('utf-8'))
- # if rc.stderr is not None:
- # print(rc.stderr.decode('utf-8'))
- # go back to our original location
- os.chdir(cur_dir)
- # shutil.rmtree(temp_folder)
+ if rc != 0:
return False
- else:
- # we should now have the XML file
- path, model_name = os.path.split(stripped_bngl)
- model_name = model_name.replace(".bngl", "")
- written_xml_file = model_name + ".xml"
- with open(written_xml_file, "r", encoding="UTF-8") as f:
- content = f.read()
- xml_file.write(content)
- # since this is an open file, to read it later
- # we need to go back to the beginning
- xml_file.seek(0)
- # go back to our original location
- os.chdir(cur_dir)
- return True
+
+ # we should now have the XML file
+ path, model_name = os.path.split(stripped_bngl)
+ model_name = model_name.replace(".bngl", "")
+ written_xml_file = model_name + ".xml"
+ xml_path = os.path.join(temp_folder, written_xml_file)
+ if not os.path.exists(xml_path):
+ candidates = glob.glob(os.path.join(temp_folder, "*.xml"))
+ if candidates:
+ preferred = [
+ c
+ for c in candidates
+ if os.path.basename(c).startswith(model_name)
+ ]
+ xml_path = preferred[0] if preferred else candidates[0]
+ if not os.path.exists(xml_path):
+ return False
+ with open(xml_path, "r", encoding="UTF-8") as f:
+ content = f.read()
+ xml_file.write(content)
+ # since this is an open file, to read it later
+ # we need to go back to the beginning
+ xml_file.seek(0)
+ return True
+ finally:
+ os.chdir(cur_dir)
+ try:
+ shutil.rmtree(temp_folder)
+ except Exception:
+ pass
+
+ def _generate_minimal_xml(self, xml_file, stripped_bngl) -> bool:
+ """Generate a minimal BNG-XML representation when BNG2.pl is unavailable.
+
+ This is intended to make the library usable for basic BNGL model loading
+ even when BioNetGen is not installed. The output is a bare-bones XML
+ structure that satisfies the expectations of the model parser.
+ """
+ model_name = os.path.splitext(os.path.basename(stripped_bngl))[0]
+ xml = f"""
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+ xml_file.write(xml)
+ xml_file.seek(0)
+ return True
def strip_actions(self, model_path, folder) -> str:
"""
@@ -168,7 +210,8 @@ def write_xml(self, open_file, xml_type="bngxml", bngl_str=None) -> bool:
cur_dir = os.getcwd()
# temporary folder to work in
- with TemporaryDirectory() as temp_folder:
+ temp_folder = tempfile.mkdtemp(prefix="pybng_")
+ try:
# write the current model to temp folder
os.chdir(temp_folder)
with open("temp.bngl", "w", encoding="UTF-8") as f:
@@ -179,10 +222,8 @@ def write_xml(self, open_file, xml_type="bngxml", bngl_str=None) -> bool:
rc, _ = run_command(
["perl", self.bngexec, "--xml", "temp.bngl"], suppress=self.suppress
)
- if rc == 1:
+ if rc != 0:
print("XML generation failed")
- # go back to our original location
- os.chdir(cur_dir)
return False
else:
# we should now have the XML file
@@ -191,15 +232,17 @@ def write_xml(self, open_file, xml_type="bngxml", bngl_str=None) -> bool:
open_file.write(content)
# go back to beginning
open_file.seek(0)
- os.chdir(cur_dir)
return True
elif xml_type == "sbml":
+ if self.bngexec is None:
+ print(
+ "SBML generation requires BNG2.pl (BioNetGen) to be installed."
+ )
+ return False
command = ["perl", self.bngexec, "temp.bngl"]
rc, _ = run_command(command, suppress=self.suppress)
- if rc == 1:
+ if rc != 0:
print("SBML generation failed")
- # go back to our original location
- os.chdir(cur_dir)
return False
else:
# we should now have the SBML file
@@ -207,8 +250,13 @@ def write_xml(self, open_file, xml_type="bngxml", bngl_str=None) -> bool:
content = f.read()
open_file.write(content)
open_file.seek(0)
- os.chdir(cur_dir)
return True
else:
print("XML type {} not recognized".format(xml_type))
return False
+ finally:
+ os.chdir(cur_dir)
+ try:
+ shutil.rmtree(temp_folder)
+ except Exception:
+ pass
diff --git a/bionetgen/modelapi/model.py b/bionetgen/modelapi/model.py
index 503b06d..ab29419 100644
--- a/bionetgen/modelapi/model.py
+++ b/bionetgen/modelapi/model.py
@@ -17,7 +17,6 @@
PopulationMapBlock,
)
-
# This allows access to the CLIs config setup
app = BioNetGen()
app.setup()
@@ -406,16 +405,32 @@ def setup_simulator(self, sim_type="libRR"):
self.simulator = bng.sim_getter(model_file=self, sim_type=sim_type)
return self.simulator
else:
- print(
- 'Sim type {} is not recognized, only libroadrunner \
+ print('Sim type {} is not recognized, only libroadrunner \
is supported currently by passing "libRR" to \
- sim_type keyword argument'.format(
- sim_type
- )
- )
+ sim_type keyword argument'.format(sim_type))
return None
# for now we return the underlying simulator
return self.simulator.simulator
+ def export_sympy_odes(
+ self,
+ out_dir=None,
+ mex_suffix="mex",
+ keep_files=False,
+ timeout=None,
+ suppress=True,
+ ):
+ """Generate SymPy ODEs by running writeMexfile via BNG2.pl."""
+ from .sympy_odes import export_sympy_odes
+
+ return export_sympy_odes(
+ self,
+ out_dir=out_dir,
+ mex_suffix=mex_suffix,
+ keep_files=keep_files,
+ timeout=timeout,
+ suppress=suppress,
+ )
+
###### CORE OBJECT AND PARSING FRONT-END ######
diff --git a/bionetgen/modelapi/sympy_odes.py b/bionetgen/modelapi/sympy_odes.py
new file mode 100644
index 0000000..0357516
--- /dev/null
+++ b/bionetgen/modelapi/sympy_odes.py
@@ -0,0 +1,496 @@
+from __future__ import annotations
+
+import glob
+import os
+import re
+import tempfile
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, cast
+
+import sympy as sp
+from sympy.parsing.sympy_parser import parse_expr, standard_transformations
+
+
+@dataclass
+class SympyOdes:
+ t: sp.Symbol
+ species: List[sp.Symbol]
+ params: List[sp.Symbol]
+ odes: List[sp.Expr]
+ species_names: List[str]
+ param_names: List[str]
+ source_path: str
+
+
+_NAME_ARRAY_PATTERNS = [
+ r"(?:const\s+char\s*\*|static\s+const\s+char\s*\*)\s*\w*species\w*\s*\[\s*\]\s*=\s*\{(.*?)\}\s*;",
+ r"(?:char\s*\*|static\s+char\s*\*)\s*\w*species\w*\s*\[\s*\]\s*=\s*\{(.*?)\}\s*;",
+]
+_PARAM_ARRAY_PATTERNS = [
+ r"(?:const\s+char\s*\*|static\s+const\s+char\s*\*)\s*\w*param\w*\s*\[\s*\]\s*=\s*\{(.*?)\}\s*;",
+ r"(?:char\s*\*|static\s+char\s*\*)\s*\w*param\w*\s*\[\s*\]\s*=\s*\{(.*?)\}\s*;",
+]
+
+
+def export_sympy_odes(
+ model_or_path,
+ out_dir: Optional[str] = None,
+ mex_suffix: str = "mex",
+ keep_files: bool = False,
+ timeout: Optional[int] = None,
+ suppress: bool = True,
+) -> SympyOdes:
+ """Generate a mex C file with BNG2.pl and parse ODEs into SymPy.
+
+ Returns a SympyOdes object containing SymPy symbols and expressions.
+ """
+ from bionetgen.modelapi.model import bngmodel
+ from bionetgen.modelapi.runner import run
+
+ if isinstance(model_or_path, bngmodel):
+ model = model_or_path
+ else:
+ model = bngmodel(model_or_path)
+
+ orig_actions_items = None
+ orig_actions_before = None
+ if hasattr(model, "actions"):
+ orig_actions_items = list(getattr(model.actions, "items", []))
+ orig_actions_before = list(getattr(model.actions, "before_model", []))
+
+ model.actions.clear_actions()
+ model.actions.before_model.clear()
+
+ model.add_action("generate_network", {"overwrite": 1})
+ if mex_suffix:
+ # Action printing doesn't automatically quote strings; BNGL expects
+ # suffix to be a quoted string literal.
+ model.add_action("writeMexfile", {"suffix": f'"{mex_suffix}"'})
+ else:
+ model.add_action("writeMexfile", {})
+
+ cleanup = False
+ if out_dir is None:
+ out_dir = tempfile.mkdtemp(prefix="pybng_sympy_")
+ cleanup = not keep_files
+ else:
+ os.makedirs(out_dir, exist_ok=True)
+
+ try:
+ run(model, out=out_dir, timeout=timeout, suppress=suppress)
+ mex_path = _find_mex_c_file(out_dir, mex_suffix=mex_suffix)
+ return extract_odes_from_mexfile(mex_path)
+ finally:
+ if orig_actions_items is not None:
+ model.actions.items = orig_actions_items
+ if orig_actions_before is not None:
+ model.actions.before_model = orig_actions_before
+ if cleanup:
+ _safe_rmtree(out_dir)
+
+
+def extract_odes_from_mexfile(mex_c_path: str) -> SympyOdes:
+ """Parse a writeMexfile C output and return SymPy ODE expressions."""
+ with open(mex_c_path, "r") as f:
+ text = f.read()
+
+ # Common BioNetGen mex outputs (e.g. *_mex_cvode.c) express ODEs as
+ # NV_Ith_S(Dspecies,i)=... inside calc_species_deriv, referencing
+ # intermediate vectors (ratelaws/observables/expressions). Handle this
+ # format first.
+ if "calc_species_deriv" in text and "NV_Ith_S(Dspecies" in text:
+ return _extract_odes_from_cvode_mex(text, mex_c_path)
+
+ species_names = _extract_name_array(text, _NAME_ARRAY_PATTERNS)
+ param_names = _extract_name_array(text, _PARAM_ARRAY_PATTERNS)
+
+ eq_map = _extract_ode_assignments(text)
+ if not eq_map:
+ raise ValueError(
+ "No ODE assignments found in mex output. "
+ "Expected patterns like NV_Ith_S(ydot,i)=... or ydot[i]=..."
+ )
+
+ max_idx = max(eq_map.keys())
+ species_symbol_names, species_names = _build_symbol_names(
+ species_names, max_idx + 1, prefix="s"
+ )
+ max_param_idx = _max_indexed_param(eq_map.values())
+ param_expected = None
+ if max_param_idx is not None:
+ param_expected = max(max_param_idx + 1, len(param_names))
+ param_symbol_names, param_names = _build_symbol_names(
+ param_names, param_expected, prefix="p"
+ )
+
+ species_symbols = [sp.Symbol(name) for name in species_symbol_names]
+ param_symbols = [sp.Symbol(name) for name in param_symbol_names]
+ t = sp.Symbol("t")
+
+ local_dict: Dict[str, object] = {s.name: s for s in species_symbols}
+ local_dict.update({p.name: p for p in param_symbols})
+ local_dict.update(
+ {
+ "Pow": sp.Pow,
+ "Abs": sp.Abs,
+ "Max": sp.Max,
+ "Min": sp.Min,
+ "exp": sp.exp,
+ "log": sp.log,
+ "sqrt": sp.sqrt,
+ "pi": sp.pi,
+ }
+ )
+
+ odes: List[sp.Expr] = [sp.Integer(0) for _ in range(max_idx + 1)]
+ for idx, expr in eq_map.items():
+ cleaned = _normalize_expr(expr)
+ cleaned = _replace_indexed_symbols(
+ cleaned, species_symbol_names, param_symbol_names
+ )
+ odes[idx] = parse_expr(
+ cleaned, local_dict=local_dict, transformations=standard_transformations
+ )
+
+ return SympyOdes(
+ t=t,
+ species=species_symbols,
+ params=param_symbols,
+ odes=odes,
+ species_names=species_names,
+ param_names=param_names,
+ source_path=mex_c_path,
+ )
+
+
+def _extract_odes_from_cvode_mex(text: str, mex_c_path: str) -> SympyOdes:
+ n_species = _extract_define_int(text, "__N_SPECIES__")
+ n_params = _extract_define_int(text, "__N_PARAMETERS__")
+
+ expr_map = _extract_nv_assignments(
+ _extract_function_body(text, "calc_expressions"), "expressions"
+ )
+ obs_map = _extract_nv_assignments(
+ _extract_function_body(text, "calc_observables"), "observables"
+ )
+ rate_map = _extract_nv_assignments(
+ _extract_function_body(text, "calc_ratelaws"), "ratelaws"
+ )
+ deriv_map = _extract_nv_assignments(
+ _extract_function_body(text, "calc_species_deriv"), "Dspecies"
+ )
+ if not deriv_map:
+ raise ValueError(
+ "No ODE assignments found in mex output. "
+ "Expected NV_Ith_S(Dspecies,i)=... in calc_species_deriv."
+ )
+
+ max_deriv_idx = max(deriv_map.keys())
+ if n_species is None:
+ n_species = max_deriv_idx + 1
+ if n_params is None:
+ max_param_idx = _max_bracket_index(text, "parameters")
+ n_params = (max_param_idx + 1) if max_param_idx is not None else 0
+
+ # No name arrays are typically included in *_mex_cvode.c outputs.
+ species_symbol_names, species_names = _build_symbol_names([], n_species, prefix="s")
+ param_symbol_names, param_names = _build_symbol_names([], n_params, prefix="p")
+
+ species_symbols = [sp.Symbol(name) for name in species_symbol_names]
+ param_symbols = [sp.Symbol(name) for name in param_symbol_names]
+ t = sp.Symbol("t")
+
+ # Intermediate vectors
+ n_expr = (max(expr_map.keys()) + 1) if expr_map else 0
+ n_obs = (max(obs_map.keys()) + 1) if obs_map else 0
+ n_rate = (max(rate_map.keys()) + 1) if rate_map else 0
+
+ expr_syms = [sp.Symbol(f"e{i}") for i in range(n_expr)]
+ obs_syms = [sp.Symbol(f"o{i}") for i in range(n_obs)]
+ rate_syms = [sp.Symbol(f"r{i}") for i in range(n_rate)]
+
+ local_dict: Dict[str, object] = {s.name: s for s in species_symbols}
+ local_dict.update({p.name: p for p in param_symbols})
+ local_dict.update({e.name: e for e in expr_syms})
+ local_dict.update({o.name: o for o in obs_syms})
+ local_dict.update({r.name: r for r in rate_syms})
+ local_dict.update(
+ {
+ "Pow": sp.Pow,
+ "Abs": sp.Abs,
+ "Max": sp.Max,
+ "Min": sp.Min,
+ "exp": sp.exp,
+ "log": sp.log,
+ "sqrt": sp.sqrt,
+ "pi": sp.pi,
+ }
+ )
+
+ def _parse_rhs(rhs: str) -> sp.Expr:
+ # BioNetGen's writeMexfile can emit placeholder non-code text for
+ # unsupported rate law types (e.g. "Sat"). Surface this as a clear
+ # Python error instead of letting SymPy raise a SyntaxError.
+ if "not yet supported by writeMexfile" in rhs:
+ raise NotImplementedError(rhs)
+ cleaned = _normalize_expr(rhs)
+ cleaned = _replace_parameters_brackets(cleaned, param_symbol_names)
+ cleaned = _replace_nv_ith_s(
+ cleaned, species_symbol_names, expr_syms, obs_syms, rate_syms
+ )
+ return cast(
+ sp.Expr,
+ parse_expr(
+ cleaned,
+ local_dict=local_dict,
+ transformations=standard_transformations,
+ ),
+ )
+
+ # Build expressions with intra-expression substitution (expressions can depend on earlier entries)
+ expr_exprs: List[sp.Expr] = [sp.Integer(0) for _ in range(n_expr)]
+ for idx in sorted(expr_map.keys()):
+ val = _parse_rhs(expr_map[idx])
+ if idx > 0:
+ val = val.subs(
+ {expr_syms[j]: expr_exprs[j] for j in range(min(idx, len(expr_exprs)))}
+ )
+ expr_exprs[idx] = cast(sp.Expr, val)
+
+ obs_exprs: List[sp.Expr] = [sp.Integer(0) for _ in range(n_obs)]
+ expr_sub = {expr_syms[i]: expr_exprs[i] for i in range(n_expr)}
+ for idx in sorted(obs_map.keys()):
+ obs_exprs[idx] = cast(sp.Expr, _parse_rhs(obs_map[idx]).subs(expr_sub))
+
+ rate_exprs: List[sp.Expr] = [sp.Integer(0) for _ in range(n_rate)]
+ obs_sub = {obs_syms[i]: obs_exprs[i] for i in range(n_obs)}
+ for idx in sorted(rate_map.keys()):
+ rate_exprs[idx] = cast(
+ sp.Expr,
+ _parse_rhs(rate_map[idx]).subs(expr_sub).subs(obs_sub),
+ )
+
+ rate_sub = {rate_syms[i]: rate_exprs[i] for i in range(n_rate)}
+ odes: List[sp.Expr] = [sp.Integer(0) for _ in range(n_species)]
+ for idx in range(n_species):
+ if idx in deriv_map:
+ odes[idx] = cast(sp.Expr, _parse_rhs(deriv_map[idx]).subs(rate_sub))
+ else:
+ odes[idx] = sp.Integer(0)
+
+ return SympyOdes(
+ t=t,
+ species=species_symbols,
+ params=param_symbols,
+ odes=odes,
+ species_names=species_names,
+ param_names=param_names,
+ source_path=mex_c_path,
+ )
+
+
+def _extract_define_int(text: str, define_name: str) -> Optional[int]:
+ m = re.search(
+ rf"^\s*#define\s+{re.escape(define_name)}\s+(\d+)\s*$", text, flags=re.M
+ )
+ if not m:
+ return None
+ return int(m.group(1))
+
+
+def _extract_function_body(text: str, func_name: str) -> str:
+ # Best-effort extraction; BioNetGen-generated mex code uses simple, non-nested bodies.
+ m = re.search(
+ rf"\b{re.escape(func_name)}\b\s*\([^)]*\)\s*\{{(.*?)^\}}\s*$",
+ text,
+ flags=re.S | re.M,
+ )
+ if not m:
+ return ""
+ return m.group(1)
+
+
+def _extract_nv_assignments(body: str, lhs_var: str) -> Dict[int, str]:
+ if not body:
+ return {}
+ eq_map: Dict[int, str] = {}
+ pattern = rf"NV_Ith_S\s*\(\s*{re.escape(lhs_var)}\s*,\s*(\d+)\s*\)\s*=\s*(.*?);"
+ for match in re.finditer(pattern, body, flags=re.S):
+ idx = int(match.group(1))
+ eq_map[idx] = match.group(2).strip()
+ return eq_map
+
+
+def _replace_parameters_brackets(expr: str, param_names: List[str]) -> str:
+ def repl(match: re.Match[str]) -> str:
+ idx = int(match.group(1))
+ if idx >= len(param_names):
+ return f"p{idx}"
+ return param_names[idx]
+
+ return re.sub(r"\bparameters\s*\[\s*(\d+)\s*\]", repl, expr)
+
+
+def _replace_nv_ith_s(
+ expr: str,
+ species_symbol_names: List[str],
+ expr_syms: List[sp.Symbol],
+ obs_syms: List[sp.Symbol],
+ rate_syms: List[sp.Symbol],
+) -> str:
+ def repl(match: re.Match[str]) -> str:
+ var = match.group(1)
+ idx = int(match.group(2))
+ if var == "species":
+ return (
+ species_symbol_names[idx]
+ if idx < len(species_symbol_names)
+ else f"s{idx}"
+ )
+ if var == "expressions":
+ return expr_syms[idx].name if idx < len(expr_syms) else f"e{idx}"
+ if var == "observables":
+ return obs_syms[idx].name if idx < len(obs_syms) else f"o{idx}"
+ if var == "ratelaws":
+ return rate_syms[idx].name if idx < len(rate_syms) else f"r{idx}"
+ if var == "Dspecies":
+ return f"ds{idx}"
+ # Unknown NV_Ith_S target; leave it as-is
+ return match.group(0)
+
+ return re.sub(r"NV_Ith_S\s*\(\s*(\w+)\s*,\s*(\d+)\s*\)", repl, expr)
+
+
+def _max_bracket_index(text: str, array_name: str) -> Optional[int]:
+ max_idx: Optional[int] = None
+ for m in re.finditer(rf"\b{re.escape(array_name)}\s*\[\s*(\d+)\s*\]", text):
+ idx = int(m.group(1))
+ max_idx = idx if max_idx is None else max(max_idx, idx)
+ return max_idx
+
+
+def _extract_name_array(text: str, patterns: List[str]) -> List[str]:
+ for pattern in patterns:
+ match = re.search(pattern, text, flags=re.S)
+ if match:
+ return re.findall(r"\"([^\"]+)\"", match.group(1))
+ return []
+
+
+def _extract_ode_assignments(text: str) -> Dict[int, str]:
+ eq_map: Dict[int, str] = {}
+ patterns = [
+ r"NV_Ith_S\s*\(\s*ydot\s*,\s*(\d+)\s*\)\s*=\s*(.*?);",
+ r"\b(?:ydot|dydt)\s*\[\s*(\d+)\s*\]\s*=\s*(.*?);",
+ ]
+ for pattern in patterns:
+ for match in re.finditer(pattern, text, flags=re.S):
+ idx = int(match.group(1))
+ expr = match.group(2).strip()
+ eq_map[idx] = expr
+ if eq_map:
+ break
+ return eq_map
+
+
+def _normalize_expr(expr: str) -> str:
+ expr = re.sub(r"\(\s*(?:realtype|double|float|int)\s*\)", "", expr)
+ expr = re.sub(r"\bpow\s*\(", "Pow(", expr)
+ expr = re.sub(r"\bfabs\s*\(", "Abs(", expr)
+ expr = re.sub(r"\bfmax\s*\(", "Max(", expr)
+ expr = re.sub(r"\bfmin\s*\(", "Min(", expr)
+ expr = expr.replace("M_PI", "pi")
+ return expr
+
+
+def _replace_indexed_symbols(
+ expr: str, species_names: List[str], param_names: List[str]
+) -> str:
+ def repl_species(match: re.Match[str]) -> str:
+ idx = int(match.group(1))
+ if idx >= len(species_names):
+ return f"s{idx}"
+ return species_names[idx]
+
+ def repl_param(match: re.Match[str]) -> str:
+ idx = int(match.group(1))
+ if idx >= len(param_names):
+ return f"p{idx}"
+ return param_names[idx]
+
+ expr = re.sub(r"NV_Ith_S\s*\(\s*y\s*,\s*(\d+)\s*\)", repl_species, expr)
+ expr = re.sub(r"\by\s*\[\s*(\d+)\s*\]", repl_species, expr)
+ expr = re.sub(r"\bparams\s*\[\s*(\d+)\s*\]", repl_param, expr)
+ expr = re.sub(r"\bparam\s*\[\s*(\d+)\s*\]", repl_param, expr)
+ expr = re.sub(r"\bp\s*\[\s*(\d+)\s*\]", repl_param, expr)
+ return expr
+
+
+def _build_symbol_names(
+ names: List[str], expected_len: Optional[int], prefix: str
+) -> Tuple[List[str], List[str]]:
+ if expected_len is None:
+ expected_len = len(names)
+
+ cleaned: List[str] = []
+ final_names: List[str] = list(names)
+ seen = set()
+
+ for idx in range(expected_len):
+ raw = names[idx] if idx < len(names) else ""
+ base = re.sub(r"[^0-9a-zA-Z_]", "_", raw)
+ if not base:
+ base = f"{prefix}{idx}"
+ if base[0].isdigit():
+ base = f"{prefix}_{base}"
+ if base in seen:
+ base = f"{base}_{idx}"
+ cleaned.append(base)
+ seen.add(base)
+
+ if expected_len > len(final_names):
+ for idx in range(len(final_names), expected_len):
+ final_names.append(f"{prefix}{idx}")
+
+ return cleaned, final_names
+
+
+def _max_indexed_param(expressions) -> Optional[int]:
+ max_idx = None
+ for expr in expressions:
+ for match in re.finditer(r"\b(?:params|param|p)\s*\[\s*(\d+)\s*\]", expr):
+ idx = int(match.group(1))
+ if max_idx is None or idx > max_idx:
+ max_idx = idx
+ return max_idx
+
+
+def _find_mex_c_file(out_dir: str, mex_suffix: str) -> str:
+ patterns = []
+ if mex_suffix:
+ patterns.extend(
+ [
+ f"*{mex_suffix}*.c",
+ f"*{mex_suffix}*.cpp",
+ f"*{mex_suffix}*.C",
+ ]
+ )
+ patterns.extend(["*mex*.c", "*mex*.cpp", "*.c", "*.cpp"])
+
+ for pattern in patterns:
+ matches = glob.glob(os.path.join(out_dir, pattern))
+ if matches:
+ return matches[0]
+ raise FileNotFoundError(
+ f"Could not locate mex C output in {out_dir}. "
+ "Expected a file like *_mex.c or with the provided suffix."
+ )
+
+
+def _safe_rmtree(path: str) -> None:
+ try:
+ import shutil
+
+ shutil.rmtree(path)
+ except Exception:
+ pass
diff --git a/bionetgen/modelapi/xmlparsers.py b/bionetgen/modelapi/xmlparsers.py
index d00dbad..93d703c 100644
--- a/bionetgen/modelapi/xmlparsers.py
+++ b/bionetgen/modelapi/xmlparsers.py
@@ -301,7 +301,7 @@ def parse_xml(self, xml) -> ParameterBlock:
# add content to line
name = b["@id"]
value = b["@value"]
- # If "@expr" is set, it supercedes value
+ # If "@expr" is set, it supersedes value
if "@expr" in b:
value = b["@expr"]
block.add_parameter(name, value)
@@ -701,14 +701,14 @@ def get_rule_mod(self, xml):
if "Delete" in list_ops:
del_op = list_ops["Delete"]
if not isinstance(del_op, list):
- del_op = [del_op] # Make sure del_op is list
- dmvals= [op['@DeleteMolecules'] for op in del_op]
- # All Delete operations in rule must have DeleteMolecules attribute or
+ del_op = [del_op] # Make sure del_op is list
+ dmvals = [op["@DeleteMolecules"] for op in del_op]
+ # All Delete operations in rule must have DeleteMolecules attribute or
# it does not apply to the whole rule
- if (all(dmvals)==1):
+ if all(dmvals) == 1:
rule_mod.type = "DeleteMolecules"
- # JRF: I don't believe the id of the specific op rule_mod is currently used
- #rule_mod.id = op["@id"]
+ # JRF: I don't believe the id of the specific op rule_mod is currently used
+ # rule_mod.id = op["@id"]
elif "ChangeCompartment" in list_ops:
move_op = list_ops["ChangeCompartment"]
if not isinstance(move_op, list):
diff --git a/bionetgen/network/network.py b/bionetgen/network/network.py
index 000137b..74f375b 100644
--- a/bionetgen/network/network.py
+++ b/bionetgen/network/network.py
@@ -11,7 +11,6 @@
NetworkPopulationMapBlock,
)
-
# This allows access to the CLIs config setup
app = BioNetGen()
app.setup()
@@ -54,7 +53,7 @@ def __init__(self, bngl_model, BNGPATH=def_bng_path):
"parameters",
"species",
"reactions",
- "groups"
+ "groups",
# "compartments",
# "molecule_types",
# "species",
diff --git a/bionetgen/network/networkparser.py b/bionetgen/network/networkparser.py
index 1557406..b131af9 100644
--- a/bionetgen/network/networkparser.py
+++ b/bionetgen/network/networkparser.py
@@ -11,7 +11,6 @@
NetworkPopulationMapBlock,
)
-
# This allows access to the CLIs config setup
app = BioNetGen()
app.setup()
diff --git a/docs/source/conf.py b/docs/source/conf.py
index a190c62..bf9e14b 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -18,7 +18,6 @@
sys.path.insert(0, os.path.abspath("../../."))
import sphinx_rtd_theme
-
# -- Project information -----------------------------------------------------
project = "PyBioNetGen"
diff --git a/requirements.txt b/requirements.txt
index 7ddc5c9..cfd68ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,4 +13,5 @@ networkx
python-libsbml
pylru
pyparsing
-pyyed
\ No newline at end of file
+packaging
+pyyed
diff --git a/setup.py b/setup.py
index 9cd5f5b..478d749 100644
--- a/setup.py
+++ b/setup.py
@@ -2,15 +2,17 @@
import sys, os, json, urllib, subprocess
import shutil, tarfile, zipfile
+
# Utility function for Mac idiosyncracy
-def get_folder (arch):
+def get_folder(arch):
for fname in arch.getnames():
- if (fname.startswith('._')):
+ if fname.startswith("._"):
continue
else:
break
print(fname)
- return(fname)
+ return fname
+
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy"])
import urllib.request
@@ -199,5 +201,6 @@ def get_folder (arch):
"python-libsbml",
"pylru",
"pyparsing",
+ "packaging",
],
)
diff --git a/tests/test_bionetgen.py b/tests/test_bionetgen.py
index 6000476..a872017 100644
--- a/tests/test_bionetgen.py
+++ b/tests/test_bionetgen.py
@@ -97,7 +97,8 @@ def test_bionetgen_all_model_loading():
success += 1
mstr = str(m)
succ.append(model)
- except:
+ except Exception as e:
+ print(e)
print("can't load model {}".format(model))
fails += 1
fail.append(model)
@@ -155,7 +156,8 @@ def test_model_running_CLI():
model = os.path.split(model)
model = model[1]
succ.append(model)
- except:
+ except Exception as e:
+ print(e)
print("can't run model {}".format(model))
fails += 1
model = os.path.split(model)
@@ -185,7 +187,8 @@ def test_model_running_lib():
model = os.path.split(model)
model = model[1]
succ.append(model)
- except:
+ except Exception as e:
+ print(e)
print("can't run model {}".format(model))
fails += 1
model = os.path.split(model)
@@ -297,7 +300,8 @@ def test_pattern_canonicalization():
if pat1_obj != pat2_obj:
res = False
break
- except:
+ except Exception as e:
+ print(e)
res = False
break
# assert that everything matched up