Skip to content
Snippets Groups Projects
Commit 36de779a authored by Sven Gundlach's avatar Sven Gundlach
Browse files

Integrate llm subprocess

parent 56af3751
Branches
No related tags found
No related merge requests found
Pipeline #16633 passed
......@@ -15,14 +15,16 @@
# limitations under the License.
import argparse
import logging
import os
import subprocess
from os.path import isfile
from logging import basicConfig, DEBUG
from os import access, getcwd, R_OK
from os.path import isfile, join, isdir, dirname, abspath
from subprocess import check_output, STDOUT, CalledProcessError, run
from sys import executable
from .server import did_save, tdd_server
# Package imports
from .server import did_save, recommend_software_under_test, tdd_server
logging.basicConfig(level=logging.DEBUG)
basicConfig(level=DEBUG)
def add_arguments(parser):
......@@ -54,23 +56,43 @@ def add_arguments(parser):
"-f", "--file", dest="file", type=readable_file, help="TDD-File input file for pFUnit generator"
)
parser.add_argument(
"-s", "--source", dest="source", type=readable_directory, help="Path of source code to analyse"
)
def readable_file(file_path: str):
    """
    Check for an existing and readable file.

    :param file_path: path to the file, absolute or relative to the current working directory
    :return: absolute path to the validated, readable file
    :raises argparse.ArgumentTypeError: if the file does not exist or is not readable
    """
    # join() returns file_path unchanged when it is already absolute.
    abs_file_path = join(getcwd(), file_path)
    if isfile(abs_file_path) and access(abs_file_path, R_OK):
        return abs_file_path
    raise argparse.ArgumentTypeError(f"File {abs_file_path} doesn't exist or isn't readable.")
def readable_directory(dir_path: str):
    """
    Check for an existing and readable directory.

    :param dir_path: path to the directory, absolute or relative to the working directory
    :return: absolute path of the validated, readable directory
    :raises argparse.ArgumentTypeError: if the directory is missing or unreadable
    """
    absolute_path = join(getcwd(), dir_path)
    # Guard clause: reject anything that is not a readable directory.
    if not (isdir(absolute_path) and access(absolute_path, R_OK)):
        raise argparse.ArgumentTypeError(f"Directory {absolute_path} doesn't exist or isn't readable.")
    return absolute_path
def fxtran_executable(path: str):
"""
Check for valid fxtran path.
......@@ -83,8 +105,8 @@ def fxtran_executable(path: str):
try:
# Call 'fxtran -help' via subprocess
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
check_output(cmd, shell=True, stderr=STDOUT)
except CalledProcessError as e:
raise argparse.ArgumentTypeError(
f"Did not found fxtran. Command '{e.cmd}' returned error (code {e.returncode}): {e.output}. Provide valid path via -fx PATH or add to system PATH."
)
......@@ -92,6 +114,37 @@ def fxtran_executable(path: str):
return path
def process_recommended_metrics(metrics_table):
    """
    Forward the recommended metrics to the bundled llm analyzer subprocess.

    :param metrics_table: list of per-metric dictionaries (may be empty or None)
    :return: None; success or failure is reported on stdout
    """
    # Local import keeps the module's top-level dependencies unchanged.
    import json

    # Debug output of the recommended metrics
    print("Debug Output: Recommended Metrics")

    if not metrics_table:
        print("No recommended metrics available.")
        return

    # Print each metric in a structured format
    for index, metric in enumerate(metrics_table):
        print(f"Metric {index + 1}: {metric}")

    # Directory of this script; the llm package is expected to live next to it.
    current_dir = dirname(abspath(__file__))
    llm_main_path = join(current_dir, 'llm')

    # argv elements must be strings: serialize the metrics table to JSON
    # instead of passing the raw list (which would raise a TypeError in run()).
    args = ['--contextOnly', json.dumps(metrics_table)]

    # Run the llm package with the current interpreter, from its own directory.
    result = run([executable, '-m', 'llm'] + args, cwd=llm_main_path)

    # Report the subprocess outcome.
    if result.returncode != 0:
        print("llm execution failed.")
    else:
        print("llm executed successfully.")
def main():
parser = argparse.ArgumentParser()
add_arguments(parser)
......@@ -106,9 +159,15 @@ def main():
tdd_server.sort_metric = args.metric
if args.file:
# Start cli code gen with given path
tdd_server.input_path = os.path.join(os.getcwd(), args.file)
# Start cli code gen for input path
tdd_server.input_path = join(getcwd(), args.file)
did_save(tdd_server, None)
elif args.source:
# Start cli code analysis for input path
tdd_server.input_path = join(getcwd(), args.source)
metrics_table = recommend_software_under_test(tdd_server)
# Pass the recommended metrics to the llm analyzer
process_recommended_metrics(metrics_table)
else:
if args.tcp:
......
......@@ -17,50 +17,48 @@ import logging
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
# Util imports
# Standard library imports
from logging import getLogger
from os.path import join, relpath
from shutil import rmtree
from typing import Callable, Generic
# Antlr4
from antlr4.tree.Tree import ParseTree
# User relative imports
# Package imports
from ..fxca.util.fxtran_utils import get_files, write_decorate_src_xml
from ..gen.python.TestSuite.TestSuiteParser import TestSuiteParser
from ..gen.python.TestSuite.TestSuiteVisitor import TestSuiteVisitor
from ..utils.calculate_complexity import calculate_metrics
from ..symboltable.symbol_table import MetricSymbol, SymbolTable, PathSymbol, SymbolTableOptions, P, T, TestCaseSymbol
from ..symboltable.symbol_table import MetricSymbol, SymbolTable, SymbolTableOptions, P, T, TestCaseSymbol
# Debug Log
logger = logging.getLogger(__name__)
LOGGER = getLogger(__name__)
class CalculateComplexityVisitor(TestSuiteVisitor, Generic[T]):
_symbol_table: SymbolTable
_test_path: str
class ComplexityMetricVisitor(TestSuiteVisitor, Generic[T]):
# Class constants
TEMP_DIR_NAME = "tmp"
XML_FILE_PATTERN = "*.[fF]90.xml"
SINGLE_QUOTE = "'"
def __init__(
self,
name: str = "",
test_work_path: str = "tdd-dsl/output",
fxtran_path: str = "fxtran",
sort_metric=None,
debug: bool = False,
debug_seperator: str = "\n"):
def __init__(self, name: str = "", source_root_directory: str = "tdd-dsl/output",
fxtran_command_path: str = "fxtran", sorting_metric=None,
debug_mode: bool = False, debug_separator: str = "\n"):
super().__init__()
self.sort_metric = sort_metric
self.fxtran_path = fxtran_path
self.sorting_metric = sorting_metric
self.fxtran_command_path = fxtran_command_path # Updated variable name
self._symbol_table = SymbolTable(name, SymbolTableOptions(False))
self._scope = None
self._test_work_path = test_work_path
self._test_path = ""
self._debug = debug
self._debug_seperator = debug_seperator
self._current_scope = None
self._source_root_directory = source_root_directory
self._source_path = ""
self._debug_mode = debug_mode
self._debug_separator = debug_separator
@property
def work_path(self) -> str:
return self._test_work_path
return self._source_root_directory
# Visit a parse tree produced by TestSuiteParser#testSuite.
def visitTestSuite(self, ctx: TestSuiteParser.TestSuiteContext):
......@@ -71,66 +69,66 @@ class CalculateComplexityVisitor(TestSuiteVisitor, Generic[T]):
def visitTestCase(self, ctx: TestSuiteParser.TestCaseContext):
return self.withScope(ctx, TestCaseSymbol, lambda: self.visitChildren(ctx), ctx.ID().getText())
# Return subdirectories under working path or user entered path
# Process the source path provided by the user or default to the working path
def visitSrcPath(self, ctx: TestSuiteParser.SrcPathContext):
# Strip string terminals
user_path: str = ctx.path.text.strip("\'") if ctx.path else ''
# TODO document
# Update source directory
# If the given path is an absolute path, then self._testPath is ignored and the joining is only the given path
self._test_path: str = os.path.join(self._test_work_path, user_path)
# TODO hc
xml_path = os.path.join(self._test_path, "tmp")
# Write XML files
write_decorate_src_xml(self._test_path, xml_path, fxtran_path=self.fxtran_path)
# Get Fortran files
xml_files = get_files(xml_path, "*.[fF]90.xml")
# Extract and clean the user-provided path
user_path = ctx.path.text.strip(self.SINGLE_QUOTE) if ctx.path else ''
# Return symbol table for client call via TestSuite
self.calculate_metrics_for(user_path)
def calculate_metrics_for(self, user_path):
# Update the source directory based on the user input
self._source_path = join(self._source_root_directory, user_path)
# Create a temporary directory for XML files
temp_xml_path = join(self._source_path, self.TEMP_DIR_NAME)
write_decorate_src_xml(self._source_path, temp_xml_path, fxtran_path=self.fxtran_command_path) # Updated variable name
# Retrieve Fortran XML files from the temporary directory
xml_files = get_files(temp_xml_path, self.XML_FILE_PATTERN)
for path, filename in xml_files:
src_filename: str = filename.rsplit(".", 1)[0]
rel_path = os.path.relpath(path, xml_path)
if rel_path != ".":
# Relative path exists
src_path: str = os.path.join(self._test_path, rel_path, src_filename)
else:
# Relative path is current dir and omitted
src_path: str = os.path.join(self._test_path, src_filename)
src_filename = filename.rsplit(".", 1)[0]
rel_path = relpath(path, temp_xml_path)
# Construct the source path using the relative path
src_path = join(self._source_path, rel_path, src_filename) if rel_path != "." else join(self._source_path, src_filename)
# Calculate metrics for the current XML file
scope_elements = calculate_metrics(
xml_path=os.path.join(
path,
filename),
xml_path=join(path, filename),
src=src_path,
sort_metric=self.sort_metric,
debug=self._debug,
debug_seperator=self._debug_seperator)
sort_metric=self.sorting_metric,
debug=self._debug_mode,
debug_separator=self._debug_separator
)
# Add the calculated metrics to the symbol table
for scope_name, scope in scope_elements.items():
if scope.is_testable:
self._symbol_table.add_new_symbol_of_type(MetricSymbol, self._scope, scope_name, scope)
self._symbol_table.add_new_symbol_of_type(MetricSymbol, self._current_scope, scope_name, scope)
# Clean up temporary XML files after processing
self._remove_temp_files(temp_xml_path)
# Return symboltable for cli call
return self._symbol_table
def _remove_temp_files(self, temp_xml_path: str):
"""Remove the temporary XML files directory."""
try:
# Remove temporary xml files
shutil.rmtree(xml_path)
rmtree(temp_xml_path)
except OSError as e:
logger.error("Error deleting temporary xml directory : %s - %s." % (e.filename, e.strerror))
LOGGER.error("Error deleting temporary XML directory: %s - %s.", e.filename, e.strerror)
def withScope(self, tree: ParseTree, t: type, action: Callable, *my_args: P.args or None, **my_kwargs: P.kwargs or None) -> T:
def withScope(self, tree: ParseTree, symbol_type: type, action: Callable, *args: P.args or None, **kwargs: P.kwargs or None) -> T:
"""
Visit a scoped symbol and recursively visit all symbols inside with the scoped symbol as scope
Visit a scoped symbol and recursively visit all symbols inside with the scoped symbol as scope.
:param tree: Context of the scoped symbol
:param t: Symbol type
:param action: Lambda function to add children symbols to symboltable
:param my_args: Arguments of symbol type
:param my_kwargs: named Arguments of symbol type
:param symbol_type: Type of the symbol
:param action: Lambda function to add child symbols to the symbol table
:param args: Arguments for the symbol type
:param kwargs: Named arguments for the symbol type
:return: Current scope
"""
scope = self._symbol_table.add_new_symbol_of_type(t, self._scope, *my_args, **my_kwargs)
scope = self._symbol_table.add_new_symbol_of_type(symbol_type, self._current_scope, *args, **kwargs)
scope.context = tree
self._scope = scope
self._current_scope = scope
try:
return action()
finally:
self._scope = scope.parent()
self._current_scope = scope.parent()
......@@ -40,7 +40,7 @@ from pygls.server import LanguageServer
from pygls.workspace import Document
# User relative imports
from .cst.calculate_complexity_visitor import CalculateComplexityVisitor
from .cst.calculate_complexity_visitor import ComplexityMetricVisitor
from .cst.cmake_file_generator_visitor import CMakeFileGeneratorVisitor
from .cst.f90_file_generator_visitor import F90FileGeneratorVisitor
from .cst.diagnostic_listener import DiagnosticListener
......@@ -63,7 +63,7 @@ class TDDLSPServer(LanguageServer):
CONFIGURATION_SECTION = "ODsl-TDD-DSL-Server"
# Number of SuT to return for metric calculation
N_SHOW_METRICS = 0
N_SHOW_METRICS = 3
SHOW_DEBUG_OUTPUT: bool = True
DEBUG_OUTPUT_SEPERATOR: str = "\t"
......@@ -242,17 +242,9 @@ def did_close(server: TDDLSPServer, params: DidCloseTextDocumentParams):
def did_save(server: TDDLSPServer, params: DidSaveTextDocumentParams):
"""Execute file generator and return text document did save notification."""
# LSP client or cli call
input_stream: InputStream
file_path: str
if params:
# LSP client
text_doc: Document = get_text_document(params)
file_path = path.abspath(text_doc.path)
input_stream = InputStream(text_doc.source)
else:
# cli
file_path = tdd_server.input_path
input_stream = FileStream(tdd_server.input_path)
text_doc: Document = get_text_document(params) if params else None
file_path: str = path.abspath(text_doc.path) if params else server.input_path
input_stream: InputStream = InputStream(text_doc.source) if params else FileStream(server.input_path)
server.parseTree = parse_stream(input_stream, server) # Launch parser
if server.parseTree:
......@@ -263,19 +255,19 @@ def did_save(server: TDDLSPServer, params: DidSaveTextDocumentParams):
rel_file_path: str = path.relpath(file_path, getcwd()) # Get relative input path for file generation
# Generate pf files
pf_file_generator_visitor: PFFileGeneratorVisitor = PFFileGeneratorVisitor(
work_path=getcwd(), files=tdd_server.files, symbol_table=symbol_table, rel_file_path=rel_file_path
work_path=getcwd(), files=server.files, symbol_table=symbol_table, rel_file_path=rel_file_path
)
tdd_server.files = pf_file_generator_visitor.visit(server.parseTree) # Write pf files and save generated files
server.files = pf_file_generator_visitor.visit(server.parseTree) # Write pf files and save generated files
# Generate F90 files
f90_file_generator_visitor: F90FileGeneratorVisitor = F90FileGeneratorVisitor(
work_path=getcwd(), files=tdd_server.files, symbol_table=symbol_table, rel_file_path=rel_file_path
work_path=getcwd(), files=server.files, symbol_table=symbol_table, rel_file_path=rel_file_path
)
tdd_server.files = f90_file_generator_visitor.visit(server.parseTree) # Update fortran file and save generated files
server.files = f90_file_generator_visitor.visit(server.parseTree) # Update fortran file and save generated files
# Generate CMake files
cmake_file_generator_visitor: CMakeFileGeneratorVisitor = CMakeFileGeneratorVisitor(
work_path=getcwd(), files=tdd_server.files, symbol_table=symbol_table
work_path=getcwd(), files=server.files, symbol_table=symbol_table
)
tdd_server.files = cmake_file_generator_visitor.visit(server.parseTree) # Update CMake files and save generated files
server.files = cmake_file_generator_visitor.visit(server.parseTree) # Update CMake files and save generated files
server.show_message("Text Document Did Save")
......@@ -333,27 +325,86 @@ def semantic_tokens(server: TDDLSPServer, params: SemanticTokensParams) -> Seman
return SemanticTokens(data=data)
def table_to_string(metrics_table, separator):
    """Converts the list of metric dictionaries into a formatted string."""
    if not metrics_table:
        return ""

    # Column order is taken from the first row's keys.
    columns = list(metrics_table[0].keys())

    # First line is the header, then one line per row, values joined
    # by the separator and stringified.
    lines = [separator.join(columns)]
    lines.extend(
        separator.join(str(entry[column]) for column in columns)
        for entry in metrics_table
    )

    return "\n".join(lines)
@tdd_server.command(TDDLSPServer.CMD_RECOMMEND_SUT_BLOCKING)
def recommend_software_under_test(server: TDDLSPServer, *args):
    """Calculates the complexity of Software Under Test (SuT) in the input path and returns SuTs test recommendations."""
    # Initialize a visitor to calculate complexity metrics
    complexity_visitor = ComplexityMetricVisitor(
        name="recommend",
        source_root_directory=getcwd(),
        fxtran_command_path=server.fxtran_path,
        sorting_metric=server.sort_metric,
        debug_mode=server.SHOW_DEBUG_OUTPUT,
        debug_separator=server.DEBUG_OUTPUT_SEPERATOR
    )

    # Generate a symbol table: LSP client calls pass args and use the parse
    # tree; the cli path analyses the configured input path directly.
    if args:
        symbol_table = complexity_visitor.visit(server.parse_tree)
    else:
        symbol_table = complexity_visitor.calculate_metrics_for(server.input_path)

    # Suggest metrics based on the generated symbol table
    recommended_metrics = suggest_symbols(symbol_table, position=None, symbol_type=MetricSymbol)

    # Prepare the metrics table as a list of dictionaries
    metrics_table = []

    # Split the configured debug header into column names.
    header = server.DEBUG_HEADER.split(server.DEBUG_OUTPUT_SEPERATOR)
    # NOTE(review): this first row carries only None values; downstream
    # table_to_string derives headers from dict keys, so this row prints as
    # a line of "None"s — confirm it is intentional.
    metrics_table.append({header[i]: None for i in range(len(header))})

    # Populate the metrics table with the recommended metrics, one dict per row.
    for metric in recommended_metrics:
        values = metric.split(server.DEBUG_OUTPUT_SEPERATOR)
        metrics_table.append({header[i]: values[i] for i in range(len(values))})

    # cli call: return the metrics table to the caller instead of displaying it.
    if not args:
        return metrics_table

    # Display the top N metrics as configured in the server settings
    for metric in recommended_metrics[:server.N_SHOW_METRICS]:
        server.show_message(metric)

    # Write debug output to a file if debugging is enabled
    if server.SHOW_DEBUG_OUTPUT:
        debug_file_path = path.join(getcwd(), server.sort_metric)  # Define the debug file path
        server.show_message(f"Written debug file: {debug_file_path}")

        # Convert the metrics table to a string and persist it.
        metrics_string = table_to_string(metrics_table, server.DEBUG_OUTPUT_SEPERATOR)
        debug_file_write(debug_file_path, metrics_string)

    # Final message indicating the recommendation process is complete
    server.show_message(f"Recommendations for Software Under Test (SuT) based on {server.sort_metric}...")
def debug_file_write(file_path: str = None, content: str = None):
......
......@@ -71,7 +71,7 @@ class Scope:
sort_metric: str = field(default="")
debug: bool = field(default=False)
debug_seperator: str = field(default="\n")
debug_separator: str = field(default="\n")
routine_types: Set = field(default_factory=lambda: {"function-stmt", "subroutine-stmt"})
module_types: Set = field(default_factory=lambda: {"module-stmt"})
......@@ -604,52 +604,52 @@ class Scope:
""" toString method """
if not self.debug:
return (f"ID: {self.name}{self.debug_seperator}"
f"Source: {self.src}{self.debug_seperator}"
return (f"ID: {self.name}{self.debug_separator}"
f"Source: {self.src}{self.debug_separator}"
)
elif self.debug_seperator == "\n":
return (f"Scope: {self.name}{self.debug_seperator}"
f"Source: {self.src}{self.debug_seperator}"
f"Cyclomatic Complexity: {self.cyclomatic_complexity}{self.debug_seperator}"
f"Depth of Nesting: {self.depth_of_nesting}{self.debug_seperator}"
f"Lines of Code (LOC): {self.loc}{self.debug_seperator}"
f"Number of Parameters: {self.n_arguments}{self.debug_seperator}"
f"Number of Conditionals: {self.n_conditionals}{self.debug_seperator}"
f"Number of Loops: {self.n_loops}{self.debug_seperator}"
f"Number of Branches: {self.n_branches}{self.debug_seperator}"
f"Number of Variables: {self.n_declarations}{self.debug_seperator}"
f"Number of Return Statements: {self.n_results}{self.debug_seperator}"
f"Number of Calls to External Functions/Procedures: {self.n_external_calls}{self.debug_seperator}"
f"Number of Decision Points: {self.n_decision_points}{self.debug_seperator}"
f"Halstead Complexity Measures:{self.debug_seperator}"
f"Number of distinct Operators ηT: {self.n_operators}{self.debug_seperator}"
f"Number of distinct Operands ηD: {self.n_operands}{self.debug_seperator}"
f"Number of total Operators NT: {self.sum_operators}{self.debug_seperator}"
f"Number of total Operands ND: {self.sum_operands}{self.debug_seperator}"
f"Ratio of Operators nNT: {self.ratio_operators}{self.debug_seperator}"
f"Ratio of Operands nND: {self.ratio_operands}{self.debug_seperator}"
f"Vocabulary (ηT + ηD): {self.vocabulary}{self.debug_seperator}"
f"Program Length (NT + ND): {self.program_length}{self.debug_seperator}"
f"Calculated Length: {self.calculated_length}{self.debug_seperator}"
f"Volume: {self.volume}{self.debug_seperator}"
f"Difficulty: {self.difficulty}{self.debug_seperator}"
f"Effort: {self.effort}{self.debug_seperator}"
f"Time required to program: {self.time_required_to_program}{self.debug_seperator}"
f"Number of delivered bugs: {self.n_bugs}{self.debug_seperator}"
# f"Distinct Operators: {self.operators}{self.debug_seperator}"
# f"Distinct Operands: {self.operands}{self.debug_seperator}"
f"Test Score:{self.debug_seperator}"
f"Testability Score: {self.testability_difficulty}{self.debug_seperator}"
f"Testability Index: {self.testability_index}{self.debug_seperator}"
f"Normalized Testability Score: {self.normalized_testability_difficulty}{self.debug_seperator}"
f"Aggregated Testability Score: {self.aggregated_testability_difficulty}{self.debug_seperator}"
f"Test Index: {self.test_index}{self.debug_seperator}"
f"Normalized Test Score: {self.normalized_test_score}{self.debug_seperator}"
f"Aggregated Test Score: {self.aggregated_test_score}{self.debug_seperator}"
elif self.debug_separator == "\n":
return (f"Scope: {self.name}{self.debug_separator}"
f"Source: {self.src}{self.debug_separator}"
f"Cyclomatic Complexity: {self.cyclomatic_complexity}{self.debug_separator}"
f"Depth of Nesting: {self.depth_of_nesting}{self.debug_separator}"
f"Lines of Code (LOC): {self.loc}{self.debug_separator}"
f"Number of Parameters: {self.n_arguments}{self.debug_separator}"
f"Number of Conditionals: {self.n_conditionals}{self.debug_separator}"
f"Number of Loops: {self.n_loops}{self.debug_separator}"
f"Number of Branches: {self.n_branches}{self.debug_separator}"
f"Number of Variables: {self.n_declarations}{self.debug_separator}"
f"Number of Return Statements: {self.n_results}{self.debug_separator}"
f"Number of Calls to External Functions/Procedures: {self.n_external_calls}{self.debug_separator}"
f"Number of Decision Points: {self.n_decision_points}{self.debug_separator}"
f"Halstead Complexity Measures:{self.debug_separator}"
f"Number of distinct Operators ηT: {self.n_operators}{self.debug_separator}"
f"Number of distinct Operands ηD: {self.n_operands}{self.debug_separator}"
f"Number of total Operators NT: {self.sum_operators}{self.debug_separator}"
f"Number of total Operands ND: {self.sum_operands}{self.debug_separator}"
f"Ratio of Operators nNT: {self.ratio_operators}{self.debug_separator}"
f"Ratio of Operands nND: {self.ratio_operands}{self.debug_separator}"
f"Vocabulary (ηT + ηD): {self.vocabulary}{self.debug_separator}"
f"Program Length (NT + ND): {self.program_length}{self.debug_separator}"
f"Calculated Length: {self.calculated_length}{self.debug_separator}"
f"Volume: {self.volume}{self.debug_separator}"
f"Difficulty: {self.difficulty}{self.debug_separator}"
f"Effort: {self.effort}{self.debug_separator}"
f"Time required to program: {self.time_required_to_program}{self.debug_separator}"
f"Number of delivered bugs: {self.n_bugs}{self.debug_separator}"
# f"Distinct Operators: {self.operators}{self.debug_separator}"
# f"Distinct Operands: {self.operands}{self.debug_separator}"
f"Test Score:{self.debug_separator}"
f"Testability Score: {self.testability_difficulty}{self.debug_separator}"
f"Testability Index: {self.testability_index}{self.debug_separator}"
f"Normalized Testability Score: {self.normalized_testability_difficulty}{self.debug_separator}"
f"Aggregated Testability Score: {self.aggregated_testability_difficulty}{self.debug_separator}"
f"Test Index: {self.test_index}{self.debug_separator}"
f"Normalized Test Score: {self.normalized_test_score}{self.debug_separator}"
f"Aggregated Test Score: {self.aggregated_test_score}{self.debug_separator}"
f"Test Factor: {self.test_factor}"
)
else:
return (self.debug_seperator.join(str(x) for x in [self.name,
return (self.debug_separator.join(str(x) for x in [self.name,
self.src,
self.cyclomatic_complexity,
self.depth_of_nesting,
......@@ -812,14 +812,14 @@ def add_operators_to(scope: Scope, element: ET.Element):
scope.add_operator(operator.text.strip())
def calculate_metrics(xml_path: str = None, src: str = None, sort_metric=None, debug: bool = False, debug_seperator: str = "\n") -> dict[str, Scope]:
def calculate_metrics(xml_path: str = None, src: str = None, sort_metric=None, debug: bool = False, debug_separator: str = "\n") -> dict[str, Scope]:
"""
Calculate cyclomatic complexity and halstead complexity measures from fxtran formatted xml file for Fortran code
:param xml_path: Path to fxtran xml files
:param src: Optional path to Fortran source files
:param sort_metric: Optional sort metric
:param debug: Optional debug output
:param debug_seperator: Optional seperator for debug output
:param debug_separator: Optional separator for debug output
:return: None
"""
......@@ -906,7 +906,7 @@ def calculate_metrics(xml_path: str = None, src: str = None, sort_metric=None, d
src=src,
sort_metric=sort_metric,
debug=debug,
debug_seperator=debug_seperator)
debug_separator=debug_separator)
# Add scope to parent
if current_scope:
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment