Skip to content
Snippets Groups Projects
Commit 77338a13 authored by Sven Gundlach's avatar Sven Gundlach
Browse files

Add metrics file forward pass

parent 36de779a
No related branches found
No related tags found
No related merge requests found
Pipeline #16634 passed
Scope Source Cyclomatic Complexity Depth LOC Parameters Conditionals Loops Branches Variables Returns Calls Decision Points Halstead Complexity (d)Operators ηT (d)Operands ηD (t)Operators NT (t)Operands ND Operator ratio nNT Operands ratio nND Vocabulary (ηT + ηD) Program Length (NT + ND) Calculated Length Volume Difficulty Effort Time to program delivered bugs Test Score Testability Difficulty Testability Index Normalized Testability Difficulty Aggregated Testability Difficulty Test Index Normalized Test Score Aggregated Test Score Test Factor
additional_use_0 /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/additional_use_0.f90 1 0 3 0 0 0 0 0 0 0 0 6 0 7 0 1.1666666666666667 0 6 7 15.509775004326936 18.094737505048094 0 0.0 0.0 0.0 3.981666666666667 0.2007360321177651 1.3272222222222223 0.2342156862745098 0.0 0.0 0.0 0.0
additional_use_1 /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/additional_use_1.f90 1 0 3 0 0 0 0 0 0 0 0 6 0 7 0 1.1666666666666667 0 6 7 15.509775004326936 18.094737505048094 0 0.0 0.0 0.0 3.981666666666667 0.2007360321177651 1.3272222222222223 0.2342156862745098 0.0 0.0 0.0 0.0
shape_area /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 3 1 0 0 0 2 0 0 0 8 2 12 2 1.5 1.0 10 14 26.0 46.50699332842307 4.0 186.02797331369229 10.334887406316238 0.010862528153285396 4.945 0.16820857863751051 1.6483333333333334 0.2908823529411765 0.0018075360102744163 0.017712662939482517 0.0031257640481439732 0.002564630448945443
s_bar /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 2 1 0 0 0 1 0 0 0 10 1 13 1 1.3 1.0 11 14 33.219280948873624 48.432042660922164 5.0 242.1602133046108 13.453345183589489 0.012950323152584048 3.9209999999999994 0.20321072952651903 1.9604999999999997 0.23064705882352937 0.0025979996800340193 0.025064514972089637 0.0029487664673046635 0.004007760634936491
s_foo /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 2 1 0 0 0 1 0 0 0 10 1 13 1 1.3 1.0 11 14 33.219280948873624 48.432042660922164 5.0 242.1602133046108 13.453345183589489 0.012950323152584048 3.9209999999999994 0.20321072952651903 1.9604999999999997 0.23064705882352937 0.0025979996800340193 0.025064514972089637 0.0029487664673046635 0.004007760634936491
loop_sum /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 7 0 19 3 6 6 2 4 5 1 8 19 8 50 18 2.6315789473684212 2.25 27 68 104.71062275542812 323.33235014711585 21.375 6911.228984394601 383.9571657997001 0.12094342600424289 27.35171052631579 0.03527124048024578 1.4395637119113573 1.6089241486068113 0.0038055664221222805 0.1553207443221004 0.17359377306587695 0.004992795816227588
get_sum0 /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 3 2 0 0 0 3 1 1 0 9 4 15 7 1.6666666666666667 1.75 13 22 36.529325012980806 81.40967379910403 7.875 641.1011811679442 35.616732287108015 0.024783528032634616 5.949166666666666 0.14390214654035258 1.9830555555555553 0.34995098039215683 0.0034801524274944923 0.04795853134538097 0.008463270237420172 0.00549639982819632
cfo_example /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 8 1 75 19 7 6 4 48 16 3 10 44 62 335 116 7.613636363636363 1.8709677419354838 106 451 609.3751624640273 3034.2921250080026 41.16129032258064 124895.37908097454 6938.632171165253 0.8328682865844182 116.88392228739002 0.00848292100055929 1.5584522971652002 6.875524840434707 0.003854699178702489 0.7081717240480763 3.12428701785916 0.005678668563152091
fT_ME /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 4 1 0 0 0 2 1 0 0 12 6 21 8 1.75 1.3333333333333333 18 29 58.529325012980806 120.92782504182705 8.0 967.4226003346164 53.7457000185898 0.032605402408605454 7.435833333333334 0.1185419342092265 1.8589583333333335 0.43740196078431376 0.0037430633794581273 0.05869820589528508 0.01381134256359649 0.006003014985781784
plus_two /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 4 3 0 0 0 3 2 0 0 10 5 21 9 2.1 1.8 15 30 44.82892142331043 117.20671786825557 9.0 1054.8604608143 58.60335893412778 0.03454157115559581 7.6530000000000005 0.11556685542586385 1.9132500000000001 0.4501764705882353 0.003858579366184362 0.06388014059176386 0.015030621315709142 0.006103891637219979
get_sum1 /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 4 2 0 0 0 3 1 1 0 12 4 21 7 1.75 1.75 16 28 51.01955000865387 112.0 10.5 1176.0 65.33333333333333 0.037137867273699644 7.494999999999999 0.11771630370806357 1.8737499999999998 0.4408823529411764 0.004215189321505027 0.06709530236998952 0.015787129969409296 0.0066311172752194815
get_sum2 /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 2 0 8 2 1 0 2 3 2 0 2 18 4 34 9 1.8888888888888888 2.25 22 43 83.05865002596161 191.75555960140377 20.25 3883.0500819284266 215.72500455157925 0.08234945701069864 13.059722222222224 0.0711251605255359 1.632465277777778 0.7682189542483661 0.00541148545983773 0.12420445942210188 0.05844915737510677 0.007876188651647305
zxrex /home/sgu/IdeaProjects/python-oceandsls/tdd-dsl/input/tdd_dev/UVic_example_opem/cfo_example.f90 1 0 8 3 0 0 0 4 4 0 0 14 5 46 23 3.2857142857142856 4.6 19 69 64.91260938324326 293.1069984276074 32.199999999999996 9438.045349368957 524.3358527427198 0.14886757655663466 14.722 0.06360513929525506 1.84025 0.866 0.008241805353939348 0.23845529576127097 0.1122142568288334 0.013483627864165914
\ No newline at end of file
...@@ -114,17 +114,14 @@ def fxtran_executable(path: str): ...@@ -114,17 +114,14 @@ def fxtran_executable(path: str):
return path return path
def process_recommended_metrics(metrics_table): def process_recommended_metrics(metrics_path):
"""Process the recommended metrics passed from __main__.py.""" """Process the recommended metrics passed from __main__.py."""
# Debug output of the recommended metrics
print("Debug Output: Recommended Metrics") # Check for metrics
if not metrics_table: if not metrics_path:
print("No recommended metrics available.") print("No recommended metrics available.")
return return
readable_file(metrics_path)
# Print each metric in a structured format
for index, metric in enumerate(metrics_table):
print(f"Metric {index + 1}: {metric}") # Adjust formatting as needed
# Get the current script's directory # Get the current script's directory
current_dir = dirname(abspath(__file__)) current_dir = dirname(abspath(__file__))
...@@ -133,7 +130,7 @@ def process_recommended_metrics(metrics_table): ...@@ -133,7 +130,7 @@ def process_recommended_metrics(metrics_table):
llm_main_path = join(current_dir, 'llm') llm_main_path = join(current_dir, 'llm')
# Define the arguments you want to pass to llm # Define the arguments you want to pass to llm
args = ['--contextOnly', metrics_table] # Replace with your actual arguments args = ['--contextOnly', metrics_path] # Replace with your actual arguments
# Run the llm __main__.py file with arguments # Run the llm __main__.py file with arguments
result = run([executable, '-m', 'llm'] + args, cwd=llm_main_path) result = run([executable, '-m', 'llm'] + args, cwd=llm_main_path)
...@@ -144,7 +141,6 @@ def process_recommended_metrics(metrics_table): ...@@ -144,7 +141,6 @@ def process_recommended_metrics(metrics_table):
else: else:
print("llm executed successfully.") print("llm executed successfully.")
def main(): def main():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
add_arguments(parser) add_arguments(parser)
...@@ -165,9 +161,9 @@ def main(): ...@@ -165,9 +161,9 @@ def main():
elif args.source: elif args.source:
# Start cli code analysis for input path # Start cli code analysis for input path
tdd_server.input_path = join(getcwd(), args.source) tdd_server.input_path = join(getcwd(), args.source)
metrics_table = recommend_software_under_test(tdd_server) metrics_path = recommend_software_under_test(tdd_server)
# Pass the recommended metrics to the llm analyzer # Pass the recommended metrics to the llm analyzer
process_recommended_metrics(metrics_table) process_recommended_metrics(metrics_path)
else: else:
if args.tcp: if args.tcp:
......
Subproject commit 3c938efc4512ad0e01d225a70fee7faa40fc6ce1 Subproject commit 14644fc4aafe06604fee44d3d8ac5bbe80e885fd
...@@ -324,31 +324,6 @@ def semantic_tokens(server: TDDLSPServer, params: SemanticTokensParams) -> Seman ...@@ -324,31 +324,6 @@ def semantic_tokens(server: TDDLSPServer, params: SemanticTokensParams) -> Seman
return SemanticTokens(data=data) return SemanticTokens(data=data)
def table_to_string(metrics_table, separator):
    """Render a list of metric dictionaries as a separator-delimited table string.

    The first row's keys define the column order. Output is one header line
    followed by one line per dictionary, with values stringified and joined
    by *separator*; lines are joined with newlines. Returns "" for an empty
    (or falsy) input.
    """
    if not metrics_table:
        return ""

    # Column order comes from the first row's keys.
    headers = metrics_table[0].keys()

    # Header line first, then one separator-joined line per row.
    lines = [separator.join(headers)]
    lines.extend(
        separator.join(str(row[column]) for column in headers)
        for row in metrics_table
    )
    return "\n".join(lines)
@tdd_server.command(TDDLSPServer.CMD_RECOMMEND_SUT_BLOCKING) @tdd_server.command(TDDLSPServer.CMD_RECOMMEND_SUT_BLOCKING)
def recommend_software_under_test(server: TDDLSPServer, *args): def recommend_software_under_test(server: TDDLSPServer, *args):
"""Calculates the complexity of Software Under Test (SuT) in the input path and returns SuTs test recommendations.""" """Calculates the complexity of Software Under Test (SuT) in the input path and returns SuTs test recommendations."""
...@@ -372,38 +347,17 @@ def recommend_software_under_test(server: TDDLSPServer, *args): ...@@ -372,38 +347,17 @@ def recommend_software_under_test(server: TDDLSPServer, *args):
# Suggest metrics based on the generated symbol table # Suggest metrics based on the generated symbol table
recommended_metrics = suggest_symbols(symbol_table, position=None, symbol_type=MetricSymbol) recommended_metrics = suggest_symbols(symbol_table, position=None, symbol_type=MetricSymbol)
# Prepare the metrics table as a list of dictionaries # Write output to file if debug enabled or passed to llm analysis
metrics_table = [] if tdd_server.SHOW_DEBUG_OUTPUT or not args:
recommended_metrics.insert(0, tdd_server.DEBUG_HEADER)
# Add the header to the metrics table file_path = path.join(getcwd(), tdd_server.sort_metric)
header = server.DEBUG_HEADER.split(server.DEBUG_OUTPUT_SEPERATOR) # Split header into list debug_file_write(file_path, "\n".join(recommended_metrics))
metrics_table.append({header[i]: None for i in range(len(header))}) # Add empty row for headers return file_path
# Populate the metrics table with recommended metrics
for metric in recommended_metrics:
values = metric.split(server.DEBUG_OUTPUT_SEPERATOR) # Split metric into values
metrics_table.append({header[i]: values[i] for i in range(len(values))}) # Create a dictionary for each row
# If no arguments are given, return the metrics table
if not args:
return metrics_table # Return the list of dictionaries
# Display the top N metrics as configured in the server settings
for metric in recommended_metrics[:server.N_SHOW_METRICS]:
server.show_message(metric)
# Write debug output to a file if debugging is enabled
if server.SHOW_DEBUG_OUTPUT:
debug_file_path = path.join(getcwd(), server.sort_metric) # Define the debug file path
server.show_message(f"Written debug file: {debug_file_path}")
# Convert the metrics table to a string
metrics_string = table_to_string(metrics_table, server.DEBUG_OUTPUT_SEPERATOR)
# Write the formatted string to the debug file # Show the top N metrics as determined by the configuration
debug_file_write(debug_file_path, metrics_string) for metric in recommended_metrics[:tdd_server.N_SHOW_METRICS]:
tdd_server.show_message(metric)
# Final message indicating the recommendation process is complete
server.show_message(f"Recommendations for Software Under Test (SuT) based on {server.sort_metric}...") server.show_message(f"Recommendations for Software Under Test (SuT) based on {server.sort_metric}...")
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment