Skip to content

Commit

Permalink
commit changes
Browse files Browse the repository at this point in the history
  • Loading branch information
snehas-05 committed Nov 5, 2024
1 parent 32ab3f5 commit 32e1852
Show file tree
Hide file tree
Showing 9 changed files with 228 additions and 0 deletions.
7 changes: 7 additions & 0 deletions model_evaluation/models/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
import os

import joblib

def load_model(model_name):
    """Load a model previously persisted by save_model from models/<model_name>.pkl.

    model_name reaches this function from untrusted form input (the /evaluate
    route), so anything that is not a plain file name is rejected to prevent
    path traversal (e.g. '../../etc/passwd').

    Raises:
        ValueError: if model_name is empty or contains path components.
    """
    if not model_name or os.path.basename(model_name) != model_name or model_name in ('.', '..'):
        raise ValueError(f'Invalid model name: {model_name!r}')
    return joblib.load(os.path.join('models', f'{model_name}.pkl'))

def save_model(model, model_name):
    """Persist *model* to models/<model_name>.pkl via joblib.

    Mirrors load_model's validation: model_name must be a plain file name so a
    crafted value cannot write outside the models directory.

    Raises:
        ValueError: if model_name is empty or contains path components.
    """
    if not model_name or os.path.basename(model_name) != model_name or model_name in ('.', '..'):
        raise ValueError(f'Invalid model name: {model_name!r}')
    joblib.dump(model, os.path.join('models', f'{model_name}.pkl'))
40 changes: 40 additions & 0 deletions model_evaluation/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from flask import Flask, render_template, request, redirect, url_for, flash
from datasets.dataset_manager import DatasetManager
from evaluations.evaluator import ModelEvaluator
import os

app = Flask(__name__)
# Prefer an environment-provided key; the literal fallback is for local
# development only and must not be used in a deployed environment.
app.secret_key = os.environ.get('SECRET_KEY', 'your_secret_key')

# Module-level singletons shared by all request handlers below.
dataset_manager = DatasetManager()
evaluator = ModelEvaluator()

@app.route('/')
def index():
    """Render the landing page, listing the standard datasets for selection."""
    available = dataset_manager.get_standard_datasets()
    return render_template('index.html', datasets=available)

@app.route('/upload', methods=['POST'])
def upload_dataset():
    """Accept an uploaded dataset file, store it, and redirect to the index.

    Flashes a success message with the stored name, or a 'danger' message when
    the file is missing or has a disallowed extension.
    """
    # methods=['POST'] already restricts this route, so the original
    # `if request.method == 'POST'` check was redundant. `.get` avoids a
    # 400 KeyError response when the form field is absent entirely.
    file = request.files.get('dataset')
    if file and dataset_manager.allowed_file(file.filename):
        dataset_name = dataset_manager.save_uploaded_dataset(file)
        flash(f'Dataset {dataset_name} uploaded successfully!', 'success')
    else:
        flash('Invalid file type. Please upload a valid dataset.', 'danger')
    return redirect(url_for('index'))

@app.route('/evaluate', methods=['POST'])
def evaluate_model():
    """Evaluate the selected model on the selected dataset and show results.

    On any failure (missing form fields, unknown model, unloadable dataset)
    flashes a 'danger' message and redirects back to the index page.
    """
    # `.get` instead of indexing: a missing field becomes a flashed error
    # rather than an unhandled 400 KeyError.
    model_name = request.form.get('model_name')
    dataset_name = request.form.get('dataset_name')
    results = None
    if model_name and dataset_name:
        results = evaluator.evaluate(model_name, dataset_name)
    if results:
        return render_template('results.html', results=results)
    flash('Evaluation failed. Please check the model and dataset.', 'danger')
    return redirect(url_for('index'))

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug reloader and interactive
    # debugger (arbitrary code execution) -- acceptable for local development
    # only; never run with debug enabled in a deployed environment.
    app.run(debug=True)
19 changes: 19 additions & 0 deletions model_evaluation/datasets/dataset_manager.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import os
import pandas as pd

class DatasetManager:
    """Manage the built-in dataset catalogue and user-uploaded dataset files."""

    # File extensions accepted for upload (compared case-insensitively).
    ALLOWED_EXTENSIONS = {'csv', 'xlsx', 'json'}

    def __init__(self):
        # Placeholder names only; a real deployment would map these to paths.
        self.standard_datasets = ['MNIST', 'CIFAR-10', 'IMDB']

    def allowed_file(self, filename):
        """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
        return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS

    def get_standard_datasets(self):
        """Return the list of built-in dataset names."""
        return self.standard_datasets

    def save_uploaded_dataset(self, file):
        """Persist an uploaded file under datasets/ and return the stored name.

        The client-supplied filename is reduced to its basename so a crafted
        name such as '../../evil.csv' cannot escape the datasets directory.
        """
        safe_name = os.path.basename(file.filename)
        dataset_path = os.path.join('datasets', safe_name)
        file.save(dataset_path)
        return safe_name
40 changes: 40 additions & 0 deletions model_evaluation/evaluations/evaluator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import joblib
import pandas as pd
import os

class ModelEvaluator:
    """Load a persisted model and score it on a stored dataset."""

    def evaluate(self, model_name, dataset_name):
        """Evaluate models/<model_name>.pkl on datasets/<dataset_name>.

        Returns:
            dict with 'accuracy', 'precision', 'recall', 'f1_score'
            (weighted averages), or None when the model file is missing or
            the dataset cannot be loaded.
        """
        model_path = os.path.join('models', f'{model_name}.pkl')

        if not os.path.exists(model_path):
            print(f"Model {model_name} not found.")
            return None

        model = joblib.load(model_path)
        X, y = self.load_data(dataset_name)

        if X is None or y is None:
            return None

        y_pred = model.predict(X)

        # zero_division=0 matches sklearn's default value for undefined
        # metrics while suppressing the UndefinedMetricWarning it emits.
        return {
            'accuracy': accuracy_score(y, y_pred),
            'precision': precision_score(y, y_pred, average='weighted', zero_division=0),
            'recall': recall_score(y, y_pred, average='weighted', zero_division=0),
            'f1_score': f1_score(y, y_pred, average='weighted', zero_division=0),
        }

    def load_data(self, dataset_name):
        """Load datasets/<dataset_name> as (X, y).

        Features X are all columns but the last; labels y are the last column.
        Supports the same extensions DatasetManager accepts (csv, xlsx, json).
        Returns (None, None) for missing files or unsupported types instead of
        raising, so evaluate() can report failure gracefully.
        """
        dataset_path = os.path.join('datasets', dataset_name)
        if not os.path.exists(dataset_path):
            return None, None

        suffix = dataset_name.rsplit('.', 1)[-1].lower()
        if suffix == 'csv':
            data = pd.read_csv(dataset_path)
        elif suffix == 'xlsx':
            data = pd.read_excel(dataset_path)  # needs an Excel engine (e.g. openpyxl)
        elif suffix == 'json':
            data = pd.read_json(dataset_path)
        else:
            return None, None

        X = data.iloc[:, :-1].values
        y = data.iloc[:, -1].values
        return X, y
4 changes: 4 additions & 0 deletions model_evaluation/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Runtime dependencies for the model_evaluation app.
Flask
scikit-learn
joblib
pandas
37 changes: 37 additions & 0 deletions model_evaluation/static/script.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// Auto-dismiss flash messages after they have been visible for 5 seconds.
document.addEventListener("DOMContentLoaded", () => {
    const FLASH_TIMEOUT_MS = 5000; // how long a message stays on screen
    for (const message of document.querySelectorAll(".flash-message")) {
        setTimeout(() => {
            message.style.display = "none";
        }, FLASH_TIMEOUT_MS);
    }
});

// Client-side validation: block form submission when required inputs are
// missing (the server still validates; this only improves feedback).
document.addEventListener("DOMContentLoaded", () => {
    const uploadForm = document.querySelector("form[action='/upload']");
    const evaluateForm = document.querySelector("form[action='/evaluate']");

    // Optional chaining: no-op when the form is absent from the page.
    uploadForm?.addEventListener("submit", (event) => {
        const fileInput = uploadForm.querySelector("input[type='file']");
        if (fileInput.value) return; // a file was chosen; let it through
        alert("Please select a dataset file to upload.");
        event.preventDefault();
    });

    evaluateForm?.addEventListener("submit", (event) => {
        const modelSelect = evaluateForm.querySelector("select[name='model_name']");
        const datasetSelect = evaluateForm.querySelector("select[name='dataset_name']");
        if (modelSelect.value && datasetSelect.value) return; // both chosen
        alert("Please select both a model and a dataset to evaluate.");
        event.preventDefault();
    });
});
24 changes: 24 additions & 0 deletions model_evaluation/static/style.css
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/* Base typography for the whole app. */
body {
    font-family: Arial, sans-serif;
}

h1 {
    color: #333;
}

form {
    margin: 20px 0;
}

/* Uniform spacing for all form controls. */
input, select, button {
    padding: 10px;
    margin: 5px 0;
}

/* Flash-message categories (match the Flask flash() categories). */
.success {
    color: green;
}

.danger {
    color: red;
}
40 changes: 40 additions & 0 deletions model_evaluation/templates/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
<!-- Landing page: flash messages, dataset upload form, and evaluation form. -->
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Model Evaluation</title>
    <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
</head>
<body>
    <h1>Model Evaluation and Benchmarking</h1>

    <!-- Flash messages from the upload/evaluate routes; category maps to a CSS class. -->
    {% with messages = get_flashed_messages(with_categories=true) %}
      {% if messages %}
        <ul>
          {% for category, message in messages %}
            <li class="{{ category }}">{{ message }}</li>
          {% endfor %}
        </ul>
      {% endif %}
    {% endwith %}

    <!-- Dataset upload: posts multipart form data to the /upload route. -->
    <form action="/upload" method="post" enctype="multipart/form-data">
        <input type="file" name="dataset" required>
        <button type="submit">Upload Dataset</button>
    </form>

    <!-- Evaluation: model names are static here; datasets come from the view. -->
    <form action="/evaluate" method="post">
        <select name="model_name" required>
            <option value="model1">Model 1</option>
            <option value="model2">Model 2</option>
            <!-- Add model options dynamically or statically -->
        </select>
        <select name="dataset_name" required>
            {% for dataset in datasets %}
                <option value="{{ dataset }}">{{ dataset }}</option>
            {% endfor %}
        </select>
        <button type="submit">Evaluate Model</button>
    </form>
</body>
</html>
17 changes: 17 additions & 0 deletions model_evaluation/templates/results.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
<!-- Results page: renders the metrics dict returned by ModelEvaluator.evaluate. -->
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Evaluation Results</title>
</head>
<body>
    <h1>Evaluation Results</h1>
    <ul>
        <li>Accuracy: {{ results['accuracy'] }}</li>
        <li>Precision: {{ results['precision'] }}</li>
        <li>Recall: {{ results['recall'] }}</li>
        <li>F1 Score: {{ results['f1_score'] }}</li>
    </ul>
    <a href="/">Back</a>
</body>
</html>

0 comments on commit 32e1852

Please sign in to comment.