AshmithaIRRI committed
Commit 8d8b089 · verified · 1 Parent(s): 56bc087

create app.py

Files changed (1)
app.py +503 -0
app.py ADDED
@@ -0,0 +1,503 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Wed Jan 15 10:25:34 2025
+ 
+ @author: Ashmitha
+ """
+ 
+ #-----------------------------------------------------------Libraries----------------------------------------------------------------------------
+ import io
+ 
+ import pandas as pd
+ import numpy as np
+ import gradio as gr
+ from scipy.stats import pearsonr
+ from sklearn.metrics import mean_squared_error, r2_score
+ from sklearn.preprocessing import StandardScaler, MinMaxScaler
+ from sklearn.model_selection import KFold
+ from sklearn.ensemble import RandomForestRegressor
+ from sklearn.feature_selection import SelectFromModel
+ from xgboost import XGBRegressor
+ import tensorflow as tf
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import (GRU, Conv1D, MaxPooling1D, Dense, Flatten,
+                                      Dropout, BatchNormalization, LeakyReLU)
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras import regularizers
+ from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
+ 
+ #--------------------------------Random Forest for feature selection-------------------------------------------
+ def RandomForestFeatureSelection(trainX, trainy, num_features=60):
+     rf = RandomForestRegressor(n_estimators=1000, random_state=50)
+     rf.fit(trainX, trainy)
+     importances = rf.feature_importances_
+     indices = np.argsort(importances)[-num_features:]  # indices of the top num_features features
+     return indices
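+ # Note: this standalone top-k selector is a utility; the model functions below
+ # instead select features with SelectFromModel using the mean-importance threshold.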
+ #------------------------------------------------------------------GRU model--------------------------------------------------
+ def GRUModel(trainX, trainy, testX, testy, epochs=1000, batch_size=64, learning_rate=0.0001,
+              l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+     target_scaler = MinMaxScaler()
+     trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))
+     # Reshape to (samples, timesteps=1, features) as expected by the GRU layer
+     trainX = trainX_scaled.reshape((trainX.shape[0], 1, trainX.shape[1]))
+     if testX is not None:
+         testX = testX_scaled.reshape((testX.shape[0], 1, testX.shape[1]))
+     model = Sequential()
+     model.add(GRU(512, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=False,
+                   kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(128, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(64, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(32, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(BatchNormalization())
+     model.add(Dropout(dropout_rate))
+     model.add(LeakyReLU(alpha=0.1))
+ 
+     model.add(Dense(1, activation="relu"))
+     model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate), metrics=["mse"])
+     learning_rate_reduction = ReduceLROnPlateau(monitor="val_loss", patience=10, verbose=1, factor=0.5, min_lr=1e-6)
+     early_stopping = EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
+     history = model.fit(trainX, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1,
+                         callbacks=[learning_rate_reduction, early_stopping])
+     # Predict once per set and map the scaled outputs back to the phenotype scale
+     predicted_train = model.predict(trainX).flatten()
+     predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
+     predicted_test = None
+     if testX is not None:
+         predicted_test = model.predict(testX).flatten()
+         predicted_test = target_scaler.inverse_transform(predicted_test.reshape(-1, 1)).flatten()
+     return predicted_train, predicted_test, history
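+ # Caveat: the ReLU output layer constrains scaled predictions to be non-negative;
+ # since MinMaxScaler maps the target into [0, 1] this is workable, but a linear
+ # output (as in CNNModel below) is the more conventional choice for regression.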
+ #----------------------------------------------------CNN-----------------------------------------------
+ def CNNModel(trainX, trainy, testX, testy, epochs=1000, batch_size=64, learning_rate=0.0001,
+              l1_reg=0.0001, l2_reg=0.0001, dropout_rate=0.3, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+     # Reshape to (samples, features, 1) so each marker is one step along the Conv1D axis
+     trainX = trainX_scaled.reshape((trainX.shape[0], trainX.shape[1], 1))
+     if testX is not None:
+         testX = testX_scaled.reshape((testX.shape[0], testX.shape[1], 1))
+     model = Sequential()
+     model.add(Conv1D(512, kernel_size=3, activation='relu', input_shape=(trainX.shape[1], 1),
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Conv1D(256, kernel_size=3, activation='relu',
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Conv1D(128, kernel_size=3, activation='relu',
+                      kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(MaxPooling1D(pool_size=2))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Flatten())
+     model.add(Dense(64, kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
+     model.add(LeakyReLU(alpha=0.1))
+     model.add(Dropout(dropout_rate))
+ 
+     model.add(Dense(1, activation='linear'))
+ 
+     model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
+ 
+     learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, factor=0.5, min_lr=1e-6)
+     early_stopping = EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
+ 
+     history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1,
+                         callbacks=[learning_rate_reduction, early_stopping])
+ 
+     predicted_train = model.predict(trainX).flatten()
+     predicted_test = model.predict(testX).flatten() if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
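+ # A minimal smoke test for the model functions (hypothetical toy data, not part
+ # of the app's workflow):
+ #
+ #   X, y = np.random.rand(80, 200), np.random.rand(80)
+ #   preds_tr, preds_te, _ = CNNModel(X[:60], y[:60], X[60:], y[60:], epochs=2)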
+ #-------------------------------------------------------------------RFModel---------------------------------------------------------
+ def RFModel(trainX, trainy, testX, testy, n_estimators=100, max_depth=None, feature_selection=True):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     rf_model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, random_state=42)
+     history = rf_model.fit(trainX_scaled, trainy)  # fit() returns the fitted estimator itself
+ 
+     predicted_train = rf_model.predict(trainX_scaled)
+     predicted_test = rf_model.predict(testX_scaled) if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
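+ # Note: random forests are insensitive to feature scaling; the MinMaxScaler here
+ # mainly keeps the preprocessing consistent with the neural-network models.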
+ #------------------------------------------------------------------------------XGBoost---------------------------------------------------------------
+ def XGBoostModel(trainX, trainy, testX, testy, learning_rate, min_child_weight,
+                  feature_selection=True, n_estimators=100, max_depth=None):
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(trainX, trainy)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         trainX = selector.transform(trainX)
+         if testX is not None:
+             testX = selector.transform(testX)
+         print(f"Selected {trainX.shape[1]} features based on feature importance")
+ 
+     scaler = MinMaxScaler()
+     trainX_scaled = scaler.fit_transform(trainX)
+     if testX is not None:
+         testX_scaled = scaler.transform(testX)
+ 
+     # Hyperparameters are passed straight to the regressor; no grid search is run
+     xgb_model = XGBRegressor(objective="reg:squarederror", random_state=42,
+                              learning_rate=learning_rate, min_child_weight=min_child_weight,
+                              n_estimators=n_estimators, max_depth=max_depth)
+     history = xgb_model.fit(trainX_scaled, trainy)  # fit on the same scaled matrix used for prediction
+ 
+     # Predictions
+     predicted_train = xgb_model.predict(trainX_scaled)
+     predicted_test = xgb_model.predict(testX_scaled) if testX is not None else None
+ 
+     return predicted_train, predicted_test, history
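+ # Hypothetical direct call, mirroring the values hard-coded in run_cross_validation
+ # below: XGBoostModel(X_train, y_train, X_test, y_test, learning_rate=0.001, min_child_weight=5)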
+ 
+ #----------------------------------------Reading files----------------------------------------------------------------------------------------
+ def read_csv_file(uploaded_file):
+     if uploaded_file is not None:
+         if hasattr(uploaded_file, 'data'):
+             return pd.read_csv(io.BytesIO(uploaded_file.data))
+         elif hasattr(uploaded_file, 'name'):
+             return pd.read_csv(uploaded_file.name)
+     return None
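+ # This helper accepts either an in-memory upload (.data) or a temp-file upload
+ # (.name); run_cross_validation below reads the Gradio uploads via .name directly.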
+ 
+ #-----------------------------------------------------------------Calculate TOPSIS score--------------------------------------------------------
+ def calculate_topsis_score(df):
+     metrics = df[['Train_MSE', 'Train_RMSE', 'Train_R2', 'Train_Corr']].dropna()  # ensure no NaN values
+     # Vector normalisation: divide each column by its Euclidean norm
+     norm_metrics = metrics / np.sqrt((metrics ** 2).sum(axis=0))
+ 
+     ideal_best = pd.Series(index=norm_metrics.columns, dtype=float)
+     ideal_worst = pd.Series(index=norm_metrics.columns, dtype=float)
+ 
+     # Error metrics: lower is better
+     for col in ['Train_MSE', 'Train_RMSE']:
+         ideal_best[col] = norm_metrics[col].min()
+         ideal_worst[col] = norm_metrics[col].max()
+ 
+     # Fit metrics: higher is better
+     for col in ['Train_R2', 'Train_Corr']:
+         ideal_best[col] = norm_metrics[col].max()
+         ideal_worst[col] = norm_metrics[col].min()
+ 
+     dist_to_best = np.sqrt(((norm_metrics - ideal_best) ** 2).sum(axis=1))
+     dist_to_worst = np.sqrt(((norm_metrics - ideal_worst) ** 2).sum(axis=1))
+ 
+     # Closeness coefficient in [0, 1]; higher means closer to the ideal model
+     topsis_score = dist_to_worst / (dist_to_best + dist_to_worst)
+     df['TOPSIS_Score'] = np.nan
+     df.loc[metrics.index, 'TOPSIS_Score'] = topsis_score  # assign TOPSIS scores
+     return df
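+ # Note: NestedKFoldCrossValidation below defines its own TOPSIS variant over the
+ # fold-averaged metrics; this module-level version ranks on training metrics only.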
+ 
+ #---------------------------------------------------Nested cross-validation---------------------------------------------------------------------------
+ def NestedKFoldCrossValidation(training_data, training_additive, testing_data, testing_additive,
+                                training_dominance, testing_dominance, epochs, learning_rate,
+                                min_child_weight, batch_size=64,
+                                outer_n_splits=2, inner_n_splits=2, output_file='cross_validation_results.csv',
+                                predicted_phenotype_file='predicted_phenotype.csv', feature_selection=True):
+ 
+     if 'phenotypes' not in training_data.columns:
+         raise ValueError("Training data does not contain the 'phenotypes' column.")
+ 
+     # Drop the ID column from the additive and dominance component files
+     training_additive = training_additive.iloc[:, 1:]
+     testing_additive = testing_additive.iloc[:, 1:]
+     training_dominance = training_dominance.iloc[:, 1:]
+     testing_dominance = testing_dominance.iloc[:, 1:]
+ 
+     # Merge training and testing data with additive and dominance components
+     training_data_merged = pd.concat([training_data, training_additive, training_dominance], axis=1)
+     testing_data_merged = pd.concat([testing_data, testing_additive, testing_dominance], axis=1)
+ 
+     phenotypic_info = training_data['phenotypes'].values
+     phenotypic_test_info = testing_data['phenotypes'].values if 'phenotypes' in testing_data.columns else None
+     sample_ids = testing_data.iloc[:, 0].values
+ 
+     # Columns 0 and 1 hold the sample ID and phenotype; the rest are genotypic features
+     training_genotypic_data_merged = training_data_merged.iloc[:, 2:].values
+     testing_genotypic_data_merged = testing_data_merged.iloc[:, 2:].values
+ 
+     if feature_selection:
+         rf = RandomForestRegressor(n_estimators=100, random_state=42)
+         rf.fit(training_genotypic_data_merged, phenotypic_info)
+         selector = SelectFromModel(rf, threshold="mean", prefit=True)
+         training_genotypic_data_merged = selector.transform(training_genotypic_data_merged)
+         testing_genotypic_data_merged = selector.transform(testing_genotypic_data_merged)
+         print(f"Selected {training_genotypic_data_merged.shape[1]} features based on importance.")
+ 
+     scaler = StandardScaler()
+     training_genotypic_data_merged = scaler.fit_transform(training_genotypic_data_merged)
+     testing_genotypic_data_merged = scaler.transform(testing_genotypic_data_merged)
+ 
+     outer_kf = KFold(n_splits=outer_n_splits)
+ 
+     results = []
+     all_predicted_phenotypes = []
+ 
+     def calculate_metrics(true_values, predicted_values):
+         mse = mean_squared_error(true_values, predicted_values)
+         rmse = np.sqrt(mse)
+         r2 = r2_score(true_values, predicted_values)
+         corr = pearsonr(true_values, predicted_values)[0]
+         return mse, rmse, r2, corr
+ 
+     models = [
+         ('GRUModel', GRUModel),
+         ('CNNModel', CNNModel),
+         ('RFModel', RFModel),
+         ('XGBoostModel', XGBoostModel)
+     ]
+ 
+     for outer_fold, (outer_train_index, outer_test_index) in enumerate(outer_kf.split(phenotypic_info), 1):
+         outer_trainX = training_genotypic_data_merged[outer_train_index]
+         outer_trainy = phenotypic_info[outer_train_index]
+ 
+         # Every fold is evaluated against the uploaded testing set
+         outer_testX = testing_genotypic_data_merged
+         outer_testy = phenotypic_test_info
+ 
+         for model_name, model_func in models:
+             print(f"Running model: {model_name} for fold {outer_fold}")
+             if model_name in ['GRUModel', 'CNNModel']:
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy,
+                                                                       epochs=epochs, batch_size=batch_size)
+             elif model_name == 'RFModel':
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy)
+             else:
+                 predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy,
+                                                                       learning_rate, min_child_weight)
+ 
+             mse_train, rmse_train, r2_train, corr_train = calculate_metrics(outer_trainy, predicted_train)
+             mse_test, rmse_test, r2_test, corr_test = (calculate_metrics(outer_testy, predicted_test)
+                                                        if outer_testy is not None else (None, None, None, None))
+ 
+             results.append({
+                 'Model': model_name,
+                 'Fold': outer_fold,
+                 'Train_MSE': mse_train,
+                 'Train_RMSE': rmse_train,
+                 'Train_R2': r2_train,
+                 'Train_Corr': corr_train,
+                 'Test_MSE': mse_test,
+                 'Test_RMSE': rmse_test,
+                 'Test_R2': r2_test,
+                 'Test_Corr': corr_test
+             })
+ 
+             if predicted_test is not None:
+                 predicted_test_df = pd.DataFrame({
+                     'Sample_ID': sample_ids,
+                     'Predicted_Phenotype': predicted_test,
+                     'Model': model_name
+                 })
+                 all_predicted_phenotypes.append(predicted_test_df)
+ 
+     results_df = pd.DataFrame(results)
+ 
+     # Average metrics across the outer folds for each model
+     avg_results_df = results_df.groupby('Model').agg({
+         'Train_MSE': 'mean',
+         'Train_RMSE': 'mean',
+         'Train_R2': 'mean',
+         'Train_Corr': 'mean',
+         'Test_MSE': 'mean',
+         'Test_RMSE': 'mean',
+         'Test_R2': 'mean',
+         'Test_Corr': 'mean'
+     }).reset_index()
+     def calculate_topsis_score(df):
+         # Min-max normalise each metric column (column 0 holds the model name)
+         norm_df = (df.iloc[:, 1:] - df.iloc[:, 1:].min()) / (df.iloc[:, 1:].max() - df.iloc[:, 1:].min())
+ 
+         # MSE/RMSE are cost criteria (lower is better); invert them so "larger is
+         # better" holds for every column, as in calculate_topsis_score above
+         cost_cols = [c for c in norm_df.columns if 'MSE' in c]
+         norm_df[cost_cols] = 1 - norm_df[cost_cols]
+ 
+         ideal_positive = norm_df.max(axis=0)
+         ideal_negative = norm_df.min(axis=0)
+ 
+         dist_positive = np.sqrt(((norm_df - ideal_positive) ** 2).sum(axis=1))
+         dist_negative = np.sqrt(((norm_df - ideal_negative) ** 2).sum(axis=1))
+ 
+         topsis_score = dist_negative / (dist_positive + dist_negative)
+         df['TOPSIS_Score'] = topsis_score
+         return df
+ 
+     avg_results_df = calculate_topsis_score(avg_results_df)
+     avg_results_df.to_csv(output_file, index=False)
+ 
+     predicted_all_df = None
+     if all_predicted_phenotypes:
+         predicted_all_df = pd.concat(all_predicted_phenotypes, axis=0, ignore_index=True)
+         predicted_all_df.to_csv(predicted_phenotype_file, index=False)
+ 
+     return avg_results_df, predicted_all_df
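+ # Note: inner_n_splits is accepted for API symmetry but no inner loop is run yet;
+ # each outer fold trains on its split and scores against the uploaded testing set.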
+ 
+ #--------------------------------------------------------------------Gradio interface---------------------------------------------------------------
+ def run_cross_validation(training_file, training_additive_file, testing_file, testing_additive_file,
+                          training_dominance_file, testing_dominance_file, feature_selection):
+ 
+     # Fixed training configuration for the app
+     epochs = 1000
+     batch_size = 64
+     outer_n_splits = 2
+     inner_n_splits = 2
+     min_child_weight = 5
+     learning_rate = 0.001
+ 
+     training_data = pd.read_csv(training_file.name)
+     training_additive = pd.read_csv(training_additive_file.name)
+     testing_data = pd.read_csv(testing_file.name)
+     testing_additive = pd.read_csv(testing_additive_file.name)
+     training_dominance = pd.read_csv(training_dominance_file.name)
+     testing_dominance = pd.read_csv(testing_dominance_file.name)
+ 
+     results, predicted_phenotypes = NestedKFoldCrossValidation(
+         training_data=training_data,
+         training_additive=training_additive,
+         testing_data=testing_data,
+         testing_additive=testing_additive,
+         training_dominance=training_dominance,
+         testing_dominance=testing_dominance,
+         epochs=epochs,
+         batch_size=batch_size,
+         outer_n_splits=outer_n_splits,
+         inner_n_splits=inner_n_splits,
+         learning_rate=learning_rate,
+         min_child_weight=min_child_weight,
+         feature_selection=feature_selection
+     )
+ 
+     results_file = "cross_validation_results.csv"
+     predicted_file = "predicted_phenotype.csv"
+     results.to_csv(results_file, index=False)
+     if predicted_phenotypes is not None:
+         predicted_phenotypes.to_csv(predicted_file, index=False)
+ 
+     return results_file, predicted_file
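+ # Epochs, batch size, fold counts, and the XGBoost hyperparameters are fixed for
+ # the hosted app; only the feature-selection toggle is exposed in the UI.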
+ 
+ with gr.Blocks() as interface:
+     gr.Markdown("# DeepMap - An Integrated GUI for Genotype to Phenotype Prediction")
+ 
+     with gr.Row():
+         training_file = gr.File(label="Upload Training Data (CSV)")
+         training_additive_file = gr.File(label="Upload Training Additive Data (CSV)")
+         training_dominance_file = gr.File(label="Upload Training Dominance Data (CSV)")
+ 
+     with gr.Row():
+         testing_file = gr.File(label="Upload Testing Data (CSV)")
+         testing_additive_file = gr.File(label="Upload Testing Additive Data (CSV)")
+         testing_dominance_file = gr.File(label="Upload Testing Dominance Data (CSV)")
+ 
+     with gr.Row():
+         feature_selection = gr.Checkbox(label="Enable Feature Selection", value=True)
+ 
+     output1 = gr.File(label="Cross-Validation Results (CSV)")
+     output2 = gr.File(label="Predicted Phenotypes (CSV)")
+ 
+     submit_btn = gr.Button("Run DeepMap")
+     # Input order must match the run_cross_validation signature
+     submit_btn.click(
+         run_cross_validation,
+         inputs=[
+             training_file, training_additive_file, testing_file,
+             testing_additive_file, training_dominance_file, testing_dominance_file,
+             feature_selection
+         ],
+         outputs=[output1, output2]
+     )
+ 
+ interface.launch()