AshmithaIRRI committed on
Commit 78e4d98 · verified · 1 Parent(s): 5750b4f

Update app.py

Files changed (1)
app.py +108 -260
app.py CHANGED
@@ -1,6 +1,17 @@

- #---------------------------------------------Libraries--------------------------
  import pandas as pd
  import numpy as np
  import gradio as gr
@@ -25,272 +36,113 @@ from sklearn.feature_selection import SelectFromModel
  import tempfile
  import matplotlib.pyplot as plt
  import seaborn as sns
- #------------------------------------------GRUModel-------------------------------------
-
- def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64, learning_rate=0.0001,
- l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2):
-
- # Scale the input data
  scaler = MinMaxScaler()
  trainX_scaled = scaler.fit_transform(trainX)
  testX_scaled = scaler.transform(testX) if testX is not None else None

- # Scale the target variable
- target_scaler = MinMaxScaler()
- trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))
-
- # Model definition
- model = Sequential()
-
- # Input Layer
- model.add(Dense(512, input_shape=(trainX.shape[1],), kernel_initializer='he_normal',
- kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(dropout_rate))
- model.add(LeakyReLU(alpha=0.1))
-
- # Hidden Layers
- model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(dropout_rate))
- model.add(LeakyReLU(alpha=0.1))
-
- model.add(Dense(128, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(dropout_rate))
- model.add(LeakyReLU(alpha=0.1))
-
- model.add(Dense(64, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(dropout_rate))
- model.add(LeakyReLU(alpha=0.1))
-
- model.add(Dense(32, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(BatchNormalization())
- model.add(Dropout(dropout_rate))
- model.add(LeakyReLU(alpha=0.1))
-
- # Output Layer
- model.add(Dense(1, activation="relu"))
-
- # Compile Model
- model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
-
- # Callbacks
- callbacks = [
- ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=1, factor=0.5, min_lr=1e-6),
- EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
- ]
-
- # Train model
- history = model.fit(trainX_scaled, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1,
- verbose=1, callbacks=callbacks)
-
- # Predictions
- predicted_train = model.predict(trainX_scaled).flatten()
- predicted_test = model.predict(testX_scaled).flatten() if testX is not None else None
-
- # Inverse transform predictions
- predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
- if predicted_test is not None:
- predicted_test = target_scaler.inverse_transform(predicted_test.reshape(-1, 1)).flatten()
-
- return predicted_train, predicted_test, history
-
- #def GRUModel(trainX, trainy, testX=None, testy=None, epochs=1000, batch_size=64, learning_rate=0.0001,
- # l1_reg=0.001, l2_reg=0.001, dropout_rate=0.2, feature_selection=True, top_k=10):
-
-

- # Scale the input data
- # scaler = MinMaxScaler()
- #trainX_scaled = scaler.fit_transform(trainX)
- # testX_scaled = scaler.transform(testX) if testX is not None else None

- # Scale the target variable
- #target_scaler = MinMaxScaler()
- #trainy_scaled = target_scaler.fit_transform(trainy.reshape(-1, 1))

- # Reshape inputs to (samples, timesteps, features)
- #trainX = trainX_scaled.reshape((trainX.shape[0], 1, trainX.shape[1]))
- #if testX is not None:
- # testX = testX_scaled.reshape((testX.shape[0], 1, testX.shape[1]))
-
- # Model definition
- #model = Sequential()
- #model.add(GRU(512, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=False,
- #kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
-
-
- #model.add(Dense(512, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- #model.add(BatchNormalization())
- #model.add(Dropout(dropout_rate))
- #model.add(LeakyReLU(alpha=0.1))
-
- #model.add(Dense(256, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- #model.add(BatchNormalization())
- #model.add(Dropout(dropout_rate))
- #model.add(LeakyReLU(alpha=0.1))
-
- #model.add(Dense(128, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- #model.add(BatchNormalization())
- #model.add(Dropout(dropout_rate))
- #model.add(LeakyReLU(alpha=0.1))
-
- #model.add(Dense(64, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- #model.add(BatchNormalization())
- #model.add(Dropout(dropout_rate))
- #model.add(LeakyReLU(alpha=0.1))
-
- #model.add(Dense(32, kernel_initializer='he_normal', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- #model.add(BatchNormalization())
- #model.add(Dropout(dropout_rate))
- #model.add(LeakyReLU(alpha=0.1))
-
- #model.add(Dense(1, activation="relu")) # Output layer
-
- #model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])

  # Callbacks
- #callbacks = [
- # ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=1, factor=0.5, min_lr=1e-6),
- # EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
- #]

- # Train model
- #history = model.fit(trainX, trainy_scaled, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1, callbacks=callbacks)

  # Predictions
- #predicted_train = model.predict(trainX).flatten()
- # predicted_test = model.predict(testX).flatten() if testX is not None else None
-
- # Inverse transform predictions
- # predicted_train = target_scaler.inverse_transform(predicted_train.reshape(-1, 1)).flatten()
- # if predicted_test is not None:
- # predicted_test = target_scaler.inverse_transform(predicted_test.reshape(-1, 1)).flatten()
-
- #return predicted_train, predicted_test, history
-
-
-
-
- #--------------------------------------------------CNNModel-------------------------------------------
- def CNNModel(trainX, trainy, testX, testy, epochs=1000, batch_size=64, learning_rate=0.0001, l1_reg=0.0001, l2_reg=0.0001, dropout_rate=0.3,feature_selection=True):
-
-
- # Scaling the inputs
- scaler = MinMaxScaler()
- trainX_scaled = scaler.fit_transform(trainX)
- if testX is not None:
- testX_scaled = scaler.transform(testX)
-
- # Reshape for CNN input (samples, features, channels)
- trainX = trainX_scaled.reshape((trainX.shape[0], trainX.shape[1], 1))
- if testX is not None:
- testX = testX_scaled.reshape((testX.shape[0], testX.shape[1], 1))
-
- model = Sequential()
-
- # Convolutional layers
- model.add(Conv1D(512, kernel_size=3, activation='relu', input_shape=(trainX.shape[1], 1), kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(MaxPooling1D(pool_size=2))
- model.add(Dropout(dropout_rate))
-
- model.add(Conv1D(256, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(MaxPooling1D(pool_size=2))
- model.add(Dropout(dropout_rate))
-
- model.add(Conv1D(128, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(MaxPooling1D(pool_size=2))
- model.add(Dropout(dropout_rate))
-
- # Flatten and Dense layers
- model.add(Flatten())
- model.add(Dense(64, kernel_regularizer=regularizers.l1_l2(l1=l1_reg, l2=l2_reg)))
- model.add(LeakyReLU(alpha=0.1))
- model.add(Dropout(dropout_rate))
-
- model.add(Dense(1, activation='linear'))

- # Compile the model
- model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate), metrics=['mse'])
-
- # Callbacks
- learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, factor=0.5, min_lr=1e-6)
- early_stopping = EarlyStopping(monitor='val_loss', verbose=1, restore_best_weights=True, patience=10)
-
- # Train the model
- history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1,
- callbacks=[learning_rate_reduction, early_stopping])
-
- predicted_train = model.predict(trainX).flatten()
- predicted_test = model.predict(testX).flatten() if testX is not None else None
-
  return predicted_train, predicted_test, history
- #------------------------------------------RFModel---------------------------------------------------
- def RFModel(trainX, trainy, testX, testy, n_estimators=100, max_depth=None,feature_selection=True):
-
-
- # Log transformation of the target variable
-
- # Scaling the feature data
- scaler = MinMaxScaler()
- trainX_scaled = scaler.fit_transform(trainX)
- if testX is not None:
- testX_scaled = scaler.transform(testX)
-
- # Define and train the RandomForest model
- rf_model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, random_state=42)
- history=rf_model.fit(trainX_scaled, trainy)
-
-
- # Predictions
- predicted_train = rf_model.predict(trainX_scaled)
- predicted_test = rf_model.predict(testX_scaled) if testX is not None else None
-
- return predicted_train, predicted_test,history
- #-------------------------------------------------XGBoost--------------------------------------------
- def XGBoostModel(trainX, trainy, testX, testy,learning_rate,min_child_weight,feature_selection=True, n_estimators=100, max_depth=None):
-
-
-
- # Scale the features
- scaler = MinMaxScaler()
- trainX_scaled = scaler.fit_transform(trainX)
- if testX is not None:
- testX_scaled = scaler.transform(testX)

-
- xgb_model=XGBRegressor(objective="reg:squarederror",random_state=42)
- history=xgb_model.fit(trainX, trainy)
- #param_grid={
- #"learning_rate":0.01,
- #"max_depth" : 10,
- #"n_estimators": 100,
- #"min_child_weight": 10
- # }
-
-
- # Predictions
- predicted_train = xgb_model.predict(trainX_scaled)
- predicted_test = xgb_model.predict(testX_scaled) if testX is not None else None
-
-
- return predicted_train, predicted_test,history
- #------------------------------------------------------------------File--------------------------------------------
- def read_csv_file(uploaded_file):
- if uploaded_file is not None:
- if hasattr(uploaded_file, 'data'): # For NamedBytes
- return pd.read_csv(io.BytesIO(uploaded_file.data))
- elif hasattr(uploaded_file, 'name'): # For NamedString
- return pd.read_csv(uploaded_file.name)
- return None
-
-
- #_-------------------------------------------------------------NestedKFold Cross Validation---------------------
  def calculate_topsis_score(df):
  # Normalize the data
  norm_df = (df.iloc[:, 1:] - df.iloc[:, 1:].min()) / (df.iloc[:, 1:].max() - df.iloc[:, 1:].min())
@@ -310,7 +162,6 @@ def calculate_topsis_score(df):
  df['TOPSIS_Score'] = topsis_score

  return df
- #_-------------------------------------------------------------NestedKFold Cross Validation---------------------
  def NestedKFoldCrossValidation(training_data, training_additive, testing_data, testing_additive,
  training_dominance, testing_dominance, epochs, learning_rate, min_child_weight, batch_size=64,
  outer_n_splits=2, output_file='cross_validation_results.csv',
@@ -353,10 +204,8 @@ def NestedKFoldCrossValidation(training_data, training_additive, testing_data, t
  return mse, rmse, r2, corr

  models = [
- ('GRUModel', GRUModel),
- ('CNNModel', CNNModel),
- ('RFModel', RFModel),
- ('XGBoostModel', XGBoostModel)
  ]

  for outer_fold, (outer_train_index, outer_test_index) in enumerate(outer_kf.split(phenotypic_info), 1):
@@ -383,10 +232,10 @@ def NestedKFoldCrossValidation(training_data, training_additive, testing_data, t

  for model_name, model_func in models:
  print(f"Running model: {model_name} for fold {outer_fold}")
- if model_name in ['GRUModel', 'CNNModel']:
  predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, epochs=epochs, batch_size=batch_size)
- elif model_name in ['RFModel']:
- predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy)
  else:
  predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, learning_rate, min_child_weight)

@@ -547,5 +396,4 @@ with gr.Blocks() as interface:
  )

  # Launch the interface
- interface.launch()
-
+ # -*- coding: utf-8 -*-
+ """
+ Created on Fri Jan 31 13:24:37 2025
+
+ @author: Ashmitha
+ """
+
+ import tensorflow as tf
+ from tensorflow.keras.layers import Input, Dense, Dropout, LayerNormalization
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
+ from sklearn.preprocessing import MinMaxScaler
+ import pandas as pd
  import pandas as pd
  import numpy as np
  import gradio as gr
 
  import tempfile
  import matplotlib.pyplot as plt
  import seaborn as sns

+ # Positional Encoding Function
+ def positional_encoding(seq_len, d_model):
+ pos = tf.range(seq_len, dtype=tf.float32)[:, tf.newaxis]
+ div_term = tf.exp(tf.range(0, d_model, 2, dtype=tf.float32) * (-tf.math.log(10000.0) / d_model))
+ pos_encoding = tf.concat([tf.sin(pos * div_term), tf.cos(pos * div_term)], axis=-1)
+ return pos_encoding[tf.newaxis, ...]
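For orientation, a minimal shape check of this helper (illustrative only, not part of the committed app.py; it assumes the positional_encoding function above is in scope). The result is a sin block followed by a cos block along the feature axis:

    import tensorflow as tf
    pe = positional_encoding(seq_len=4, d_model=8)
    print(pe.shape)  # (1, 4, 8): first 4 channels are sin terms, last 4 are cos terms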
46
+
47
+ # Multi-Head Self-Attention Layer
48
+ class MultiHeadSelfAttention(tf.keras.layers.Layer):
49
+ def __init__(self, embed_dim, num_heads):
50
+ super().__init__()
51
+ self.num_heads = num_heads
52
+ self.embed_dim = embed_dim
53
+ assert embed_dim % num_heads == 0, "Embedding dimension must be divisible by number of heads"
54
+
55
+ self.depth = embed_dim // num_heads
56
+ self.wq = Dense(embed_dim)
57
+ self.wk = Dense(embed_dim)
58
+ self.wv = Dense(embed_dim)
59
+ self.dense = Dense(embed_dim)
60
+
61
+ def split_heads(self, x, batch_size):
62
+ x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
63
+ return tf.transpose(x, perm=[0, 2, 1, 3]) # (batch_size, num_heads, seq_length, depth)
64
+
65
+ def call(self, inputs):
66
+ batch_size = tf.shape(inputs)[0]
67
+ q = self.split_heads(self.wq(inputs), batch_size)
68
+ k = self.split_heads(self.wk(inputs), batch_size)
69
+ v = self.split_heads(self.wv(inputs), batch_size)
70
+
71
+ attention_scores = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(float(self.depth))
72
+ attention_weights = tf.nn.softmax(attention_scores, axis=-1)
73
+ attention_output = tf.matmul(attention_weights, v)
74
+
75
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
76
+ concat_attention = tf.reshape(attention_output, (batch_size, -1, self.embed_dim))
77
+ output = self.dense(concat_attention)
78
+ return output
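As a quick illustration (not part of the commit, and assuming the MultiHeadSelfAttention class above is in scope): the layer maps a (batch, seq_len, embed_dim) tensor to a tensor of the same shape.

    import tensorflow as tf
    mha = MultiHeadSelfAttention(embed_dim=128, num_heads=8)
    x = tf.random.normal((2, 10, 128))   # (batch, seq_len, embed_dim)
    print(mha(x).shape)                  # (2, 10, 128)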
+
+ # Transformer Block
+ class TransformerBlock(tf.keras.layers.Layer):
+ def __init__(self, embed_dim, num_heads, ff_dim, dropout_rate=0.1):
+ super().__init__()
+ self.att = MultiHeadSelfAttention(embed_dim, num_heads)
+ self.norm1 = LayerNormalization(epsilon=1e-6)
+ self.norm2 = LayerNormalization(epsilon=1e-6)
+ self.ffn = tf.keras.Sequential([
+ Dense(ff_dim, activation="relu"),
+ Dense(embed_dim),
+ ])
+ self.dropout1 = Dropout(dropout_rate)
+ self.dropout2 = Dropout(dropout_rate)
+
+ def call(self, inputs, training):
+ attn_output = self.att(inputs)
+ attn_output = self.dropout1(attn_output, training=training)
+ out1 = self.norm1(inputs + attn_output)
+
+ ffn_output = self.ffn(out1)
+ ffn_output = self.dropout2(ffn_output, training=training)
+ return self.norm2(out1 + ffn_output)
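Likewise (illustrative only, not in the committed file): a TransformerBlock preserves the input shape while applying self-attention and a feed-forward sub-layer, each followed by a residual connection and layer normalization.

    import tensorflow as tf
    block = TransformerBlock(embed_dim=128, num_heads=8, ff_dim=256, dropout_rate=0.1)
    z = block(tf.random.normal((2, 10, 128)), training=False)
    print(z.shape)                       # (2, 10, 128)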
+
+ # Transformer Model
+ def TransformerModel(trainX, trainy, testX, testy, embed_dim=128, num_heads=8, ff_dim=256,
+ epochs=100, batch_size=64, learning_rate=0.0001, dropout_rate=0.3):
+
+ # Feature Scaling
  scaler = MinMaxScaler()
  trainX_scaled = scaler.fit_transform(trainX)
  testX_scaled = scaler.transform(testX) if testX is not None else None

+ # Ensure correct input shape
+ seq_len = trainX.shape[1]

+ # Define Model
+ inputs = Input(shape=(seq_len, 1)) # Input reshaped to (batch, seq_len, 1)
+ x = Dense(embed_dim)(inputs) # Feature projection
+ pos_encoding = positional_encoding(seq_len, embed_dim)
+ x += tf.broadcast_to(pos_encoding, tf.shape(x)) # Ensure shape compatibility

+ # Transformer Blocks
+ for _ in range(3):
+ x = TransformerBlock(embed_dim, num_heads, ff_dim, dropout_rate)(x)

+ x = Dense(64, activation="relu")(x)
+ x = Dropout(dropout_rate)(x)
+ outputs = Dense(1, activation="linear")(tf.reduce_mean(x, axis=1)) # Reduce along sequence length

+ model = Model(inputs, outputs)
+ model.compile(loss="mse", optimizer=Adam(learning_rate=learning_rate), metrics=["mse"])

  # Callbacks
+ lr_reduction = ReduceLROnPlateau(monitor="val_loss", patience=5, factor=0.5, min_lr=1e-6, verbose=1)
+ early_stopping = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True, verbose=1)

+ # Train Model
+ history = model.fit(trainX_scaled[..., np.newaxis], trainy, validation_split=0.1,
+ epochs=epochs, batch_size=batch_size, callbacks=[lr_reduction, early_stopping], verbose=1)

  # Predictions
+ predicted_train = model.predict(trainX_scaled[..., np.newaxis]).flatten()
+ predicted_test = model.predict(testX_scaled[..., np.newaxis]).flatten() if testX is not None else None

  return predicted_train, predicted_test, history
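A minimal usage sketch of the new TransformerModel (illustrative only, not part of the commit; tiny synthetic NumPy arrays stand in for the genomic feature matrices the Space actually loads):

    import numpy as np
    rng = np.random.default_rng(0)
    trainX, trainy = rng.random((80, 50)), rng.random(80)
    testX, testy = rng.random((20, 50)), rng.random(20)
    pred_train, pred_test, hist = TransformerModel(trainX, trainy, testX, testy,
                                                   epochs=2, batch_size=16)
    print(pred_train.shape, pred_test.shape)  # expected: (80,) (20,)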

  def calculate_topsis_score(df):
  # Normalize the data
  norm_df = (df.iloc[:, 1:] - df.iloc[:, 1:].min()) / (df.iloc[:, 1:].max() - df.iloc[:, 1:].min())
 
  df['TOPSIS_Score'] = topsis_score

  return df
  def NestedKFoldCrossValidation(training_data, training_additive, testing_data, testing_additive,
  training_dominance, testing_dominance, epochs, learning_rate, min_child_weight, batch_size=64,
  outer_n_splits=2, output_file='cross_validation_results.csv',
 
  return mse, rmse, r2, corr

  models = [
+
+ ('TransformerModel', TransformerModel)
  ]

  for outer_fold, (outer_train_index, outer_test_index) in enumerate(outer_kf.split(phenotypic_info), 1):

  for model_name, model_func in models:
  print(f"Running model: {model_name} for fold {outer_fold}")
+ if model_name in ['TransformerModel' ]:
  predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, epochs=epochs, batch_size=batch_size)
+ #elif model_name in ['RFModel']:
+ # predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy)
  else:
  predicted_train, predicted_test, history = model_func(outer_trainX, outer_trainy, outer_testX, outer_testy, learning_rate, min_child_weight)
 
 
  )

  # Launch the interface
+ interface.launch()