from tqdm import tqdm
import torch
import torch.optim as optim
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torchmetrics.functional import auroc
from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss
from merge_class.merge import merge
from Image_Process.Image_Generator import Image_generator


class All_Step:
    def __init__(self, Training_Data_And_Label, Test_Data_And_Label, Validation_Data_And_Label,
                 Model, Epoch, Number_Of_Classes):
        self.Training_Data_And_Label = Training_Data_And_Label
        self.Test_Data_And_Label = Test_Data_And_Label
        self.Validation_Data_And_Label = Validation_Data_And_Label
        self.Model = Model
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.Epoch = Epoch
        self.Number_Of_Classes = Number_Of_Classes

    def Training_Step(self, model_name, counter):
        # Define the optimizer; the weight_decay term adds L2 regularization.
        Optimizer = optim.SGD(self.Model.parameters(), lr=0.045, momentum=0.9, weight_decay=0.1)
        model_path, early_stopping, scheduler = call_back(model_name, counter, Optimizer)
        criterion = Entropy_Loss()  # custom loss function
        Merge_Function = merge()

        train_losses = []
        val_losses = []
        train_accuracies = []
        val_accuracies = []
        Total_Epoch = 0

        for epoch in range(self.Epoch):
            # ----- Training loop -----
            self.Model.train()  # switch to training mode
            running_loss = 0.0
            all_train_preds = []
            all_train_labels = []
            epoch_iterator = tqdm(self.Training_Data_And_Label,
                                  desc="Training (Epoch %d)" % epoch)  # progress bar
            for inputs, labels in epoch_iterator:
                # torch.as_tensor accepts both numpy batches and tensors without copy warnings.
                inputs = torch.as_tensor(inputs).to(self.device)
                labels = torch.as_tensor(labels).to(self.device)
                Optimizer.zero_grad()
                outputs = self.Model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                Optimizer.step()
                running_loss += loss.item()

                # Collect training predictions and ground-truth labels.
                Output_Values, Output_Indexs = torch.max(outputs, dim=1)
                True_Indexs = np.argmax(labels.cpu().numpy(), 1)
                all_train_preds.append(Output_Indexs.cpu().numpy())
                all_train_labels.append(True_Indexs)

            all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
            all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))
            # print(f"all_train_labels shape:{np.array(all_train_labels).shape}")
            Training_Loss = running_loss / len(self.Training_Data_And_Label)
            train_accuracy = accuracy_score(all_train_labels, all_train_preds)
            train_losses.append(Training_Loss)
            train_accuracies.append(train_accuracy)
            print(f"\nEpoch [{epoch + 1}/{self.Epoch}], Loss: {Training_Loss:.4f}, "
                  f"Accuracy: {train_accuracy:0.2f}", end=' ')

            # ----- Validation loop -----
            self.Model.eval()
            val_loss = 0.0
            all_val_preds = []
            all_val_labels = []
            with torch.no_grad():
                for inputs, labels in self.Validation_Data_And_Label:
                    inputs = torch.as_tensor(inputs).to(self.device)
                    labels = torch.as_tensor(labels).to(self.device)
                    outputs = self.Model(inputs)
                    loss = criterion(outputs, labels)
                    val_loss += loss.item()
                    print(f"Output Contents: {outputs}")

                    # Collect validation predictions and ground-truth labels.
                    Output_Values, Output_Indexs = torch.max(outputs, dim=1)
                    True_Indexs = np.argmax(labels.cpu().numpy(), 1)
                    all_val_preds.append(Output_Indexs.cpu().numpy())
                    all_val_labels.append(True_Indexs)

            val_loss /= len(self.Validation_Data_And_Label)
            # Flatten the per-batch arrays so accuracy_score receives 1-D label vectors.
            all_val_preds = np.concatenate(all_val_preds)
            all_val_labels = np.concatenate(all_val_labels)
            val_accuracy = accuracy_score(all_val_labels, all_val_preds)
            val_losses.append(val_loss)
            val_accuracies.append(val_accuracy)
            print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")

            early_stopping(val_loss, self.Model, model_path)
            if early_stopping.early_stop:
                print("Early stopping triggered. Training stopped.")
                Total_Epoch = epoch
                break

            # Adjust the learning rate based on the validation loss.
            scheduler.step(val_loss)

        return train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch

    def Evaluate_Model(self, cnn_model):
        # Evaluate the trained model on the test set.
        cnn_model.eval()
        criterion = Entropy_Loss()  # same custom criterion as training, so the returned test loss is meaningful
        True_Label, Predict_Label = [], []
        True_Label_OneHot, Predict_Label_OneHot = [], []
        loss = 0.0
        with torch.no_grad():
            for images, labels in self.Test_Data_And_Label:
                images = torch.as_tensor(images).to(self.device)
                labels = torch.as_tensor(labels).to(self.device)
                outputs = cnn_model(images)
                loss += criterion(outputs, labels).item()

                # Collect test predictions and ground-truth labels.
                Output_Values, Output_Indexs = torch.max(outputs, 1)
                True_Indexs = np.argmax(labels.cpu().numpy(), 1)
                True_Label.append(True_Indexs)                      # ground truth
                Predict_Label.append(Output_Indexs.cpu().numpy())   # predictions
                # Keep the raw scores / one-hot targets of the first sample in each
                # batch (the pipeline feeds batches of size 1 here).
                Predict_Label_OneHot.append(outputs.detach().float().cpu().numpy()[0])
                True_Label_OneHot.append(labels.int().cpu().numpy()[0])

        loss /= len(self.Test_Data_And_Label)
        # Flatten the per-batch arrays before computing the sklearn metrics.
        True_Label = np.concatenate(True_Label)
        Predict_Label = np.concatenate(Predict_Label)
        True_Label_OneHot = torch.tensor(np.array(True_Label_OneHot), dtype=torch.int)
        Predict_Label_OneHot = torch.tensor(np.array(Predict_Label_OneHot), dtype=torch.float32)

        accuracy = accuracy_score(True_Label, Predict_Label)
        precision = precision_score(True_Label, Predict_Label, average="macro")
        recall = recall_score(True_Label, Predict_Label, average="macro")
        AUC = auroc(Predict_Label_OneHot, True_Label_OneHot,
                    num_labels=self.Number_Of_Classes, task="multilabel", average="macro")
        f1 = f1_score(True_Label, Predict_Label, average="macro")
        return loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label
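

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The DemoNet classifier, the
# synthetic 8x8 single-channel batches, and the class count below are
# hypothetical stand-ins for the project's Image_generator pipeline; they just
# show the (inputs, one_hot_labels) batch format that All_Step expects.
# Only Evaluate_Model is exercised here: it relies on the project's
# Entropy_Loss criterion, while Training_Step additionally needs the
# call_back and merge helpers.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn

    class DemoNet(nn.Module):
        """Tiny stand-in classifier used only for this sketch."""

        def __init__(self, num_classes):
            super().__init__()
            self.net = nn.Sequential(nn.Flatten(), nn.Linear(8 * 8, num_classes))

        def forward(self, x):
            return self.net(x)

    num_classes = 3

    def synthetic_batches(n):
        # Each "loader" is a list of (inputs, one_hot_labels) batches of size 1,
        # cycling through the classes so every class appears at least once.
        batches = []
        for i in range(n):
            images = np.random.rand(1, 1, 8, 8).astype(np.float32)
            one_hot = np.eye(num_classes, dtype=np.float32)[i % num_classes][None, :]
            batches.append((images, one_hot))
        return batches

    steps = All_Step(Training_Data_And_Label=synthetic_batches(12),
                     Test_Data_And_Label=synthetic_batches(12),
                     Validation_Data_And_Label=synthetic_batches(12),
                     Model=DemoNet(num_classes),
                     Epoch=1,
                     Number_Of_Classes=num_classes)
    steps.Model.to(steps.device)
    loss, acc, prec, rec, auc_value, f1_value, y_true, y_pred = steps.Evaluate_Model(steps.Model)
    print(f"Test loss: {loss:.4f}, accuracy: {acc:.2f}, macro AUROC: {float(auc_value):.2f}")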