20250218 commits: The program can run and its validation accuracy is 50 percent
@@ -4,6 +4,9 @@ from Load_process.file_processing import Process_File
from Load_process.LoadData import Load_Data_Prepare
from torchvision import transforms
from Training_Tools.Tools import Tool
import numpy as np
from PIL import Image
import torch

class Image_generator():
'''Builds the data augmentation'''
@@ -23,7 +26,7 @@ class Image_generator():
Here I want to apply the data augmentation described in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation one monkeypox image")
print("\nAugmentation one Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 1, data_size)
self.stop += data_size

@@ -32,7 +35,7 @@ class Image_generator():
Here I want to apply the data augmentation described in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation two monkeypox image")
print("\nAugmentation two Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 2, data_size)
self.stop += data_size

@@ -41,7 +44,7 @@ class Image_generator():
Here I want to apply the data augmentation described in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation three monkeypox image")
print("\nAugmentation three Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 3, data_size)
self.stop += data_size

@@ -51,7 +54,7 @@ class Image_generator():
Here I want to apply the data augmentation described in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation four monkeypox image")
print("\nAugmentation four Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 4, data_size)

print()
@@ -89,6 +92,10 @@ class Image_generator():

for batch_idx, (images, labels) in enumerate(Training_Dataset):
for i, img in enumerate(images):
if i == self.stop:
break

img = img.permute(2, 0, 1)
img = Transform(img)
img_pil = transforms.ToPILImage()(img)
File.Save_PIL_File("image_" + label + str(data_size) + ".png", save_root, img_pil)  # save the file
@@ -100,7 +107,7 @@ class Image_generator():
'''Images are read by this class itself'''
image_processing = Read_image_and_Process_image(self.Image_Size)
img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])

img = torch.tensor(img)

self.stop = len(img) * 1.5
return img

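The Generator_Content transform referenced above is not part of this diff, so the exact augmentation settings are not visible here. As a rough, hypothetical sketch of a torchvision pipeline in the spirit of the modified-VGG16 monkeypox paper (rotation, shift, shear, zoom, flip), with illustrative ranges rather than the repo's actual values:

# --- illustrative sketch (not part of the commit) ---
import torch
from torchvision import transforms

# hypothetical augmentation pipeline; the angles and ranges below are assumptions
paper_style_augmentation = transforms.Compose([
    transforms.RandomRotation(degrees=45),                 # random rotation
    transforms.RandomAffine(degrees=0,
                            translate=(0.1, 0.1),          # width/height shift
                            scale=(0.9, 1.1),              # zoom in/out
                            shear=10),                     # shear
    transforms.RandomHorizontalFlip(p=0.5),                # horizontal flip
])

# usage on a (C, H, W) float tensor, mirroring the permute(2, 0, 1) step above
img = torch.rand(3, 224, 224)
augmented = paper_style_augmentation(img)
# --- end of sketch ---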
Binary file not shown.
@@ -2,6 +2,7 @@ from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from merge_class.merge import merge
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from Load_process.LoadData import Load_Data_Prepare, Load_Data_Tools
from model_data_processing.processing import Balance_Process

class Load_Indepentend_Data():
def __init__(self, Labels, OneHot_Encording):
@@ -28,7 +29,7 @@ class Load_Indepentend_Data():

classify_image = []
Total_Dict_Data_Root = self.Get_Independent_data_Root(independent_DataRoot)  # read the test data set
# Total_Dict_Data_Root = Balance_Process(Total_Dict_Data_Root, self.Labels)  # shuffle and take the specified number of samples
Total_Dict_Data_Root, Size = Balance_Process(Total_Dict_Data_Root, self.Labels)  # shuffle and take the specified number of samples

Total_List_Data_Root = []
for Label in self.Labels:

Binary file not shown.
@@ -10,10 +10,8 @@ class Entropy_Loss(nn.Module):
def forward(self, outputs, labels):
# Example: using mean squared error as the loss calculation
# outputs = torch.argmax(outputs, 1)
outputs = outputs.float()
outputs = torch.tensor(outputs, dtype=torch.float32).clone().detach()
labels = torch.tensor(labels, dtype=torch.float32).clone().detach()

# NOTE: the input shape may be an issue here
# print(f"Label result: {labels}, result: {outputs}")
labels = labels.float()
loss = functional.cross_entropy(outputs, labels)
return loss
return torch.tensor(loss, requires_grad = True).clone().detach().requires_grad_(True)

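For context on this loss: since PyTorch 1.10, torch.nn.functional.cross_entropy accepts either integer class indices or floating-point class probabilities (such as one-hot vectors) as the target, so both call patterns below are valid. This is an illustrative sketch of those two call forms, not code from the repository:

# --- illustrative sketch (not part of the commit) ---
import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                       # raw model outputs, shape (batch, classes)
one_hot = torch.tensor([[1., 0., 0.],
                        [0., 1., 0.],
                        [0., 0., 1.],
                        [0., 1., 0.]])

loss_from_probs = F.cross_entropy(logits, one_hot)                 # float targets (PyTorch >= 1.10)
loss_from_index = F.cross_entropy(logits, one_hot.argmax(dim=1))   # integer class-index targets

# both paths keep the autograd graph intact; re-wrapping the result with
# torch.tensor(loss, requires_grad=True) would detach it from the model's parameters
print(loss_from_probs, loss_from_index)
# --- end of sketch ---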
Binary file not shown.
@@ -24,7 +24,7 @@ class Read_image_and_Process_image:
try:
img_arr = cv2.imread(p, cv2.IMREAD_COLOR)  # read the file (color)
# img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
resized_arr.append(cv2.resize(img_arr, (self.Image_Size, self.Image_Size)))  # 濤整圖片大小
resized_arr.append(cv2.resize(img_arr, (self.Image_Size, self.Image_Size)))  # 調整圖片大小 (resize the image; this commit fixes the comment typo)
except Exception as e:
print(e)


Binary file not shown.
@@ -9,22 +9,19 @@ class ListDataset(Dataset):
self.data = data_list
self.labels = labels_list
self.status = status
print(status)

def Transform(self):
return transforms.Compose([
transforms.ToTensor()  # convert a PIL Image or numpy array to a tensor and automatically reorder the channels to (C, H, W)
])

def __len__(self):
return len(self.data)

def __getitem__(self, idx):
sample = self.data[idx]
sample = self.data[idx]

if self.status:
from Image_Process.Image_Generator import Image_generator
ImageGenerator = Image_generator("", "", 12)
Transform = ImageGenerator.Generator_Content(5)
sample = Transform(sample)

Transform_Content = self.Transform()
sample = Transform_Content(sample)

label = self.labels[idx]
return sample, label

@@ -76,7 +73,6 @@ class Tool:
Counter.append(i)

Counter = torch.tensor(Counter)

self.__OneHot_Encording = functional.one_hot(Counter, len(content))
pass

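For reference, a small standalone sketch of what functional.one_hot produces for a list of class indices (dummy three-class data, not the repo's labels):

# --- illustrative sketch (not part of the commit) ---
import torch
import torch.nn.functional as functional

labels = torch.tensor([0, 1, 2])                       # one integer index per class
one_hot = functional.one_hot(labels, num_classes=3)    # shape (3, 3)
# tensor([[1, 0, 0],
#         [0, 1, 0],
#         [0, 0, 1]])
print(one_hot)
# --- end of sketch ---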
@@ -112,7 +108,7 @@ class Tool:
def Get_OneHot_Encording_Label(self):
return self.__OneHot_Encording

def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int, status : bool):
def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int, status : bool = True):
seed = 42  # any integer can be used as the seed
# create the random number generator
generator = torch.Generator()
@@ -120,6 +116,6 @@ class Tool:

# create the Dataset
list_dataset = ListDataset(Datas, Labels, status)
sampler = RandomSampler(list_dataset, generator = generator)  # create the Sampler
# sampler = RandomSampler(list_dataset, generator = generator)  # create the Sampler

return DataLoader(dataset = list_dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, sampler = sampler)
return DataLoader(dataset = list_dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, shuffle = True)
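This commit swaps the explicitly seeded RandomSampler for shuffle = True. If reproducible shuffling is still wanted, DataLoader also accepts the seeded generator directly; a small sketch of that option, using a generic TensorDataset rather than the repo's ListDataset:

# --- illustrative sketch (not part of the commit) ---
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(100, 3, 224, 224), torch.randint(0, 3, (100,)))

generator = torch.Generator()
generator.manual_seed(42)  # fixed seed so the shuffle order is repeatable

# shuffle=True builds a RandomSampler internally and uses this generator for it
loader = DataLoader(dataset, batch_size=16, shuffle=True,
                    num_workers=0, pin_memory=True, generator=generator)

for images, labels in loader:
    print(images.shape, labels.shape)
    break
# --- end of sketch ---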
Binary file not shown.
Binary file not shown.
@@ -50,7 +50,7 @@ def call_back(model_name, index, optimizer):

model_dir = '../Result/save_the_best_model/' + model_name
File.JudgeRoot_MakeDir(model_dir)
modelfiles = File.Make_Save_Root('best_model( ' + str(datetime.date.today()) + " )-" + str(index) + ".weights.pt", model_dir)
modelfiles = File.Make_Save_Root('best_model( ' + str(datetime.date.today()) + " )-" + str(index) + ".pt", model_dir)

# model_mckp = ModelCheckpoint(modelfiles, monitor='val_loss', save_best_only=True, save_weights_only = True, mode='auto')


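The commented-out ModelCheckpoint is a Keras API; in PyTorch the equivalent "weights only" save and restore is done through the state dict, regardless of whether the file is named .weights.pt or .pt. A short illustrative sketch, where the linear layer and the file name stand in for the repo's model and Make_Save_Root path:

# --- illustrative sketch (not part of the commit) ---
import torch
import torch.nn as nn

model = nn.Linear(10, 2)          # stand-in for the CNN built elsewhere in the repo
modelfiles = "best_model.pt"      # stand-in for the path built by File.Make_Save_Root

# save only the learnable parameters (the analogue of save_weights_only=True)
torch.save(model.state_dict(), modelfiles)

# restore them later into an identically constructed model
model.load_state_dict(torch.load(modelfiles))
model.eval()
# --- end of sketch ---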
Binary file not shown.
@@ -12,18 +12,18 @@ def plot_history(Epochs, Losses, Accuracys, file_name, model_name):
plt.subplot(1,2,1)
plt.plot(range(1, Epochs + 1), Losses[0])
plt.plot(range(1, Epochs + 1), Losses[1])
plt.ylabel('Accuracy')
plt.ylabel('Losses')
plt.xlabel('epoch')
plt.legend(['Train','Validation'], loc='upper left')
plt.title('Model Accuracy')
plt.title('Model Loss')

plt.subplot(1,2,2)
plt.plot(range(1, Epochs + 1), Accuracys[0])
plt.plot(range(1, Epochs + 1), Accuracys[1])
plt.ylabel('loss')
plt.ylabel('Accuracies')
plt.xlabel('epoch')
plt.legend(['Train','Validation'], loc='upper left')
plt.title('Model Loss')
plt.title('Model Accuracy')

model_dir = '../Result/save_the_train_image( ' + str(datetime.date.today()) + " )"
File.JudgeRoot_MakeDir(model_dir)

@@ -7,6 +7,7 @@ import numpy as np
from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss
from merge_class.merge import merge
from Image_Process.Image_Generator import Image_generator


class All_Step:
@@ -34,6 +35,7 @@ class All_Step:
val_losses = []
train_accuracies = []
val_accuracies = []
Total_Epoch = 0

for epoch in range(self.Epoch):  # training loop
self.Model.train()  # start training
@@ -43,12 +45,7 @@

epoch_iterator = tqdm(self.Training_Data_And_Label, desc= "Training (Epoch %d)" % epoch)  # show a progress bar


for inputs, labels in epoch_iterator:
print(inputs.shape)
# the input is 3-dimensional but the model expects 4 dimensions, so add one more dimension
# inputs = np.expand_dims(inputs, axis = 0)
# print(inputs.shape)
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)

Optimizer.zero_grad()
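The hunk above ends at Optimizer.zero_grad() and the next one resumes at running_loss += loss.item(), so the forward/backward step itself is outside this diff. For orientation, the standard PyTorch pattern that presumably sits in between, written as a generic runnable sketch rather than the repo's exact lines:

# --- illustrative sketch (not part of the commit) ---
import torch
import torch.nn as nn

model = nn.Linear(8, 3)                              # stand-in for the CNN
criterion = nn.CrossEntropyLoss()                    # stand-in for the Entropy_Loss wrapper
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

inputs = torch.randn(4, 8)
labels = torch.tensor([0, 1, 2, 1])

optimizer.zero_grad()                 # clear old gradients
outputs = model(inputs)               # forward pass
loss = criterion(outputs, labels)     # compute the loss
loss.backward()                       # back-propagate
optimizer.step()                      # update the weights
running_loss = loss.item()
# --- end of sketch ---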
@@ -59,24 +56,24 @@
running_loss += loss.item()

# collect the training predictions and labels
Output_Values, Output_Indexs = torch.max(outputs, 1)
True_Values, True_Indexs = torch.max(labels, 1)
# all_train_preds.extend(preds.cpu().numpy())
# all_train_labels.extend(labels.cpu().numpy())
Output_Values, Output_Indexs = torch.max(outputs, dim = 1)
True_Indexs = np.argmax(labels.cpu().numpy(), 1)

all_train_preds.append(Output_Indexs.cpu().numpy())
all_train_labels.append(True_Indexs.cpu().numpy())
all_train_labels.append(True_Indexs)

all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))

# print(f"all_train_labels shape:{np.array(all_train_labels).shape}")

Training_Loss = running_loss / len(self.Training_Data_And_Label)
train_accuracy = accuracy_score(all_train_labels, all_train_preds)

train_losses.append(Training_Loss)
train_accuracies.append(train_accuracy)

print(f"Epoch [{epoch+1}/{self.Epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')
print(f"\nEpoch [{epoch+1}/{self.Epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')

self.Model.eval()
val_loss = 0.0
@@ -84,77 +81,71 @@
all_val_labels = []

with torch.no_grad():
for inputs, labels in self.Validation_Data_And_Label:
# inputs = np.expand_dims(inputs, axis = 0)
for inputs, labels in self.Validation_Data_And_Label:
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)

outputs = self.Model(inputs)
loss = criterion(outputs, labels)
val_loss += loss.item()

# collect the predictions and labels
Output_Values, Output_Indexs = torch.max(outputs, 1)
True_Values, True_Indexs = torch.max(labels, 1)
print(f"Output Contents: {outputs}")

# all_val_preds.extend(preds.cpu().numpy())
# all_val_labels.extend(labels.cpu().numpy())
# collect the predictions and labels
Output_Values, Output_Indexs = torch.max(outputs, dim = 1)
True_Indexs = np.argmax(labels.cpu().numpy(), 1)

all_val_preds.append(Output_Indexs.cpu().numpy())
all_val_labels.append(True_Indexs.cpu().numpy())

# compute the validation loss and accuracy
all_val_preds = Merge_Function.merge_data_main(all_val_preds, 0, len(all_val_preds))
all_val_labels = Merge_Function.merge_data_main(all_val_labels, 0, len(all_val_labels))
all_val_labels.append(True_Indexs)

val_loss /= len(self.Validation_Data_And_Label)
val_accuracy = accuracy_score(all_val_labels, all_val_preds)

val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}")
print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")

early_stopping(val_loss, self.Model, model_path)
if early_stopping.early_stop:
print("Early stopping triggered. Training stopped.")
Total_Epoch = epoch
break

# learning-rate adjustment
scheduler.step(val_loss)

return train_losses, val_losses, train_accuracies, val_accuracies
return train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch

|
||||
# 測試模型
|
||||
cnn_model.eval()
|
||||
True_Label, Predict_Label = [], []
|
||||
True_Label_OneHot, Predict_Label_OneHot = [], []
|
||||
loss = 0.0
|
||||
Merge_Function = merge()
|
||||
|
||||
with torch.no_grad():
|
||||
for images, labels in self.Test_Data_And_Label:
|
||||
# images = np.expand_dims(images, axis = 0)
|
||||
images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)
|
||||
|
||||
outputs = cnn_model(images)
|
||||
|
||||
# 收集訓練預測和標籤
|
||||
Output_Values, Output_Indexs = torch.max(outputs, 1)
|
||||
True_Values, True_Indexs = torch.max(labels, 1)
|
||||
True_Indexs = np.argmax(labels.cpu().numpy(), 1)
|
||||
|
||||
True_Label.append(Output_Indexs.cpu().numpy())
|
||||
Predict_Label.append(True_Indexs.cpu().numpy())
|
||||
Predict_Label.append(True_Indexs)
|
||||
|
||||
# Predict_Label.extend(predicted.cpu().numpy())
|
||||
# True_Label.extend(labels.cpu().numpy())
|
||||
Predict_Label_OneHot.append(torch.tensor(outputs, dtype = torch.float32).cpu().numpy()[0])
|
||||
True_Label_OneHot.append(torch.tensor(labels, dtype = torch.int).cpu().numpy()[0])
|
||||
|
||||
loss /= len(self.Test_Data_And_Label)
|
||||
|
||||
True_Label = Merge_Function.merge_data_main(True_Label, 0, len(True_Label))
|
||||
Predict_Label = Merge_Function.merge_data_main(Predict_Label, 0, len(Predict_Label))
|
||||
True_Label_OneHot = torch.tensor(True_Label_OneHot, dtype = torch.int)
|
||||
Predict_Label_OneHot = torch.tensor(Predict_Label_OneHot, dtype = torch.float32)
|
||||
|
||||
accuracy = accuracy_score(True_Label, Predict_Label)
|
||||
precision = precision_score(True_Label, Predict_Label)
|
||||
recall = recall_score(True_Label, Predict_Label)
|
||||
AUC = auroc(True_Label, Predict_Label, task = "binary")
|
||||
f1 = f1_score(True_Label, Predict_Label)
|
||||
precision = precision_score(True_Label, Predict_Label, average = "macro")
|
||||
recall = recall_score(True_Label, Predict_Label, average = "macro")
|
||||
AUC = auroc(Predict_Label_OneHot, True_Label_OneHot, num_labels = self.Number_Of_Classes, task = "multilabel", average = "macro")
|
||||
f1 = f1_score(True_Label, Predict_Label, average = "macro")
|
||||
return loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label
|
||||
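For reference, a standalone sketch of the multi-class metrics computed above, using sklearn for the macro-averaged scores and the functional auroc from torchmetrics with per-class scores; the 3-class dummy data is illustrative only:

# --- illustrative sketch (not part of the commit) ---
import torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torchmetrics.functional import auroc

true_idx = [0, 1, 2, 1]                                  # ground-truth class indices
pred_idx = [0, 2, 2, 1]                                  # predicted class indices

accuracy = accuracy_score(true_idx, pred_idx)
precision = precision_score(true_idx, pred_idx, average="macro")
recall = recall_score(true_idx, pred_idx, average="macro")
f1 = f1_score(true_idx, pred_idx, average="macro")

# AUROC needs per-class scores and one-hot (multilabel-style) integer targets
scores = torch.softmax(torch.randn(4, 3), dim=1)         # stand-in for model outputs
targets = torch.nn.functional.one_hot(torch.tensor(true_idx), num_classes=3)
auc = auroc(scores, targets, task="multilabel", num_labels=3, average="macro")

print(accuracy, precision, recall, f1, auc)
# --- end of sketch ---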
Binary file not shown.
Binary file not shown.
@@ -1,4 +1,4 @@
from draw_tools.draw import plot_history, Confusion_Matrix_of_Two_Classification
from draw_tools.draw import plot_history, draw_heatmap
from Load_process.Load_Indepentend import Load_Indepentend_Data
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
@@ -77,8 +77,8 @@ class experiments():
self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label

Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1, True)
Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, self.train_batch_size, True)
Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1)
Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, 1)

# self.Grad = Grad_CAM(self.Topic_Tool.Get_Data_Label(), self.test_label, self.experiment_name, self.convolution_name)

@@ -91,15 +91,15 @@ class experiments():
# print("讀出權重\n")
print("\n\n\n讀取訓練資料(70000)執行時間:%f 秒\n\n" % (end - start))

train_losses, val_losses, train_accuracies, val_accuracies = step.Training_Step(self.model_name, counter)
train_losses, val_losses, train_accuracies, val_accuracies, Epoch = step.Training_Step(self.model_name, counter)
loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label = step.Evaluate_Model(cnn_model)

self.record_matrix_image(True_Label, Predict_Label, self.model_name, counter)
print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.experiment_name))  # record the prediction results of this training run and export them to a csv file
Matrix = self.record_matrix_image(True_Label, Predict_Label, self.model_name, counter)
print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.experiment_name, Matrix))  # record the prediction results of this training run and export them to a csv file

Losses = [train_losses, val_losses]
Accuracyes = [train_accuracies, val_accuracies]
plot_history(self.epoch, Losses, Accuracyes, "train" + str(counter), self.experiment_name)  # plot the training results and pass the figure on to be saved
Accuracies = [train_accuracies, val_accuracies]
plot_history(Epoch + 1, Losses, Accuracies, "train" + str(counter), self.experiment_name)  # plot the training results and pass the figure on to be saved
# self.Grad.process_main(cnn_model, counter, self.test)

return loss, accuracy, precision, recall, AUC, f1
@@ -118,9 +118,9 @@ class experiments():
'''Draw the confusion matrix (heatmap)'''
# compute the confusion matrix
matrix = confusion_matrix(True_Labels, Predict_Labels)
Confusion_Matrix_of_Two_Classification(model_name, matrix, index)  # call the function that draws the confusion matrix
draw_heatmap(matrix, model_name, index)  # call the function that draws the confusion matrix

return matrix.real
return matrix

def record_everyTime_test_result(self, loss, accuracy, precision, recall, auc, f, indexs, model_name, Matrix):
'''Record the result of a single training run and write it out to a file'''

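draw_heatmap itself is not shown in this diff; a typical confusion-matrix heatmap with sklearn and seaborn looks roughly like the sketch below, with the dummy labels, styling, and output path assumed:

# --- illustrative sketch (not part of the commit) ---
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix

true_labels = [0, 1, 2, 1, 0, 2]
pred_labels = [0, 2, 2, 1, 0, 1]

matrix = confusion_matrix(true_labels, pred_labels)

# annotated heatmap of the counts per (true class, predicted class) cell
sns.heatmap(matrix, annot=True, fmt="d", cmap="Blues")
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.title("Confusion matrix")
plt.savefig("confusion_matrix.png")
plt.close()
# --- end of sketch ---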
main.py (23 changed lines)
@@ -52,29 +52,18 @@ if __name__ == "__main__":
counter = 5
Batch_Size = 128

for i in range(0, counter, 1):  # run the specified number of training rounds
for Run_Range in range(0, counter, 1):  # run the specified number of training rounds
# load the data
Data_Dict_Data = loading_data.process_main(Label_Length)
# Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)

# print the contents
i = 0
total = 0
Length_Array = []
for label in Labels:
length = len(Data_Dict_Data[label])
Length_Array.append(length)
print(f"類別 {i} 有 {str(length)} 筆資料")
total += length
i += 1
Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)

print("總共有 " + str(total) + " 筆資料")
print("總共有 " + str(Train_Size * 3) + " 筆資料")

# build the same number of labels as there are data samples
Classes = []
i = 0
for encording in Encording_Label:
Classes.append(image_processing.make_label_list(Length_Array[i], encording))
Classes.append(image_processing.make_label_list(Train_Size, encording))
i += 1

# put the data into a dict
@@ -97,7 +86,7 @@ if __name__ == "__main__":
# training_data = list(total_trains)  # convert the data type

training_data, train_label = image_processing.image_data_processing(trains_Data_Image, training_label)  # normalize the loaded files and convert the labels to numpy array format
Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size, True)
Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size)

# check the shape of the Dataloader
for idx, data in enumerate(Training_Dataset):
@@ -140,7 +129,7 @@ if __name__ == "__main__":
# trains.append(All_Monkeypox_Data)
# training_label.append([0, 0, 0, 0, 1, 0, 0])

loss, accuracy, precision, recall, AUC, f = experiment.processing_main(Training_Dataset, i)  # run the training procedure
loss, accuracy, precision, recall, AUC, f = experiment.processing_main(Training_Dataset, Run_Range)  # run the training procedure
Calculate_Tool.Append_numbers(loss, accuracy, precision, recall, AUC, f)

print("實驗結果")

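Balance_Process is imported from model_data_processing.processing and its body is not shown in this diff; judging by how it is used here (returning a balanced dict plus a per-class size), it likely does something along the lines of the hypothetical sketch below. The function name, labels, and seed are assumptions, not the repo's code:

# --- illustrative sketch (not part of the commit) ---
import random

def balance_process(data_dict, labels, seed=42):
    """Shuffle each class list and truncate every class to the size of the smallest one."""
    rng = random.Random(seed)
    size = min(len(data_dict[label]) for label in labels)   # smallest class decides the size
    balanced = {}
    for label in labels:
        items = list(data_dict[label])
        rng.shuffle(items)
        balanced[label] = items[:size]
    return balanced, size

# example: three classes with unequal counts are cut down to 2 samples each
data = {"Monkeypox": ["a", "b", "c"], "Normal": ["d", "e"], "Other": ["f", "g", "h", "i"]}
balanced, size = balance_process(data, ["Monkeypox", "Normal", "Other"])
print(size, {k: len(v) for k, v in balanced.items()})
# --- end of sketch ---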
Binary file not shown.
test.ipynb (new file, 132 lines)
@@ -0,0 +1,132 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor(1.9255)\n"
]
}
],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"\n",
"# assume there are 3 classes\n",
"num_classes = 3\n",
"batch_size = 4\n",
"\n",
"# simulate the model output (before softmax)\n",
"# the output shape should be [batch_size, num_classes]\n",
"predictions = torch.randn(batch_size, num_classes)\n",
"\n",
"# simulate the ground-truth labels, assumed to be one-hot encoded\n",
"# shape is [batch_size, num_classes]\n",
"targets_one_hot = torch.tensor([\n",
" [1, 0, 0],\n",
" [0, 1, 0],\n",
" [0, 0, 1],\n",
" [0, 1, 0]\n",
"], dtype=torch.float32)\n",
"\n",
"# convert the one-hot encoded labels to class indices\n",
"# shape is [batch_size]\n",
"targets = torch.argmax(targets_one_hot, dim=1)\n",
"\n",
"# define the loss function\n",
"criterion = nn.CrossEntropyLoss()\n",
"\n",
"# compute the loss\n",
"loss = criterion(predictions, targets)\n",
"print(loss)\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[-0.2698, 0.7174, -0.2358],\n",
" [ 1.4976, -1.1554, 3.3826],\n",
" [-0.8067, 1.1254, 1.9788],\n",
" [ 1.3467, 0.7573, -1.5764]])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"predictions"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([0, 1, 2, 1])"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"targets"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<generator object <genexpr> at 0x7fd0786385f0>\n"
]
}
],
"source": [
"print(x for x in range(1, 76))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}