20250224 commits: Data augmentation is now done 5x, but validation accuracy is only 33%

This commit is contained in:
2025-02-24 22:57:27 +08:00
parent 16c7a074bb
commit 7cb89d2ff1
13 changed files with 46 additions and 39 deletions

View File

@@ -7,6 +7,7 @@ from Training_Tools.Tools import Tool
import numpy as np
from PIL import Image
import torch
import cv2
class Image_generator():
'''Performs data augmentation'''
@@ -26,36 +27,37 @@ class Image_generator():
What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation one Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 1, data_size)
self.stop += data_size
for i in range(1, 5, 1):
print("\nAugmentation one Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, i, data_size)
self.stop += data_size
# Create the standard data augmentation
'''
What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation two Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 2, data_size)
self.stop += data_size
# # Create the standard data augmentation
# '''
# What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
# and generate the augmented images
# '''
# print("\nAugmentation two Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 2, data_size)
# self.stop += data_size
# Create the standard data augmentation
'''
What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation three Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 3, data_size)
self.stop += data_size
# # Create the standard data augmentation
# '''
# What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
# and generate the augmented images
# '''
# print("\nAugmentation three Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 3, data_size)
# self.stop += data_size
# Create the standard data augmentation
'''
What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
and generate the augmented images
'''
print("\nAugmentation four Generator image")
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 4, data_size)
# # Create the standard data augmentation
# '''
# What I want to do here is follow the data augmentation in the paper IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16
# and generate the augmented images
# '''
# print("\nAugmentation four Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 4, data_size)
print()
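
The hunk above collapses the copy-pasted augmentation blocks into one loop over augmentation modes 1-4; the per-mode blocks for modes two to four are left commented out. A self-contained sketch of the looped form, where Augmenter, run_all_passes, the fake get_processing_Augmentation, and the dataset path are hypothetical stand-ins for Image_generator and its real method (illustration only, not the repository's code):

    class Augmenter:
        def __init__(self):
            self.stop = 0

        def get_processing_Augmentation(self, root, mode, data_size):
            return data_size + 10  # pretend each augmentation pass adds 10 images

        def run_all_passes(self, root, data_size=0):
            # Apply augmentation modes 1-4 in turn; each pass grows the dataset
            # and advances the stopping counter by the new sample count.
            for mode in range(1, 5):
                print("\nAugmentation " + str(mode) + " Generator image")
                data_size = self.get_processing_Augmentation(root, mode, data_size)
                self.stop += data_size
            return data_size

    print(Augmenter().run_all_passes("Dataset/Train"))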
@@ -97,7 +99,12 @@ class Image_generator():
img = img.permute(2, 0, 1)
img = Transform(img)
img_pil = transforms.ToPILImage()(img)
# Convert to a NumPy array and change BGR to RGB
img_np = img.numpy().transpose(1, 2, 0) # back to HWC format
img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB) # BGR to RGB
img_pil = transforms.ToPILImage()(img_np)
File.Save_PIL_File("image_" + label + str(data_size) + ".png", save_root, img_pil) # save the file
data_size += 1
@@ -109,7 +116,7 @@ class Image_generator():
img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])
img = torch.tensor(img)
self.stop = len(img) * 1.5
self.stop = len(img) * 5
return img
def Generator_Content(self, judge): # image data augmentation
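
The new cv2 lines in this file exist because OpenCV represents images in BGR channel order while PIL expects RGB; without the swap, the saved augmented images come out colour-shifted. A self-contained sketch of that conversion, assuming the tensor holds 0-255 pixel values in CHW layout (the helper name tensor_bgr_to_pil_rgb and the use of Image.fromarray are illustrative, not the repository's code):

    import cv2
    import numpy as np
    import torch
    from PIL import Image

    def tensor_bgr_to_pil_rgb(img: torch.Tensor) -> Image.Image:
        # CHW tensor -> HWC uint8 array, then swap BGR to RGB for PIL.
        img_np = img.numpy().transpose(1, 2, 0).astype(np.uint8)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        return Image.fromarray(img_np)

    # Example: a BGR image whose first channel (blue) is 255 shows up as blue in RGB.
    bgr = torch.zeros(3, 4, 4)
    bgr[0] = 255
    print(tensor_bgr_to_pil_rgb(bgr).getpixel((0, 0)))  # (0, 0, 255)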

View File

@@ -45,8 +45,8 @@ class Load_Indepentend_Data():
Classify_Label.append(test_label)
i += 1
original_test_root = self.merge.merge_data_main(classify_image, 0, 2)
original_test_label = self.merge.merge_data_main(Classify_Label, 0, 2)
original_test_root = self.merge.merge_data_main(classify_image, 0)
original_test_label = self.merge.merge_data_main(Classify_Label, 0)
test = []
test = image_processing.Data_Augmentation_Image(original_test_root)
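
Dropping the explicit third argument means these calls now pick up merge_data_main's new default total_merge_number = 3 (see the merge diff below), so the independent test data is merged across three classes instead of two. A small illustration of that default-argument behaviour, with placeholder data and a simplified merge that just flattens the listed classes (not the repository's implementation):

    def merge_data_main(merge_data, merge_start_index, total_merge_number=3):
        # Flatten total_merge_number class lists, starting at merge_start_index.
        merged = []
        for i in range(merge_start_index, merge_start_index + total_merge_number):
            merged.extend(merge_data[i])
        return merged

    classes = [["a1", "a2"], ["b1"], ["c1", "c2"]]
    print(merge_data_main(classes, 0))     # new call: all three classes
    print(merge_data_main(classes, 0, 2))  # old call: first two classes only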

View File

@@ -3,7 +3,6 @@ from draw_tools.Grad_cam import Grad_CAM
from Load_process.Load_Indepentend import Load_Indepentend_Data
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
from merge_class.merge import merge
from sklearn.metrics import confusion_matrix
from experiments.pytorch_Model import ModifiedXception
from experiments.Model_All_Step import All_Step
@@ -48,7 +47,6 @@ class experiments():
self.validation_obj = validation_the_enter_data() # instantiate the validation object
self.cut_image = Load_Indepentend_Data(self.Topic_Tool.Get_Data_Label(), self.Topic_Tool.Get_OneHot_Encording_Label()) # instantiate the image-splitting object
self.merge = merge()
self.model_name = Model_Name # a name that tells me which model is in use (possibly a pretrained model / a model I designed myself)
self.experiment_name = Experiment_Name
@@ -105,7 +103,7 @@ class experiments():
Losses = [train_losses, val_losses]
Accuracies = [train_accuracies, val_accuracies]
plot_history(Epoch + 1, Losses, Accuracies, "train" + str(counter), self.experiment_name) # plot the training results and pass the figure out to be saved
self.Grad.process_main(cnn_model, counter, Testing_Dataset)
# self.Grad.process_main(cnn_model, counter, Testing_Dataset)
return loss, accuracy, precision, recall, AUC, f1

View File

@@ -31,6 +31,7 @@ if __name__ == "__main__":
# Get the one-hot encoding data
tool.Set_OneHotEncording(Labels)
Encording_Label = tool.Get_OneHot_Encording_Label()
Label_Length = len(Labels)
Classification = 3 # number of classes

View File

@@ -18,10 +18,10 @@ class merge:
def get_judge_status(self):
return self.__judge
def merge_all_image_data(self, Classify1, Classify2):
def merge_all_image_data(self, Classify1, Classify2, Number_Of_Classes = 2):
merged_data = [Classify1, Classify2]
return self.merge_data_main(merged_data, 0, 2)
return self.merge_data_main(merged_data, 0, Number_Of_Classes)
def merge_data_main(self, merge_data, merge_start_index, total_merge_number = 3):
'''
@@ -31,6 +31,7 @@ class merge:
* merge_start_index: the starting index of the data to merge
* total_merge_number: the total number of entries to merge
'''
if self.validation.validation_type(merge_data, dict):
self.set_merge_data(merge_data)
return self.merge_dict_to_list_data(merge_start_index, total_merge_number)
@@ -50,12 +51,12 @@ class merge:
def merge_dict_to_dict(self, original : dict, myself):
keys = list(original.keys())
data = {
keys[0]: [],
keys[1]: [],
}
data = {}
for key in keys:
Content = {key : []}
data.update(Content)
tempData = [original[key], myself[key]]
end = 2
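
The last hunk stops hard-coding two class keys in merge_dict_to_dict and instead builds one empty bucket per key of the input dict, so any number of classes works. A minimal sketch of the same pattern, with placeholder class labels; the merging step shown here is a simplification, not the repository's loop:

    def merge_dict_to_dict(original: dict, myself: dict) -> dict:
        # One bucket per class key, however many classes there are.
        data = {key: [] for key in original}
        for key in original:
            # Combine the entries from both sources under the same class key.
            data[key].extend(original[key])
            data[key].extend(myself[key])
        return data

    a = {"class_a": [1, 2], "class_b": [3], "class_c": [4]}
    b = {"class_a": [5], "class_b": [6], "class_c": [7, 8]}
    print(merge_dict_to_dict(a, b))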