20250216 Commit: Switched to three classifications; training runs, but validation still has an open question
@@ -7,11 +7,12 @@ from Training_Tools.Tools import Tool
 class Image_generator():
     '''Generate augmented data'''
-    def __init__(self, Generator_Root, Labels) -> None:
+    def __init__(self, Generator_Root, Labels, Image_Size) -> None:
         self._validation = validation_the_enter_data()
         self.stop = 0
         self.Labels = Labels
         self.Generator_Root = Generator_Root
+        self.Image_Size = Image_Size
         pass

     def Processing_Main(self, Training_Dict_Data_Root):
@@ -71,18 +72,17 @@ class Image_generator():
         stardand = which kind of Image Augmentation to use
         '''
         File = Process_File()
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(self.Image_Size)
         tool = Tool()
         Classes = []
         Transform = self.Generator_Content(stardand)

         for label in self.Labels:  # run data augmentation on each class separately
-            image = self.load_data(stardand)  # fetch the data
+            image = self.load_data(label)  # fetch the data
             save_root = File.Make_Save_Root(label, save_roots)  # join the paths

             Classes = image_processing.make_label_list(len(image), "1")
-            Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(image, Classes, 1)
+            Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(image, Classes, 1, False)

             if File.JudgeRoot_MakeDir(save_root):  # check whether the target folder exists; create it if it does not
                 print("The file already exists")
@@ -92,12 +92,13 @@ class Image_generator():
                 img = Transform(img)
                 img_pil = transforms.ToPILImage()(img)
                 File.Save_PIL_File("image_" + label + str(data_size) + ".png", save_root, img_pil)  # save the file
                 data_size += 1

         return data_size

     def load_data(self, label):
         '''Images are read by my own loader'''
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(self.Image_Size)
         img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])
Binary file not shown.
Binary file not shown.
@@ -19,12 +19,13 @@ Parameter
     herpes_data: merges the herpes Dataset into one list
     MonkeyPox_data: merges the MonkeyPox DataSet into one list
     '''
-    def __init__(self, Training_Root, Test_Root, Validation_Root, Generator_Root, Labels) -> None:
+    def __init__(self, Training_Root, Test_Root, Validation_Root, Generator_Root, Labels, Image_Size) -> None:
         self.Training_Root = Training_Root
         self.TestRoot = Test_Root
         self.ValidationRoot = Validation_Root
         self.GeneratoRoot = Generator_Root
         self.Labels = Labels
+        self.Image_Size = Image_Size
         pass

     def process_main(self, Data_Length : int):
@@ -33,7 +34,7 @@ Parameter
         load = Loding_Data_Root(self.Labels, self.Training_Root, self.GeneratoRoot)
         Indepentend = Cut_Indepentend_Data(self.Training_Root, self.Labels)
         Load_Tool = Load_Data_Tools()
-        Generator = Image_generator(self.GeneratoRoot, self.Labels)
+        Generator = Image_generator(self.GeneratoRoot, self.Labels, self.Image_Size)

         # split the test data out on its own
         test_size = 0.1
@@ -24,7 +24,7 @@ class Load_Indepentend_Data():
         print("validation_labels has " + str(len(self.validation_label)) + " records\n")

     def get_Independent_image(self, independent_DataRoot):
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(123)

         classify_image = []
         Total_Dict_Data_Root = self.Get_Independent_data_Root(independent_DataRoot)  # read the test dataset's data
Binary file not shown.
@@ -15,5 +15,5 @@ class Entropy_Loss(nn.Module):
         # the input shape is still in question
         # print(f"Label result: {labels}, result: {outputs}")
         labels = labels.float()
-        loss = functional.binary_cross_entropy(outputs, labels)
+        loss = functional.cross_entropy(outputs, labels)
         return loss
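Note that this switch also changes what the loss expects: binary_cross_entropy takes probabilities, while cross_entropy takes raw logits and applies log-softmax itself, so a model that still ends in a sigmoid/softmax would now be squashed twice. A minimal sanity check of the new call, assuming three classes and one-hot float labels as used elsewhere in this commit (the tensors here are illustrative):

    import torch
    from torch.nn import functional

    outputs = torch.randn(4, 3)  # raw logits for a batch of 4, 3 classes
    labels = functional.one_hot(torch.tensor([0, 2, 1, 1]), num_classes=3).float()
    loss = functional.cross_entropy(outputs, labels)  # soft-target form needs PyTorch >= 1.10
    print(loss.item())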
Binary file not shown.
@@ -3,15 +3,15 @@ import numpy as np
 import torch

 class Read_image_and_Process_image:
-    def __init__(self) -> None:
+    def __init__(self, Image_Size) -> None:
+        self.Image_Size = Image_Size
         pass

     def get_data(self, path):
         '''Read an image file'''
-        img_size = 512  # side length of the downscaled image
         try:
             img_arr = cv2.imread(path, cv2.IMREAD_COLOR)  # read the file (color)
             # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
-            resized_arr = cv2.resize(img_arr, (img_size, img_size))  # resize the image
+            resized_arr = cv2.resize(img_arr, (self.Image_Size, self.Image_Size))  # resize the image
         except Exception as e:
             print(e)
@@ -21,11 +21,10 @@ class Read_image_and_Process_image:
         resized_arr = []

         for p in path:
-            img_size = 512  # side length of the downscaled image
             try:
                 img_arr = cv2.imread(p, cv2.IMREAD_COLOR)  # read the file (color)
                 # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
-                resized_arr.append(cv2.resize(img_arr, (img_size, img_size)))  # resize the image
+                resized_arr.append(cv2.resize(img_arr, (self.Image_Size, self.Image_Size)))  # resize the image
             except Exception as e:
                 print(e)
@@ -33,9 +32,8 @@ class Read_image_and_Process_image:

     def image_data_processing(self, data, label):
         '''Process the images after reading'''
-        img_size = 512
         data = np.asarray(data).astype(np.float32)  # convert the image list into an np.array
-        data = data.reshape(-1, img_size, img_size, 3)  # change the array shape
+        data = data.reshape(-1, self.Image_Size, self.Image_Size, 3)  # change the array shape
         label = np.array(label)  # convert the labels from a list into a numpy array
         return data, label
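One thing this reshape leaves open: it produces NHWC arrays (batch, height, width, channels), while PyTorch convolution layers expect NCHW. A small sketch of the conversion typically needed before feeding the model; the batch size and Image_Size values here are illustrative:

    import numpy as np
    import torch

    image_size = 256
    data = np.zeros((4, image_size, image_size, 3), dtype=np.float32)  # NHWC, as returned above
    tensor = torch.from_numpy(data).permute(0, 3, 1, 2)                # reorder to NCHW
    print(tensor.shape)  # torch.Size([4, 3, 256, 256])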
Binary file not shown.
@@ -5,9 +5,11 @@ from torch.utils.data import Dataset, DataLoader, RandomSampler
 import torchvision.transforms as transforms

 class ListDataset(Dataset):
-    def __init__(self, data_list, labels_list):
+    def __init__(self, data_list, labels_list, status):
         self.data = data_list
         self.labels = labels_list
+        self.status = status
+        print(status)

     def Transform(self):
         return transforms.Compose([
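So far the commit only stores and prints the new status flag. One plausible use, sketched here as an assumption rather than the repo's actual behavior, is to switch between a deterministic evaluation pipeline and an augmenting training pipeline inside the dataset (class and transform choices below are hypothetical):

    import torchvision.transforms as transforms
    from torch.utils.data import Dataset

    class StatusDataset(Dataset):  # hypothetical illustration, not the repo's class
        def __init__(self, data_list, labels_list, status):
            self.data, self.labels, self.status = data_list, labels_list, status

        def __len__(self):
            return len(self.data)

        def __getitem__(self, idx):
            if self.status:  # e.g. True for test/validation: no random augmentation
                transform = transforms.Compose([transforms.ToTensor()])
            else:            # e.g. False for generated training data: augment
                transform = transforms.Compose([transforms.ToTensor(),
                                                transforms.RandomHorizontalFlip()])
            return transform(self.data[idx]), self.labels[idx]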
@@ -49,7 +51,7 @@ class Tool:
         pass

     def Set_Labels(self):
-        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop"]
+        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop", "Have_Question_Crop"]

     def Set_Save_Roots(self):
         self.__ICG_Training_Root = "../Dataset/Training/CA_ICG"
@@ -110,14 +112,14 @@ class Tool:
     def Get_OneHot_Encording_Label(self):
         return self.__OneHot_Encording

-    def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int):
+    def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int, status : bool):
         seed = 42  # any fixed integer works as the seed
         # build a seeded random-number generator
         generator = torch.Generator()
         generator.manual_seed(seed)

         # create the Dataset
-        list_dataset = ListDataset(Datas, Labels)
+        list_dataset = ListDataset(Datas, Labels, status)
         sampler = RandomSampler(list_dataset, generator = generator)  # create the sampler

         return DataLoader(dataset = list_dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, sampler = sampler)
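Because the generator is seeded before being handed to RandomSampler, the shuffle order is reproducible across runs. A minimal self-contained sketch of the same pattern (dataset and names are illustrative, not from the repo):

    import torch
    from torch.utils.data import DataLoader, RandomSampler, TensorDataset

    dataset = TensorDataset(torch.arange(10).float())
    generator = torch.Generator()
    generator.manual_seed(42)
    sampler = RandomSampler(dataset, generator=generator)
    loader = DataLoader(dataset, batch_size=2, sampler=sampler)
    print([batch[0].tolist() for batch in loader])  # identical order on every run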
Binary file not shown.
@@ -6,6 +6,7 @@ import torch.optim as optim
 import numpy as np
 from all_models_tools.all_model_tools import call_back
 from Model_Loss.Loss import Entropy_Loss
+from merge_class.merge import merge


 class All_Step:
@@ -28,6 +29,7 @@ class All_Step:
         model_path, early_stopping, scheduler = call_back(model_name, counter, Optimizer)

         criterion = Entropy_Loss()  # use the custom loss function
+        Merge_Function = merge()
         train_losses = []
         val_losses = []
         train_accuracies = []
@@ -57,13 +59,16 @@ class All_Step:
             running_loss += loss.item()

             # collect the training predictions and labels
-            _, preds = torch.max(outputs, 1)
-            labels = np.argmax(labels.cpu().numpy())
+            Output_Values, Output_Indexs = torch.max(outputs, 1)
+            True_Values, True_Indexs = torch.max(labels, 1)
             # all_train_preds.extend(preds.cpu().numpy())
             # all_train_labels.extend(labels.cpu().numpy())

-            all_train_preds.append(preds.cpu().numpy())
-            all_train_labels.append(labels)
+            all_train_preds.append(Output_Indexs.cpu().numpy())
+            all_train_labels.append(True_Indexs.cpu().numpy())

+        all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
+        all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))

         Training_Loss = running_loss / len(self.Training_Data_And_Label)
         train_accuracy = accuracy_score(all_train_labels, all_train_preds)
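The switch from np.argmax(labels.cpu().numpy()) to torch.max(labels, 1) matters: np.argmax without an axis flattens the batch and returns a single scalar index, while torch.max over dim=1 returns one index per row, for logits and one-hot labels alike. A small check with illustrative tensors:

    import torch

    outputs = torch.tensor([[0.1, 0.7, 0.2],
                            [0.8, 0.1, 0.1]])  # logits for a batch of 2
    labels = torch.tensor([[0., 1., 0.],
                           [1., 0., 0.]])      # one-hot targets
    _, pred_idx = torch.max(outputs, 1)        # tensor([1, 0])
    _, true_idx = torch.max(labels, 1)         # tensor([1, 0])
    print((pred_idx == true_idx).float().mean().item())  # batch accuracy: 1.0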
@@ -79,7 +84,7 @@ class All_Step:
         all_val_labels = []

         with torch.no_grad():
-            for batch_Index, (inputs, labels) in self.Validation_Data_And_Label:
+            for inputs, labels in self.Validation_Data_And_Label:
                 # inputs = np.expand_dims(inputs, axis = 0)
                 inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)
@@ -87,17 +92,20 @@ class All_Step:
                 loss = criterion(outputs, labels)
                 val_loss += loss.item()

-                # the validation predictions and labels
-                _, preds = torch.max(outputs, 1)
-                labels = np.argmax(labels.cpu().numpy())
+                # collect the validation predictions and labels
+                Output_Values, Output_Indexs = torch.max(outputs, 1)
+                True_Values, True_Indexs = torch.max(labels, 1)

                 # all_val_preds.extend(preds.cpu().numpy())
                 # all_val_labels.extend(labels.cpu().numpy())

-                all_val_preds.append(preds.cpu().numpy())
-                all_val_labels.append(labels)
+                all_val_preds.append(Output_Indexs.cpu().numpy())
+                all_val_labels.append(True_Indexs.cpu().numpy())

+        # compute the validation loss and accuracy
+        all_val_preds = Merge_Function.merge_data_main(all_val_preds, 0, len(all_val_preds))
+        all_val_labels = Merge_Function.merge_data_main(all_val_labels, 0, len(all_val_labels))

         val_loss /= len(self.Validation_Data_And_Label)
         val_accuracy = accuracy_score(all_val_labels, all_val_preds)
@@ -120,24 +128,30 @@ class All_Step:
         cnn_model.eval()
         True_Label, Predict_Label = [], []
         loss = 0.0
+        Merge_Function = merge()

         with torch.no_grad():
-            for batch_Index, (images, labels) in self.Test_Data_And_Label:
+            for images, labels in self.Test_Data_And_Label:
                 # images = np.expand_dims(images, axis = 0)
                 images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)

                 outputs = cnn_model(images)

-                _, predicted = torch.max(outputs, 1)
-                labels = np.argmax(labels.cpu().numpy())
+                # collect the test predictions and labels
+                Output_Values, Output_Indexs = torch.max(outputs, 1)
+                True_Values, True_Indexs = torch.max(labels, 1)

-                Predict_Label.append(predicted.cpu().numpy())
-                True_Label.append(labels)
+                Predict_Label.append(Output_Indexs.cpu().numpy())
+                True_Label.append(True_Indexs.cpu().numpy())

                 # Predict_Label.extend(predicted.cpu().numpy())
                 # True_Label.extend(labels.cpu().numpy())

         loss /= len(self.Test_Data_And_Label)

+        True_Label = Merge_Function.merge_data_main(True_Label, 0, len(True_Label))
+        Predict_Label = Merge_Function.merge_data_main(Predict_Label, 0, len(Predict_Label))

         accuracy = accuracy_score(True_Label, Predict_Label)
         precision = precision_score(True_Label, Predict_Label)
         recall = recall_score(True_Label, Predict_Label)
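With three classes, precision_score and recall_score as called above will raise an error: sklearn defaults to average='binary', which only works for two classes. An explicit averaging mode is needed; 'macro' is one common choice, shown here as a suggestion rather than what the commit does:

    from sklearn.metrics import precision_score, recall_score

    true_label    = [0, 1, 2, 2, 1]  # illustrative labels
    predict_label = [0, 1, 1, 2, 1]
    print(precision_score(true_label, predict_label, average="macro"))
    print(recall_score(true_label, predict_label, average="macro"))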
Binary file not shown.
Binary file not shown.
@@ -1,5 +1,3 @@
-from all_models_tools.all_model_tools import call_back
-from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from draw_tools.draw import plot_history, Confusion_Matrix_of_Two_Classification
 from Load_process.Load_Indepentend import Load_Indepentend_Data
 from _validation.ValidationTheEnterData import validation_the_enter_data
@@ -7,7 +5,6 @@ from Load_process.file_processing import Process_File
 from merge_class.merge import merge
 from sklearn.metrics import confusion_matrix
 from experiments.pytorch_Model import ModifiedXception
-from Image_Process.Image_Generator import Image_generator
 from experiments.Model_All_Step import All_Step
 import pandas as pd
 import numpy as np
@@ -49,8 +46,6 @@ class experiments():

         self.validation_obj = validation_the_enter_data()  # create the validation object
         self.cut_image = Load_Indepentend_Data(self.Topic_Tool.Get_Data_Label(), self.Topic_Tool.Get_OneHot_Encording_Label())  # create the image-splitting object
-        self.image_processing = Read_image_and_Process_image()
-        self.ImageGenerator = Image_generator("", "")
         self.merge = merge()

         self.model_name = Model_Name  # a name that tells me which model is used (a pretrained model or one I designed myself)
@@ -82,8 +77,8 @@ class experiments():
         self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
         self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label

-        Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1)
-        Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, self.train_batch_size)
+        Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1, True)
+        Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, self.train_batch_size, True)

         # self.Grad = Grad_CAM(self.Topic_Tool.Get_Data_Label(), self.test_label, self.experiment_name, self.convolution_name)
main.py
@@ -32,19 +32,20 @@ if __name__ == "__main__":
     tool.Set_OneHotEncording(Labels)
     Encording_Label = tool.Get_OneHot_Encording_Label()
     Label_Length = len(Labels)
-    Classification = 2  # number of classes
+    Classification = 3  # number of classes

     Model_Name = "Xception"  # a name that tells me which model is used (a pretrained model or one I designed myself)
     Experiment_Name = "Xception Skin to train Normal stomach cancer"
     Generator_Batch_Size = 50
     Epoch = 10000
     Train_Batch_Size = 50
+    Image_Size = 256
     Convolution_Name = "block14_sepconv2"

     Prepare = Load_Data_Prepare()
-    loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels)
+    loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels, Image_Size)
     experiment = experiments(Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, Convolution_Name, tool, Classification, Status)
-    image_processing = Read_image_and_Process_image()
+    image_processing = Read_image_and_Process_image(Image_Size)
     Merge = merge()
     Calculate_Tool = Calculate()
@@ -96,9 +97,9 @@ if __name__ == "__main__":
     # training_data = list(total_trains)  # convert the data type

     training_data, train_label = image_processing.image_data_processing(trains_Data_Image, training_label)  # normalize the loaded files and convert the labels into numpy-array format
-    Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size)
+    Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size, True)

     # inspect the shape of the DataLoader batches
     for idx, data in enumerate(Training_Dataset):
         datas = data[0]
         print(f"Shape: {datas.shape}")
Binary file not shown.
@@ -1,6 +1,5 @@
 from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from sklearn.model_selection import train_test_split
 from merge_class.merge import merge
-from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from Load_process.LoadData import Load_Data_Prepare, Process_File, Load_Data_Tools
 import shutil
@@ -21,7 +20,7 @@ class Cut_Indepentend_Data():
         self.Cut_Of_Independent_Data(get_all_image_data, Indepentend_Data_Root, Test_Size)

     def Balance_Cut_Of_Independent_Data(self, Independent_Dict_Data_Content, Test_Size):
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(123)
         Prepare = Load_Data_Prepare()
         Prepare.Set_Data_Content([], len(self.Labels))
         Prepare.Set_Data_Dictionary(self.Labels, Prepare.Get_Data_Content(), 2)
@@ -36,8 +35,7 @@ class Cut_Indepentend_Data():

     def Cut_Of_Independent_Data(self, Independent_Dict_Data_Content, IndependentDataRoot, Test_Size):
         '''Split out the independent data (e.g. validation, training)'''
-        image_processing = Read_image_and_Process_image()
-        Prepaer = Load_Data_Prepare()
+        image_processing = Read_image_and_Process_image(122)
         File = Process_File()
         i = 0