20250216 Commits: Modified to three classifications; it can train, but validation still has some issues
This commit is contained in:
parent 96245bd095
commit fd2366a40f
@@ -7,11 +7,12 @@ from Training_Tools.Tools import Tool
 class Image_generator():
     '''Performs data augmentation'''
-    def __init__(self, Generator_Root, Labels) -> None:
+    def __init__(self, Generator_Root, Labels, Image_Size) -> None:
         self._validation = validation_the_enter_data()
         self.stop = 0
         self.Labels = Labels
         self.Generator_Root = Generator_Root
+        self.Image_Size = Image_Size
         pass

     def Processing_Main(self, Training_Dict_Data_Root):
@@ -71,18 +72,17 @@ class Image_generator():
         stardand = which kind of Image Augmentation to use
         '''
         File = Process_File()
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(self.Image_Size)
         tool = Tool()
         Classes = []
         Transform = self.Generator_Content(stardand)

         for label in self.Labels:  # run data augmentation for each class separately
-            image = self.load_data(stardand)  # fetch the data
+            image = self.load_data(label)  # fetch the data
             save_root = File.Make_Save_Root(label, save_roots)  # join the paths

             Classes = image_processing.make_label_list(len(image), "1")
-            Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(image, Classes, 1)
+            Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(image, Classes, 1, False)

             if File.JudgeRoot_MakeDir(save_root):  # check whether the save folder exists; create it if not
                 print("The file is exist")

@@ -92,12 +92,13 @@ class Image_generator():
                 img = Transform(img)
                 img_pil = transforms.ToPILImage()(img)
                 File.Save_PIL_File("image_" + label + str(data_size) + ".png", save_root, img_pil)  # save the file
+                data_size += 1

         return data_size

     def load_data(self, label):
         '''Images is readed by myself'''
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(self.Image_Size)
         img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])
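The `data_size += 1` added at the end of the save loop is the substantive fix in this file: the output filename embeds the counter, so without the increment every augmented image of a class would overwrite the same path. A minimal sketch of the naming scheme, with `img_pil`, `label`, and `save_root` standing in for the values produced above:

```python
import os
from PIL import Image

# Placeholder values; the real ones come from the augmentation loop above.
img_pil = Image.new("RGB", (256, 256))
label, save_root, data_size = "Normal_Crop", "./augmented", 0
os.makedirs(save_root, exist_ok=True)

for _ in range(3):
    img_pil.save(os.path.join(save_root, "image_" + label + str(data_size) + ".png"))
    data_size += 1  # without this, each save targets the same filename
```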
Binary file not shown.
Binary file not shown.
@@ -19,12 +19,13 @@ Parmeter
     herpes_data: merges the herpes Dataset data into one List
     MonkeyPox_data: merges the MonkeyPox DataSet data into one List
     '''
-    def __init__(self, Training_Root, Test_Root, Validation_Root, Generator_Root, Labels) -> None:
+    def __init__(self, Training_Root, Test_Root, Validation_Root, Generator_Root, Labels, Image_Size) -> None:
         self.Training_Root = Training_Root
         self.TestRoot = Test_Root
         self.ValidationRoot = Validation_Root
         self.GeneratoRoot = Generator_Root
         self.Labels = Labels
+        self.Image_Size = Image_Size
         pass

     def process_main(self, Data_Length : int):
@@ -33,7 +34,7 @@ Parmeter
         load = Loding_Data_Root(self.Labels, self.Training_Root, self.GeneratoRoot)
         Indepentend = Cut_Indepentend_Data(self.Training_Root, self.Labels)
         Load_Tool = Load_Data_Tools()
-        Generator = Image_generator(self.GeneratoRoot, self.Labels)
+        Generator = Image_generator(self.GeneratoRoot, self.Labels, self.Image_Size)

         # separate out the test data
         test_size = 0.1
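As a point of reference for the `test_size = 0.1` hold-out above, here is a minimal sketch of a 10% split using sklearn's `train_test_split` (imported elsewhere in this repo); the toy data and the `stratify` choice are illustrative assumptions, not the project's actual splitting code:

```python
from sklearn.model_selection import train_test_split

# Toy stand-ins; the project splits image paths per label.
images = list(range(100))
labels = [i % 3 for i in range(100)]

train_x, test_x, train_y, test_y = train_test_split(
    images, labels, test_size=0.1, random_state=42, stratify=labels)

print(len(train_x), len(test_x))  # 90 10
```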
@@ -24,7 +24,7 @@ class Load_Indepentend_Data():
         print("validation_labels has " + str(len(self.validation_label)) + " entries\n")

     def get_Independent_image(self, independent_DataRoot):
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(123)

         classify_image = []
         Total_Dict_Data_Root = self.Get_Independent_data_Root(independent_DataRoot)  # read the test dataset's data
Binary file not shown.
@@ -15,5 +15,5 @@ class Entropy_Loss(nn.Module):
         # input shape has a question
         # print(f"Label result: {labels}, result: {outputs}")
         labels = labels.float()
-        loss = functional.binary_cross_entropy(outputs, labels)
+        loss = functional.cross_entropy(outputs, labels)
         return loss
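The loss swap follows the move from two classes to three: `binary_cross_entropy` expects per-class probabilities in a two-class/multi-label setup, while `cross_entropy` generalizes to any number of classes. A minimal sketch of the new call, assuming the model emits raw logits and the labels are one-hot floats (`F.cross_entropy` applies `log_softmax` itself, and accepts probability-style float targets on PyTorch 1.10+):

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)  # raw outputs for a batch of 4, 3 classes
one_hot = F.one_hot(torch.tensor([0, 2, 1, 0]), num_classes=3).float()

# cross_entropy applies log_softmax internally, so it wants logits,
# not probabilities; float one-hot targets need PyTorch 1.10 or newer.
loss = F.cross_entropy(logits, one_hot)
print(loss.item())
```

One caveat worth checking against the model head: if the network already ends in a softmax (as the old `binary_cross_entropy` usage suggests), feeding those probabilities to `cross_entropy` double-applies the normalization.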
Binary file not shown.
@@ -3,15 +3,15 @@ import numpy as np
 import torch

 class Read_image_and_Process_image:
-    def __init__(self) -> None:
+    def __init__(self, Image_Size) -> None:
+        self.Image_Size = Image_Size
         pass

     def get_data(self, path):
         '''Read a file'''
-        img_size = 512  # size of the shrunken image
         try:
             img_arr = cv2.imread(path, cv2.IMREAD_COLOR)  # read the file (color)
             # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
-            resized_arr = cv2.resize(img_arr, (img_size, img_size))  # resize the image
+            resized_arr = cv2.resize(img_arr, (self.Image_Size, self.Image_Size))  # resize the image
         except Exception as e:
             print(e)

@@ -21,11 +21,10 @@ class Read_image_and_Process_image:
         resized_arr = []

         for p in path:
-            img_size = 512  # size of the shrunken image
             try:
                 img_arr = cv2.imread(p, cv2.IMREAD_COLOR)  # read the file (color)
                 # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
-                resized_arr.append(cv2.resize(img_arr, (img_size, img_size)))  # resize the image
+                resized_arr.append(cv2.resize(img_arr, (self.Image_Size, self.Image_Size)))  # resize the image
             except Exception as e:
                 print(e)

@@ -33,9 +32,8 @@ class Read_image_and_Process_image:

     def image_data_processing(self, data, label):
         '''Process images after reading'''
-        img_size = 512
         data = np.asarray(data).astype(np.float32)  # convert the image list to an np.array
-        data = data.reshape(-1, img_size, img_size, 3)  # reshape the array
+        data = data.reshape(-1, self.Image_Size, self.Image_Size, 3)  # reshape the array
         label = np.array(label)  # convert label from a list to a numpy array
         return data, label
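A brief usage sketch of the reworked reader: the target size is now fixed once at construction rather than hard-coded as 512 inside each method. The path below is a placeholder:

```python
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image

# Hypothetical usage; the path is a placeholder.
reader = Read_image_and_Process_image(256)
img = reader.get_data("../Dataset/Training/example.png")  # resized to 256x256
data, label = reader.image_data_processing([img], ["1"])  # data.shape == (1, 256, 256, 3)
```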
Binary file not shown.
@@ -5,9 +5,11 @@ from torch.utils.data import Dataset, DataLoader, RandomSampler
 import torchvision.transforms as transforms

 class ListDataset(Dataset):
-    def __init__(self, data_list, labels_list):
+    def __init__(self, data_list, labels_list, status):
         self.data = data_list
         self.labels = labels_list
+        self.status = status
+        print(status)

     def Transform(self):
         return transforms.Compose([
@@ -49,7 +51,7 @@ class Tool:
         pass

     def Set_Labels(self):
-        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop"]
+        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop", "Have_Question_Crop"]

     def Set_Save_Roots(self):
         self.__ICG_Training_Root = "../Dataset/Training/CA_ICG"
@@ -110,14 +112,14 @@ class Tool:
     def Get_OneHot_Encording_Label(self):
         return self.__OneHot_Encording

-    def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int):
+    def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int, status : bool):
         seed = 42  # any fixed integer used as the seed
         # create the seeded random generator
         generator = torch.Generator()
         generator.manual_seed(seed)

         # create the Dataset
-        list_dataset = ListDataset(Datas, Labels)
+        list_dataset = ListDataset(Datas, Labels, status)
         sampler = RandomSampler(list_dataset, generator = generator)  # create the Sampler

         return DataLoader(dataset = list_dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, sampler = sampler)
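Stripped of the project classes, the seeding pattern above reduces to a few lines: a `torch.Generator` with a fixed seed makes the `RandomSampler`'s shuffle order repeat across runs. A self-contained sketch with toy data:

```python
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(10).float())

generator = torch.Generator()
generator.manual_seed(42)  # fixed seed, as in Tool above
sampler = RandomSampler(dataset, generator=generator)
loader = DataLoader(dataset, batch_size=2, sampler=sampler, pin_memory=True)

print([batch[0].tolist() for batch in loader])  # identical order on every run
```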
Binary file not shown.
@@ -6,6 +6,7 @@ import torch.optim as optim
 import numpy as np
 from all_models_tools.all_model_tools import call_back
 from Model_Loss.Loss import Entropy_Loss
+from merge_class.merge import merge


 class All_Step:
@@ -28,6 +29,7 @@ class All_Step:
         model_path, early_stopping, scheduler = call_back(model_name, counter, Optimizer)

         criterion = Entropy_Loss()  # use the custom loss function
+        Merge_Function = merge()
         train_losses = []
         val_losses = []
         train_accuracies = []
@@ -57,13 +59,16 @@ class All_Step:
             running_loss += loss.item()

             # collect the training predictions and labels
-            _, preds = torch.max(outputs, 1)
-            labels = np.argmax(labels.cpu().numpy())
+            Output_Values, Output_Indexs = torch.max(outputs, 1)
+            True_Values, True_Indexs = torch.max(labels, 1)
             # all_train_preds.extend(preds.cpu().numpy())
             # all_train_labels.extend(labels.cpu().numpy())

-            all_train_preds.append(preds.cpu().numpy())
-            all_train_labels.append(labels)
+            all_train_preds.append(Output_Indexs.cpu().numpy())
+            all_train_labels.append(True_Indexs.cpu().numpy())

+        all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
+        all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))

         Training_Loss = running_loss / len(self.Training_Data_And_Label)
         train_accuracy = accuracy_score(all_train_labels, all_train_preds)
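The label handling is the substantive fix in this hunk: `np.argmax` on a batch of one-hot labels flattens the whole batch and returns a single scalar index, whereas `torch.max(labels, 1)` yields one class index per sample. A small sketch of the difference:

```python
import numpy as np
import torch

one_hot = torch.tensor([[0., 1., 0.],
                        [1., 0., 0.]])  # batch of 2 one-hot labels

print(np.argmax(one_hot.numpy()))        # 1 -- a single scalar for the whole batch
values, indices = torch.max(one_hot, 1)  # per-sample max value and its index
print(indices)                           # tensor([1, 0])
```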
@@ -79,7 +84,7 @@ class All_Step:
         all_val_labels = []

         with torch.no_grad():
-            for batch_Index, (inputs, labels) in self.Validation_Data_And_Label:
+            for inputs, labels in self.Validation_Data_And_Label:
                 # inputs = np.expand_dims(inputs, axis = 0)
                 inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)

@@ -87,17 +92,20 @@ class All_Step:
                 loss = criterion(outputs, labels)
                 val_loss += loss.item()

-                # validation predictions and labels
-                _, preds = torch.max(outputs, 1)
-                labels = np.argmax(labels.cpu().numpy())
+                # collect the training predictions and labels
+                Output_Values, Output_Indexs = torch.max(outputs, 1)
+                True_Values, True_Indexs = torch.max(labels, 1)

                 # all_val_preds.extend(preds.cpu().numpy())
                 # all_val_labels.extend(labels.cpu().numpy())a

-                all_val_preds.append(preds.cpu().numpy())
-                all_val_labels.append(labels)
+                all_val_preds.append(Output_Indexs.cpu().numpy())
+                all_val_labels.append(True_Indexs.cpu().numpy())

         # compute the validation loss and accuracy
+        all_val_preds = Merge_Function.merge_data_main(all_val_preds, 0, len(all_val_preds))
+        all_val_labels = Merge_Function.merge_data_main(all_val_labels, 0, len(all_val_labels))

         val_loss /= len(self.Validation_Data_And_Label)
         val_accuracy = accuracy_score(all_val_labels, all_val_preds)

@@ -120,24 +128,30 @@ class All_Step:
         cnn_model.eval()
         True_Label, Predict_Label = [], []
         loss = 0.0
+        Merge_Function = merge()

         with torch.no_grad():
-            for batch_Index, (images, labels) in self.Test_Data_And_Label:
+            for images, labels in self.Test_Data_And_Label:
                 # images = np.expand_dims(images, axis = 0)
                 images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)

                 outputs = cnn_model(images)

-                _, predicted = torch.max(outputs, 1)
-                labels = np.argmax(labels.cpu().numpy())
+                # collect the training predictions and labels
+                Output_Values, Output_Indexs = torch.max(outputs, 1)
+                True_Values, True_Indexs = torch.max(labels, 1)

-                Predict_Label.append(predicted.cpu().numpy())
-                True_Label.append(labels)
+                True_Label.append(Output_Indexs.cpu().numpy())
+                Predict_Label.append(True_Indexs.cpu().numpy())

                 # Predict_Label.extend(predicted.cpu().numpy())
                 # True_Label.extend(labels.cpu().numpy())

         loss /= len(self.Test_Data_And_Label)

+        True_Label = Merge_Function.merge_data_main(True_Label, 0, len(True_Label))
+        Predict_Label = Merge_Function.merge_data_main(Predict_Label, 0, len(Predict_Label))

         accuracy = accuracy_score(True_Label, Predict_Label)
         precision = precision_score(True_Label, Predict_Label)
         recall = recall_score(True_Label, Predict_Label)
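Two cautions on the metrics at the end of the test step. First, with three classes `precision_score` and `recall_score` default to `average='binary'` and raise a `ValueError`, so an explicit averaging strategy is needed. Second, the new code appends the model's indices to `True_Label` and the ground-truth indices to `Predict_Label`; the swap is harmless for accuracy, which is symmetric, but it flips the roles in precision and recall. A sketch using macro averaging (an assumption on my part, not something this commit does):

```python
from sklearn.metrics import accuracy_score, precision_score, recall_score

y_true = [0, 2, 1, 2]  # ground-truth class indices
y_pred = [0, 1, 1, 2]  # predicted class indices

accuracy = accuracy_score(y_true, y_pred)
# The default average='binary' raises on 3 classes; macro is one option.
precision = precision_score(y_true, y_pred, average="macro")
recall = recall_score(y_true, y_pred, average="macro")
print(accuracy, precision, recall)
```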
Binary file not shown.
Binary file not shown.
@@ -1,5 +1,3 @@
-from all_models_tools.all_model_tools import call_back
-from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from draw_tools.draw import plot_history, Confusion_Matrix_of_Two_Classification
 from Load_process.Load_Indepentend import Load_Indepentend_Data
 from _validation.ValidationTheEnterData import validation_the_enter_data
@@ -7,7 +5,6 @@ from Load_process.file_processing import Process_File
 from merge_class.merge import merge
 from sklearn.metrics import confusion_matrix
 from experiments.pytorch_Model import ModifiedXception
-from Image_Process.Image_Generator import Image_generator
 from experiments.Model_All_Step import All_Step
 import pandas as pd
 import numpy as np
@@ -49,8 +46,6 @@ class experiments():

         self.validation_obj = validation_the_enter_data()  # instantiate the validation object
         self.cut_image = Load_Indepentend_Data(self.Topic_Tool.Get_Data_Label(), self.Topic_Tool.Get_OneHot_Encording_Label())  # instantiate the image-splitting object
-        self.image_processing = Read_image_and_Process_image()
-        self.ImageGenerator = Image_generator("", "")
         self.merge = merge()

         self.model_name = Model_Name  # a name telling me which model is in use (a pretrained model or one I designed myself)
@@ -82,8 +77,8 @@ class experiments():
         self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
         self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label

-        Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1)
-        Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, self.train_batch_size)
+        Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1, True)
+        Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, self.train_batch_size, True)

         # self.Grad = Grad_CAM(self.Topic_Tool.Get_Data_Label(), self.test_label, self.experiment_name, self.convolution_name)
main.py
@@ -32,19 +32,20 @@ if __name__ == "__main__":
     tool.Set_OneHotEncording(Labels)
     Encording_Label = tool.Get_OneHot_Encording_Label()
     Label_Length = len(Labels)
-    Classification = 2  # number of classes
+    Classification = 3  # number of classes

     Model_Name = "Xception"  # a name telling me which model is in use (a pretrained model or one I designed myself)
     Experiment_Name = "Xception Skin to train Normal stomach cancer"
     Generator_Batch_Size = 50
     Epoch = 10000
     Train_Batch_Size = 50
+    Image_Size = 256
     Convolution_Name = "block14_sepconv2"

     Prepare = Load_Data_Prepare()
-    loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels)
+    loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels, Image_Size)
     experiment = experiments(Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, Convolution_Name, tool, Classification, Status)
-    image_processing = Read_image_and_Process_image()
+    image_processing = Read_image_and_Process_image(Image_Size)
     Merge = merge()
     Calculate_Tool = Calculate()

@@ -96,9 +97,9 @@ if __name__ == "__main__":
     # training_data = list(total_trains)  # convert the data type

     training_data, train_label = image_processing.image_data_processing(trains_Data_Image, training_label)  # normalize the files read in and convert the labels to numpy array format
-    Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size)
+    Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size, True)

+    # inspect the Dataloader's shape
     for idx, data in enumerate(Training_Dataset):
         datas = data[0]
         print(f"Shape: {datas.shape}")
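For reference, a sketch of how the new three-class configuration hangs together; `Set_OneHotEncording`'s exact output isn't shown in this diff, so the encoding below is an assumption:

```python
Labels = ["stomach_cancer_Crop", "Normal_Crop", "Have_Question_Crop"]
Classification = 3  # must match len(Labels) now that a third class exists
Image_Size = 256

# Assumed one-hot mapping; the real one comes from tool.Set_OneHotEncording.
one_hot = {label: [float(i == j) for j in range(Classification)]
           for i, label in enumerate(Labels)}
print(one_hot["Have_Question_Crop"])  # [0.0, 0.0, 1.0]
```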
Binary file not shown.
@@ -1,6 +1,5 @@
 from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from sklearn.model_selection import train_test_split
-from merge_class.merge import merge
 from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
 from Load_process.LoadData import Load_Data_Prepare, Process_File, Load_Data_Tools
 import shutil
@@ -21,7 +20,7 @@ class Cut_Indepentend_Data():
         self.Cut_Of_Independent_Data(get_all_image_data, Indepentend_Data_Root, Test_Size)

     def Balance_Cut_Of_Independent_Data(self, Independent_Dict_Data_Content, Test_Size):
-        image_processing = Read_image_and_Process_image()
+        image_processing = Read_image_and_Process_image(123)
         Prepare = Load_Data_Prepare()
         Prepare.Set_Data_Content([], len(self.Labels))
         Prepare.Set_Data_Dictionary(self.Labels, Prepare.Get_Data_Content(), 2)
@@ -36,8 +35,7 @@ class Cut_Indepentend_Data():

     def Cut_Of_Independent_Data(self, Independent_Dict_Data_Content, IndependentDataRoot, Test_Size):
         '''Split out independent data (e.g. Validation, training)'''
-        image_processing = Read_image_and_Process_image()
-        Prepaer = Load_Data_Prepare()
+        image_processing = Read_image_and_Process_image(122)
         File = Process_File()
         i = 0