# Source metadata (extraction artifact): 133 lines, 6.1 KiB, Python
from experiments.experiment import experiments
from Image_Process.load_and_ImageGenerator import Load_ImageGenerator
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from Training_Tools.Tools import Tool
from model_data_processing.processing import shuffle_data
from Load_process.LoadData import Load_Data_Prepare
from Calculate_Process.Calculate import Calculate
from merge_class.merge import merge

import time
import torch
import os


if __name__ == "__main__":
    # Report whether CUDA is usable before training starts.
    if not torch.cuda.is_available():
        print("CUDA不可用\n")
    else:
        print(f"CUDA可用,數量為{torch.cuda.device_count()}\n")

    # Selects which dataset to use; change this when switching datasets.
    Status = 2

    tool = Tool()
    tool.Set_Labels()
    tool.Set_Save_Roots()

    Labels = tool.Get_Data_Label()
    # Dataset roots for the chosen Status.
    Training_Root, Testing_Root, Validation_Root = tool.Get_Save_Roots(Status)
    Generator_Root = tool.Get_Generator_Save_Roots(Status)

    # Build the one-hot encoding for the labels.
    tool.Set_OneHotEncording(Labels)
    Encording_Label = tool.Get_OneHot_Encording_Label()
    Label_Length = len(Labels)

    Prepare = Load_Data_Prepare()
    loading_data = Load_ImageGenerator(Training_Root, Testing_Root, Validation_Root, Generator_Root, Labels)
    experiment = experiments(tool, Status)
    image_processing = Read_image_and_Process_image()
    Merge = merge()
    Calculate_Tool = Calculate()

    # Number of independent training runs to aggregate statistics over.
    counter = 5

    for i in range(counter):
        # Load the raw data for every label, then shuffle within classes.
        Data_Dict_Data = loading_data.process_main(Label_Length)
        Data_Dict_Data = shuffle_data(Data_Dict_Data, Labels, 2)

        # Balance the two classes by undersampling: truncate the larger
        # class to the size of the smaller one. (Replaces the previous
        # duplicated append-loop branches with an equivalent slice.)
        size_first = len(Data_Dict_Data[Labels[0]])
        size_second = len(Data_Dict_Data[Labels[1]])
        Train_Size = min(size_first, size_second)
        larger_label = Labels[0] if size_first >= size_second else Labels[1]
        Data_Dict_Data[larger_label] = Data_Dict_Data[larger_label][:Train_Size]

        # Report the per-class and total data counts.
        print("Negative Data有 " + str(len(Data_Dict_Data[Labels[1]])) + " 筆資料")
        print("Positive Data有 " + str(len(Data_Dict_Data[Labels[0]])) + " 筆資料")
        print("總共有 " + str(len(Data_Dict_Data[Labels[0]]) + len(Data_Dict_Data[Labels[1]])) + " 筆資料")

        # Build one-hot label lists matching the (balanced) data counts.
        Negative_Num = image_processing.make_label_list(Train_Size, Encording_Label[1])
        Positive_Num = image_processing.make_label_list(Train_Size, Encording_Label[0])

        # Pack data + labels into the final dict layout expected downstream.
        Prepare.Set_Final_Dict_Data(Labels, Data_Dict_Data, [Positive_Num, Negative_Num], 2)
        Final_Dict_Data = Prepare.Get_Final_Data_Dict()
        keys = list(Final_Dict_Data.keys())

        # Merge the two classes into one training list, and likewise for labels.
        training_data = Merge.merge_all_image_data(Final_Dict_Data[keys[0]], Final_Dict_Data[keys[1]])
        training_label = Merge.merge_all_image_data(Final_Dict_Data[keys[2]], Final_Dict_Data[keys[3]])

        start = time.time()
        # Multi-threaded file reading / augmentation.
        trains_Data_Image = image_processing.Data_Augmentation_Image(training_data)
        # Shuffle data and labels together.
        total_trains, train_label = shuffle_data(trains_Data_Image, training_label)
        training_data = list(total_trains)

        print(len(training_data))
        # Normalize images and convert labels to numpy arrays.
        training_data, train_label = image_processing.image_data_processing(training_data, train_label)
        training_data = image_processing.normalization(training_data)

        end = time.time()
        print("\n\n\n讀取訓練資料(70000)執行時間:%f 秒\n\n" % (end - start))

        # Run one training experiment and accumulate its metrics.
        loss, accuracy, precision, recall, AUC, f = experiment.processing_main(training_data, train_label, i)
        Calculate_Tool.Append_numbers(loss, accuracy, precision, recall, AUC, f)

    # Summarize metrics across all runs.
    print("實驗結果")
    print("--------------------------------------------")
    print("平均值: ")
    print(Calculate_Tool.Calculate_Mean())
    print("標準差: ")
    print(Calculate_Tool.Calculate_Std())
    print("結果: ")
    print(Calculate_Tool.Output_Style())