20250213 commit: Add a list-based Dataset to the dataset script

This commit is contained in:
2025-02-13 03:11:54 +08:00
parent bb1ab2b541
commit c9d9e7882e
32 changed files with 277 additions and 163 deletions

Binary file not shown.

View File

@@ -1,8 +1,8 @@
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
from keras.preprocessing.image import ImageDataGenerator
from Load_process.LoadData import Load_Data_Prepare
from torchvision import transforms
class Image_generator():
'''Builds the data augmentation pipelines'''
@@ -155,47 +155,35 @@ class Image_generator():
dtype : the data type used for the generated arrays.
'''
if judge == 1:
datagen = ImageDataGenerator(
rotation_range=30, # rotate the image
width_shift_range=0.1, # random horizontal shift; shift distance = image width x parameter (0.1)
height_shift_range=0.1, # random vertical shift; shift distance = image height x parameter (0.1)
zoom_range=0.2, # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
horizontal_flip=False, # horizontal flip
vertical_flip=False, # vertical flip
fill_mode='nearest' # fill gaps created by rotation or shifting
)
if judge == 2:
datagen = ImageDataGenerator(
rotation_range=180,
width_shift_range=0.2,
height_shift_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'
)
if judge == 3:
datagen = ImageDataGenerator(
rotation_range=45, # rotate the image
width_shift_range=0.02, # random horizontal shift; shift distance = image width x parameter
height_shift_range=0.02, # random vertical shift; shift distance = image height x parameter
shear_range = 0.02,
zoom_range=0.02, # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
horizontal_flip = True,
fill_mode = "reflect"
)
if judge == 4: # data augmentation from the second paper
datagen = ImageDataGenerator(
rotation_range=50, # rotate the image
width_shift_range=0.2, # random horizontal shift; shift distance = image width x parameter
height_shift_range=0.2, # random vertical shift; shift distance = image height x parameter
shear_range = 0.25,
zoom_range=0.1, # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
channel_shift_range = 20 # range of random channel shifts
)
if judge == 5: # data augmentation from the first paper
datagen = ImageDataGenerator(rescale = 1 / 255)
return datagen
return transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
])
elif judge == 2:
return transforms.Compose([
transforms.RandomRotation(180),
transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
])
elif judge == 3:
return transforms.Compose([
transforms.RandomRotation(45),
transforms.RandomResizedCrop(224, scale=(0.9, 1.0)),
transforms.RandomAffine(degrees=20, shear=0.2),
transforms.ColorJitter(brightness=0.2, contrast=0.2),
transforms.RandomHorizontalFlip(),
])
elif judge == 4:
return transforms.Compose([
transforms.RandomRotation(50),
transforms.RandomResizedCrop(224, scale=(0.75, 1.0)),
transforms.RandomAffine(degrees=30, shear=0.25),
transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.2),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
])
else:
return transforms.ToTensor() # normalizes values to [0, 1]
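For reference, a minimal usage sketch (not part of this commit) of the torchvision pipeline returned above; the judge value and image path are placeholders:

from PIL import Image
from Image_Process.Image_Generator import Image_generator

augmenter = Image_generator("", "")         # constructor arguments as used in experiment.py
transform = augmenter.Generator_Content(1)  # judge=1 selects the light augmentation pipeline
image = Image.open("example.jpg")           # hypothetical input image
augmented = transform(image)                # still a PIL image; append transforms.ToTensor() if a tensor is needed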

View File

@@ -46,4 +46,8 @@ class Process_File():
self.JudgeRoot_MakeDir(model_dir)
modelfiles = self.Make_Save_Root(File_Name + ".txt", model_dir) # join the file name and directory into a full save path
with open(modelfiles, mode = 'a') as file:
file.write(content)
file.write(content)
def Save_PIL_File(self, FileName, save_root, image): # save a PIL image to disk
save_root = self.Make_Save_Root(FileName, save_root)
image.save(save_root)

View File

@@ -1,5 +1,6 @@
from torch import nn
from torch.nn import functional
import torch
class Entropy_Loss(nn.Module):
@@ -9,7 +10,10 @@ class Entropy_Loss(nn.Module):
def forward(self, outputs, labels):
# example: use mean squared error as the loss calculation
# outputs = torch.argmax(outputs, 1)
# outputs = outputs.float()
outputs = outputs.float()
# note: the input shape may be incorrect
# print(f"Label result: {labels}, result: {outputs}")
labels = labels.float()
loss = functional.binary_cross_entropy(outputs, labels)
loss = functional.binary_cross_entropy(outputs[0], labels)
return loss
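Note that functional.binary_cross_entropy expects probabilities and float targets of the same shape. A minimal standalone sketch with hypothetical tensors (not part of this commit):

import torch
from torch.nn import functional

outputs = torch.sigmoid(torch.randn(1, 2))   # hypothetical model outputs squashed to [0, 1], shape [1, 2]
labels = torch.tensor([[0.0, 1.0]])          # float targets with the same shape
loss = functional.binary_cross_entropy(outputs[0], labels[0])
print(loss.item())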

Binary file not shown.

View File

@@ -1,5 +1,6 @@
import cv2
import numpy as np
import torch
class Read_image_and_Process_image:
def __init__(self) -> None:

View File

@@ -1,7 +1,21 @@
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from torch.nn import functional
import torch
from torch.utils.data import Dataset, DataLoader
class ListDataset(Dataset):
def __init__(self, data_list, labels_list):
self.data = data_list
self.labels = labels_list
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sample = self.data[idx]
label = self.labels[idx]
return sample, label
class Tool:
def __init__(self) -> None:
self.__ICG_Training_Root = ""
@@ -48,17 +62,23 @@ class Tool:
self.__Normal_ImageGenerator_Data_Root = "../Dataset/Training/Normal_ImageGenerator"
self.__Comprehensive_Generator_Root = "../Dataset/Training/Comprehensive_ImageGenerator"
def Set_OneHotEncording(self, content, Number_Of_Classes):
OneHot_labels = functional.one_hot(content, Number_Of_Classes)
return OneHot_labels
def Set_OneHotEncording(self, content):
Counter = []
for i in range(len(content)):
Counter.append(i)
Counter = torch.tensor(Counter)
self.__OneHot_Encording = functional.one_hot(Counter, len(content))
pass
def Set_Zips(self, Datas, Labels, Address_Name):
if Address_Name == "Training":
self.Training_Zip = zip(Datas, Labels)
if Address_Name == "Validation":
self.Validation_Zip = zip(Datas, Labels)
if Address_Name == "Testing":
self.Testing_Zip = zip(Datas, Labels)
def Set_Data_To_DataSet(self, Datas : list, Labels : list, Batch_Size : int):
# create the Dataset
dataset = ListDataset(Datas, Labels)
return DataLoader(dataset = dataset, batch_size = Batch_Size, shuffle=True, num_workers = 0, pin_memory=True)
def Get_Data_Label(self):
'''
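A minimal sketch (not part of this commit) of how the new Set_Data_To_DataSet helper could be used; the tensors and labels are hypothetical:

import torch
from Training_Tools.Tools import Tool

tool = Tool()
samples = [torch.randn(3, 224, 224) for _ in range(4)]   # hypothetical image tensors
labels = [0, 1, 0, 1]                                     # hypothetical integer labels
loader = tool.Set_Data_To_DataSet(samples, labels, 2)
for batch, batch_labels in loader:
    print(batch.shape, batch_labels)                      # e.g. torch.Size([2, 3, 224, 224]) tensor([1, 0])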

Binary file not shown.

Binary file not shown.

View File

@@ -50,7 +50,7 @@ def call_back(model_name, index, optimizer):
model_dir = '../Result/save_the_best_model/' + model_name
File.JudgeRoot_MakeDir(model_dir)
modelfiles = File.Make_Save_Root('best_model( ' + str(datetime.date.today()) + " )-" + str(index) + ".weights.h5", model_dir)
modelfiles = File.Make_Save_Root('best_model( ' + str(datetime.date.today()) + " )-" + str(index) + ".weights.pt", model_dir)
# model_mckp = ModelCheckpoint(modelfiles, monitor='val_loss', save_best_only=True, save_weights_only = True, mode='auto')

View File

@@ -4,7 +4,7 @@ import torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torchmetrics.functional import auroc
import torch.optim as optim
import numpy as np
from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss
@@ -21,6 +21,10 @@ class All_Step:
self.Epoch = Epoch
self.Number_Of_Classes = Number_Of_Classes
self.Training_Data_Length = len(list(zip(*Training_Data_And_Label))[1])
self.Testing_Data_Length = len(list(zip(*Test_Data_And_Label))[1])
self.Validation_Data_Length = len(list(zip(*Validation_Data_And_Label))[1])
pass
def Training_Step(self, model_name, counter):
@@ -34,42 +38,45 @@ class All_Step:
train_accuracies = []
val_accuracies = []
for epoch in range(self.Epoch):
self.Model.train()
for epoch in range(self.Epoch): # training loop
self.Model.train() # switch to training mode
running_loss = 0.0
all_train_preds = []
all_train_labels = []
epoch_iterator = tqdm(self.Training_Data_And_Label, desc= "Training (Epoch %d)" % epoch)
epoch_iterator = tqdm(self.Training_Data_And_Label, desc= "Training (Epoch %d)" % epoch) # show a progress bar
for inputs, labels in epoch_iterator:
# labels = np.reshape(labels, (int(labels.shape[0]), 1))
inputs, OneHot_labels = inputs.to(self.device), OneHot_labels.to(self.device)
# inputs, labels = inputs.cuda(), labels.cuda()
# print(inputs.shape)
# the input is 3-D but the model expects 4-D, so add an extra batch dimension
inputs = np.expand_dims(inputs, axis = 0)
# print(inputs.shape)
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)
Optimizer.zero_grad()
outputs = self.Model(inputs)
loss = criterion(outputs, OneHot_labels)
loss = criterion(outputs, labels)
loss.backward()
Optimizer.step()
running_loss += loss.item()
# collect training predictions and labels
_, preds = torch.max(outputs, 1)
all_train_preds.extend(preds.cpu().numpy())
all_train_labels.extend(labels.cpu().numpy())
labels = np.argmax(labels.cpu().numpy())
# all_train_preds.extend(preds.cpu().numpy())
# all_train_labels.extend(labels.cpu().numpy())
all_train_preds.append(preds.cpu().numpy())
all_train_labels.append(labels)
Training_Loss = running_loss/len(self.Training_Data_And_Label)
# all_train_labels = torch.FloatTensor(all_train_labels)
# all_train_labels = torch.argmax(all_train_labels, 1)
Training_Loss = running_loss / self.Training_Data_Length
train_accuracy = accuracy_score(all_train_labels, all_train_preds)
train_losses.append(Training_Loss)
train_accuracies.append(train_accuracy)
print(f"Epoch [{epoch+1}/{self.epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')
print(f"Epoch [{epoch+1}/{self.Epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')
self.Model.eval()
val_loss = 0.0
@@ -78,24 +85,30 @@ class All_Step:
with torch.no_grad():
for inputs, labels in self.Validation_Data_And_Label:
inputs, OneHot_labels = inputs.to(self.device), labels.to(self.device)
inputs = np.expand_dims(inputs, axis = 0)
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)
outputs = self.Model(inputs)
loss = criterion(outputs, OneHot_labels)
loss = criterion(outputs, labels)
val_loss += loss.item()
# validation predictions and labels
_, preds = torch.max(outputs, 1)
all_val_preds.extend(preds.cpu().numpy())
all_val_labels.extend(labels.cpu().numpy())
labels = np.argmax(labels.cpu().numpy())
# all_val_preds.extend(preds.cpu().numpy())
# all_val_labels.extend(labels.cpu().numpy())
all_val_preds.append(preds.cpu().numpy())
all_val_labels.append(labels)
# compute validation loss and accuracy
val_loss /= len(list(self.Validation_Data_And_Label))
val_loss /= self.Validation_Data_Length
val_accuracy = accuracy_score(all_val_labels, all_val_preds)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(f"Epoch [{epoch+1}/{self.epoch}], Loss: {val_loss:.4f}, Accuracy: {val_accuracy:0.2f}")
print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}")
early_stopping(val_loss, self.Model, model_path)
if early_stopping.early_stop:
@@ -114,18 +127,25 @@ class All_Step:
loss = 0.0
with torch.no_grad():
for images, labels in self.Test_Data_And_Label:
images, OneHot_labels = images.to(self.device), OneHot_labels.to(self.device)
images = np.expand_dims(images, axis = 0)
images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)
outputs = cnn_model(images)
_, predicted = torch.max(outputs, 1)
Predict_Label.extend(predicted.cpu().numpy())
True_Label.extend(labels.cpu().numpy())
loss /= len(self.Test_Data_And_Label)
_, predicted = torch.max(outputs, 1)
labels = np.argmax(labels.cpu().numpy())
Predict_Label.append(predicted.cpu().numpy())
True_Label.append(labels)
# Predict_Label.extend(predicted.cpu().numpy())
# True_Label.extend(labels.cpu().numpy())
loss /= self.Testing_Data_Length
accuracy = accuracy_score(True_Label, Predict_Label)
precision = precision_score(True_Label, Predict_Label)
recall = recall_score(True_Label, Predict_Label)
AUC = auroc(True_Label, Predict_Label, task = ["Stomatch_Cancer", "Normal"])
AUC = auroc(True_Label, Predict_Label, task = "binary")
f1 = f1_score(True_Label, Predict_Label)
return loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label
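For reference, torchmetrics' functional auroc with task="binary" expects tensors rather than Python lists; a minimal sketch with hypothetical values (not part of this commit):

import torch
from torchmetrics.functional import auroc

preds = torch.tensor([0.9, 0.2, 0.7, 0.4])   # hypothetical predicted probabilities for the positive class
target = torch.tensor([1, 0, 1, 0])          # hypothetical ground-truth labels
score = auroc(preds, target, task="binary")
print(score)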

Binary file not shown.

Binary file not shown.

View File

@@ -5,11 +5,10 @@ from Load_process.Load_Indepentend import Load_Indepentend_Data
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
from merge_class.merge import merge
from draw_tools.Grad_cam import Grad_CAM
from sklearn.metrics import confusion_matrix
from experiments.pytorch_Model import ModifiedXception
from Image_Process.Image_Generator import Image_generator
from Model_All_Step import All_Step
from experiments.Model_All_Step import All_Step
import pandas as pd
import numpy as np
import torch
@@ -51,11 +50,11 @@ class experiments():
self.validation_obj = validation_the_enter_data() # instantiate the validation object
self.cut_image = Load_Indepentend_Data(self.Topic_Tool.Get_Data_Label(), self.Topic_Tool.Get_OneHot_Encording_Label()) # instantiate the image-splitting object
self.image_processing = Read_image_and_Process_image()
self.ImageGenerator = Image_generator("", "")
self.merge = merge()
self.model_name = "Xception" # records which model is used (a pretrained model or a custom-built one)
self.experiment_name = "Xception Skin to train Normal stomach cancer"
# self.file_name = "Remove background of Chickenpox with normal image"
self.generator_batch_size = 50
self.epoch = 10000
self.train_batch_size = 128
@@ -82,9 +81,14 @@ class experiments():
# hand the processed test and validation data to this object's attributes
self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label
self.Topic_Tool.Set_Zips(train, train_label, "Training")
self.Topic_Tool.Set_Zips(self.test, self.test_label, "Testing")
self.Topic_Tool.Set_Zips(self.validation, self.validation_label, "Validation")
train = self.Preprocess_Image_Data(train)
self.test = self.Preprocess_Image_Data(self.test)
self.validation = self.Preprocess_Image_Data(self.validation)
self.Topic_Tool.Set_Data_To_DataSet(train, train_label, "Training")
self.Topic_Tool.Set_Data_To_DataSet(self.test, self.test_label, "Testing")
self.Topic_Tool.Set_Data_To_DataSet(self.validation, self.validation_label, "Validation")
self.Training_Zip, self.Testing_Zip, self.Validation_Zip = self.Topic_Tool.Get_Zip()
@@ -114,13 +118,27 @@ class experiments():
def construct_model(self):
'''Selects which model to use for this training run'''
cnn_model = ModifiedXception()
cnn_model = ModifiedXception(self.Number_Of_Classes)
if torch.cuda.device_count() > 1:
cnn_model = nn.DataParallel(cnn_model)
cnn_model = cnn_model.to(self.device)
return cnn_model
def Preprocess_Image_Data(self, Image_Datas):
transform = self.ImageGenerator.Generator_Content(5)
Transform_Image = []
for Image in Image_Datas:
Image = transform(Image)
Transform_Image.append(Image)
# Transform_Image.append(self.ImageGenerator._apply_transform(transform, Image))
Transform_Image = torch.tensor(np.array(Transform_Image))
print(Transform_Image.shape)
return Transform_Image
def record_matrix_image(self, True_Labels, Predict_Labels, model_name, index):
'''Plots the confusion matrix (heat map)'''

View File

@@ -2,30 +2,49 @@ import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision import models
import timm
class ModifiedXception(nn.Module):
def __init__(self):
def __init__(self, num_classes):
super(ModifiedXception, self).__init__()
# load the pretrained Xception model and drop the final (fc) layer
self.base_model = timm.create_model('xception', pretrained=True)
self.base_model.fc = nn.Identity() # remove the original fully connected layer
self.base_model = timm.create_model(
'xception',
pretrained=True,
features_only=True, # keep only the feature-extraction part
out_indices=[3] # select which feature stage to return (based on the model structure)
)
# add global average pooling, a hidden layer and an output layer
self.global_avg_pool = nn.AdaptiveAvgPool2d(1) # global average pooling
self.hidden_layer = nn.Linear(2048, 1370) # hidden layer; the input size depends on Xception's output size
self.output_layer = nn.Linear(1370, 2) # output layer, sized to the number of classes
# custom classification head
self.custom_head = nn.Sequential(
nn.AdaptiveAvgPool2d(1), # Global Average Pooling,
nn.Flatten(),
nn.Linear(728, 368), # the stage-3 Xception features have 728 channels
nn.ReLU(), # optional activation
nn.Linear(368, num_classes),
nn.Sigmoid()
)
# self.base_model.fc = nn.Identity() # remove the original fully connected layer
# activation and dropout
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.6)
# # add global average pooling, a hidden layer and an output layer
# self.global_avg_pool = nn.AdaptiveAvgPool2d(1) # global average pooling
# self.hidden_layer = nn.Linear(2048, 1370) # hidden layer; the input size depends on Xception's output size
# self.output_layer = nn.Linear(1370, 2) # output layer, sized to the number of classes
# # activation and dropout
# self.relu = nn.ReLU()
# self.dropout = nn.Dropout(0.6)
def forward(self, x):
x = self.base_model(x) # Xception backbone
x = self.global_avg_pool(x) # global average pooling
x = self.relu(self.hidden_layer(x)) # hidden layer + ReLU
x = self.dropout(x) # Dropout
x = self.output_layer(x) # output layer
return x
x = x[0]
output = self.custom_head(x)
# x = self.global_avg_pool(x) # global average pooling
# x = self.relu(self.hidden_layer(x)) # hidden layer + ReLU
# x = self.dropout(x) # Dropout
# x = self.output_layer(x) # output layer
return output
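A small sketch (not part of this commit) to check the shape produced by the features_only backbone; with out_indices=[3] the timm xception is expected to return roughly 728-channel feature maps, which matches the Linear(728, 368) head above:

import timm
import torch

backbone = timm.create_model('xception', pretrained=False, features_only=True, out_indices=[3])
x = torch.randn(1, 3, 224, 224)   # hypothetical input batch
feats = backbone(x)               # a list with one feature map
print(feats[0].shape)             # expected to be about [1, 728, 14, 14]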

main.py
View File

@@ -2,7 +2,7 @@ from experiments.experiment import experiments
from Image_Process.load_and_ImageGenerator import Load_ImageGenerator
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from Training_Tools.Tools import Tool
from model_data_processing.processing import shuffle_data
from model_data_processing.processing import shuffle_data, Balance_Process
from Load_process.LoadData import Load_Data_Prepare
from Calculate_Process.Calculate import Calculate
from merge_class.merge import merge
@@ -32,11 +32,11 @@ if __name__ == "__main__":
tool.Set_OneHotEncording(Labels)
Encording_Label = tool.Get_OneHot_Encording_Label()
Label_Length = len(Labels)
Classification = 2 # number of classes
Gneerator_size = 0
Prepare = Load_Data_Prepare()
loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels)
experiment = experiments(tool, Status)
experiment = experiments(tool, Classification, Status)
image_processing = Read_image_and_Process_image()
Merge = merge()
Calculate_Tool = Calculate()
@@ -46,37 +46,32 @@ if __name__ == "__main__":
for i in range(0, counter, 1): # train the prescribed number of times
# load the data
Data_Dict_Data = loading_data.process_main(Label_Length)
Data_Dict_Data = shuffle_data(Data_Dict_Data, Labels, 2)
tmp = []
Train_Size = 0
if len(Data_Dict_Data[Labels[0]]) >= len(Data_Dict_Data[Labels[1]]):
Train_Size = len(Data_Dict_Data[Labels[1]])
for j in range(Train_Size):
tmp.append(Data_Dict_Data[Labels[0]][j])
Data_Dict_Data[Labels[0]] = tmp
else:
Train_Size = len(Data_Dict_Data[Labels[0]])
for j in range(Train_Size):
tmp.append(Data_Dict_Data[Labels[1]][j])
Data_Dict_Data[Labels[1]] = tmp
Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)
# print a summary
print("Negative Data has " + str(len(Data_Dict_Data[Labels[1]])) + " samples")
print("Positive Data has " + str(len(Data_Dict_Data[Labels[0]])) + " samples")
print("In total there are " + str(len(Data_Dict_Data[Labels[0]]) + len(Data_Dict_Data[Labels[1]])) + " samples")
# print a summary
print("Each class has " + str(Train_Size) + " samples")
total = 0
for j in range(Label_Length):
total += Train_Size
print("總共有 " + str(total) + " 筆資料")
# create label lists matching the number of data samples
Negative_Num = image_processing.make_label_list(Train_Size, Encording_Label[1])
Positive_Num = image_processing.make_label_list(Train_Size, Encording_Label[0])
Classes = []
for encording in Encording_Label:
Classes.append(image_processing.make_label_list(Train_Size, encording))
# assemble the data into a dict
Prepare.Set_Final_Dict_Data(Labels, Data_Dict_Data, [Positive_Num, Negative_Num], 2)
Prepare.Set_Final_Dict_Data(Labels, Data_Dict_Data, Classes, Label_Length)
Final_Dict_Data = Prepare.Get_Final_Data_Dict()
keys = list(Final_Dict_Data.keys())
training_data = Merge.merge_all_image_data(Final_Dict_Data[keys[0]], Final_Dict_Data[keys[1]]) # merge the training data into one list
training_label = Merge.merge_all_image_data(Final_Dict_Data[keys[2]], Final_Dict_Data[keys[3]]) # merge the training labels into one list
for i in range(2, Label_Length):
training_data = Merge.merge_all_image_data(training_data, Final_Dict_Data[keys[i]]) # merge the training data into one list
training_label = Merge.merge_all_image_data(Final_Dict_Data[keys[Label_Length]], Final_Dict_Data[keys[Label_Length + 1]]) # merge the training labels into one list
for i in range(Label_Length + 2, 2 * Label_Length):
training_label = Merge.merge_all_image_data(training_label, Final_Dict_Data[keys[i]]) # merge the training labels into one list
start = time.time()
trains_Data_Image = image_processing.Data_Augmentation_Image(training_data) # read the files with multiple threads
@@ -85,7 +80,10 @@ if __name__ == "__main__":
print(len(training_data))
training_data, train_label = image_processing.image_data_processing(training_data, train_label) # normalize the loaded files and convert the labels to numpy arrays
training_data = image_processing.normalization(training_data)
# training_data = image_processing.normalization(training_data)
# training_data = training_data.permute(0, 3, 1, 2)
print(training_data.shape)
end = time.time()
print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))

View File

@@ -36,4 +36,17 @@ def shuffle_data(image, label, mode = 1):
shuffle_image[Label] = image[Label]
random.shuffle(shuffle_image[Label])
return shuffle_image
return shuffle_image
def Balance_Process(Data_Content, Labels):
Data_Dict_Data = shuffle_data(Data_Content, Labels, 2)
Train_Size = 0
Train_Size = min(len(Data_Dict_Data[Labels[0]]), len(Data_Dict_Data[Labels[1]]))
for i in range(1, len(Labels) - 1):
Train_Size = min(Train_Size, len(Data_Dict_Data[Labels[i + 1]]))
for i in range(len(Labels)):
Data_Dict_Data[Labels[i]] = Data_Dict_Data[Labels[i]][0 : Train_Size]
return Data_Dict_Data, Train_Size
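A minimal usage sketch for the new Balance_Process helper (not part of this commit); the dictionary keys and file names are hypothetical:

from model_data_processing.processing import Balance_Process

data = {"Positive": ["p1.jpg", "p2.jpg", "p3.jpg"], "Negative": ["n1.jpg", "n2.jpg"]}
balanced, size = Balance_Process(data, ["Positive", "Negative"])
print(size)                       # 2: every class is truncated to the smallest class size
print(len(balanced["Positive"]))  # 2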

View File

@@ -1,35 +1,64 @@
import paramiko
from scp import SCPClient
import os
import pexpect
# import paramiko
# from scp import SCPClient
# import os
# import pexpect
# def createSSHClient(server, port, user, password):
# client = paramiko.SSHClient()
# client.load_system_host_keys()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
# client.connect(server, port, user, password)
# # def createSSHClient(server, port, user, password):
# # client = paramiko.SSHClient()
# # client.load_system_host_keys()
# # client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
# # client.connect(server, port, user, password)
# return client
# # return client
# ssh = createSSHClient("10.1.29.28", 31931, "root", "whitekirin")
# # ssh = createSSHClient("10.1.29.28", 31931, "root", "whitekirin")
# # os.mkdir("Original_ResNet101V2_with_NPC_Augmentation_Image")
# # with open("Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "w") as file:
# # pass
# # # os.mkdir("Original_ResNet101V2_with_NPC_Augmentation_Image")
# # # with open("Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "w") as file:
# # # pass
# with SCPClient(ssh.get_transport()) as scp:
# scp.get("/mnt/c/張晉嘉/stomach_cancer/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt")
# # with SCPClient(ssh.get_transport()) as scp:
# # scp.get("/mnt/c/張晉嘉/stomach_cancer/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt")
def upload(port, filename, user, ip, dst_path):
cmdline = "scp %s -r %s %s@%s:%s" % (port, filename, user, ip, dst_path)
# def upload(port, filename, user, ip, dst_path):
# cmdline = "scp %s -r %s %s@%s:%s" % (port, filename, user, ip, dst_path)
try:
child = pexpect.spawn(cmdline)
child.expect("whitekirin109316118")
child.sendline()
child.expect(pexpect.EOF)
print("file upload Finish")
except Exception as e:
print("upload faild: ", e)
# try:
# child = pexpect.spawn(cmdline)
# child.expect("whitekirin109316118")
# child.sendline()
# child.expect(pexpect.EOF)
# print("file upload Finish")
# except Exception as e:
# print("upload faild: ", e)
upload(2222, "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)", "whitekirin", "203.64.84.39", "/mnt/c/張晉嘉/stomach_cancer")
# upload(2222, "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)", "whitekirin", "203.64.84.39", "/mnt/c/張晉嘉/stomach_cancer")
from torch.utils.data import Dataset
from torch.utils.data import Subset, DataLoader
class ListDataset(Dataset):
def __init__(self, data_list, labels_list):
self.data = data_list
self.labels = labels_list
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sample = self.data[idx]
label = self.labels[idx]
return sample, label
# example data
data_list = ["image1.jpg", "image2.jpg", "image3.jpg"]
labels_list = [0, 1, 0]
# create the Dataset
dataset = ListDataset(data_list, labels_list)
# test
# print(type(dataset[0])) # ('image1.jpg', 0)
dataloader = DataLoader(dataset = dataset, batch_size = 1, shuffle=True, num_workers = 0, pin_memory=True)
print(dataloader)
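A short follow-up sketch (not part of this commit): iterating the DataLoader yields one (sample, label) batch at a time, e.g. (('image2.jpg',), tensor([1])):

for sample, label in dataloader:
    print(sample, label)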