Road model development and training with PaddleRS (develop branch) on Baidu AI Studio (PaddlePaddle)
Reference: https://aistudio.baidu.com/projectdetail/8271882
Environment: python35paddle120env; the prediction visualization results are shown below.
(1) Install the environment: first upload the locally downloaded source archive PaddleRS-develop.zip, unzip it into the PaddleRS directory, and then run the following installation commands:
!unzip -q /home/aistudio/data/data191076/PaddleRS-develop.zip
!mv PaddleRS-develop PaddleRS
!pip install matplotlib==3.4 scikit-image pycocotools -t /home/aistudio/external-libraries
!pip install opencv-contrib-python -t /home/aistudio/external-libraries
!pip install -r PaddleRS/requirements.txt -t /home/aistudio/external-libraries
!pip install -e PaddleRS/ -t /home/aistudio/external-libraries
!pip install paddleslim==2.6.0 -t /home/aistudio/external-libraries

Add the environment components:
# sys.path may not be updated in time, so update it manually here
import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')
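Before moving on, it may help to confirm that the interpreter actually sees the packages installed into external-libraries and the local PaddleRS source. A minimal sanity check (not part of the original notebook):

import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')

import paddle
import paddlers

# Report which builds were picked up (the __version__ attribute on paddlers is assumed)
print('paddle:', paddle.__version__)
print('paddlers:', getattr(paddlers, '__version__', 'unknown'))
# Check whether this PaddlePaddle build was compiled with CUDA support
print('compiled with CUDA:', paddle.device.is_compiled_with_cuda())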
(2) Data preprocessing: tran_dataPre.py

%run tran_dataPre.py
(3) Start model training

%run trans.py

(4) The contents of tran_dataPre.py are shown below:
# Unzip the dataset first
# !unzip -oq -d /home/aistudio/massroad /home/aistudio/data/data56961/mass_road.zip

# Split the data into training/validation/test sets and generate file-name lists

import random
import os.path as osp
from os import listdir

import cv2

# Seed for the random number generator
RNG_SEED = 56961
# Adjust this parameter to control the proportion of training data
TRAIN_RATIO = 0.9
# Dataset path
DATA_DIR = '/home/aistudio/massroad'

# Segmentation classes
CLASSES = (
    'background',
    'road',
)

def write_rel_paths(phase, names, out_dir, prefix):
    """Write the relative file paths into a txt file."""
    with open(osp.join(out_dir, phase + '.txt'), 'w') as f:
        for name in names:
            f.write(
                ' '.join([
                    osp.join(prefix, 'input', name),
                    osp.join(prefix, 'output', name)
                ])
            )
            f.write('\n')

random.seed(RNG_SEED)

train_prefix = osp.join('road_segmentation_ideal', 'training')
test_prefix = osp.join('road_segmentation_ideal', 'testing')

train_names = listdir(osp.join(DATA_DIR, train_prefix, 'output'))
train_names = list(filter(lambda n: n.endswith('.png'), train_names))
test_names = listdir(osp.join(DATA_DIR, test_prefix, 'output'))
test_names = list(filter(lambda n: n.endswith('.png'), test_names))
# Sort the file names so that repeated runs produce the same split
train_names.sort()
test_names.sort()
random.shuffle(train_names)
len_train = int(len(train_names) * TRAIN_RATIO)
write_rel_paths('train', train_names[:len_train], DATA_DIR, train_prefix)
write_rel_paths('val', train_names[len_train:], DATA_DIR, train_prefix)
write_rel_paths('test', test_names, DATA_DIR, test_prefix)

# Write the class information
with open(osp.join(DATA_DIR, 'labels.txt'), 'w') as f:
    for cls in CLASSES:
        f.write(cls + '\n')

print("Dataset split finished.")

# Rewrite 255 in the ground truth as 1, which is more convenient for training

import os.path as osp
from glob import glob

import cv2
from tqdm import tqdm

# Dataset path
# DATA_DIR = '/home/aistudio/massroad'

train_prefix = osp.join('road_segmentation_ideal', 'training')
test_prefix = osp.join('road_segmentation_ideal', 'testing')

train_paths = glob(osp.join(DATA_DIR, train_prefix, 'output', '*.png'))
test_paths = glob(osp.join(DATA_DIR, test_prefix, 'output', '*.png'))
for path in tqdm(train_paths + test_paths):
    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    im[im > 0] = 1
    # Overwrite the file in place
    cv2.imwrite(path, im)
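As a quick check on the split (not part of the original script), the generated file lists can be counted and the files they reference verified before training starts:

import os.path as osp

DATA_DIR = '/home/aistudio/massroad'

for phase in ('train', 'val', 'test'):
    with open(osp.join(DATA_DIR, phase + '.txt')) as f:
        pairs = [line.split() for line in f if line.strip()]
    # Each line holds an image path and a mask path relative to DATA_DIR
    missing = [p for pair in pairs for p in pair if not osp.exists(osp.join(DATA_DIR, p))]
    print(f'{phase}: {len(pairs)} samples, {len(missing)} missing files')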
(5) The contents of trans.py are shown below:

# Import the required libraries
import random
import os.path as osp

import cv2
import numpy as np
import paddle
import paddlers as pdrs
from paddlers import transforms as T
from matplotlib import pyplot as plt
from PIL import Image

import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')

# Global variables

# Random seed
SEED = 56961
# Dataset directory
DATA_DIR = '/home/aistudio/massroad/'
# Path of the training-set file list
TRAIN_FILE_LIST_PATH = '/home/aistudio/massroad/train.txt'
# Path of the validation-set file list
VAL_FILE_LIST_PATH = '/home/aistudio/massroad/val.txt'
# Path of the test-set file list
TEST_FILE_LIST_PATH = '/home/aistudio/massroad/test.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = '/home/aistudio/massroad/labels.txt'
# Experiment directory, where the output model weights and results are saved
EXP_DIR = '/home/aistudio/exp/'

# Fix the random seeds to make the experiment as reproducible as possible
random.seed(SEED)
np.random.seed(SEED)
paddle.seed(SEED)

# Build the datasets

# Data transforms used for training and validation (augmentation, preprocessing, etc.)
train_transforms = T.Compose([
    T.DecodeImg(),
    # Random crop
    T.RandomCrop(crop_size=512),
    # Random horizontal flip with probability 0.5
    T.RandomHorizontalFlip(prob=0.5),
    # Random vertical flip with probability 0.5
    T.RandomVerticalFlip(prob=0.5),
    # Normalize the data to [-1, 1]
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeSegmenter('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=1500),
    # Validation must use the same normalization as training
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeSegmenter('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=4,
    shuffle=True
)

val_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=VAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False
)

# Build a DeepLab V3+ model with ResNet-50 as the backbone
model = pdrs.tasks.seg.DeepLabV3P(
    in_channels=3,
    num_classes=len(train_dataset.labels),
    backbone='ResNet50_vd'
)
model.initialize_net(
    pretrain_weights='CITYSCAPES',
    save_dir=osp.join(EXP_DIR, 'pretrain'),
    resume_checkpoint=None,
    is_backbone_weights=False
)

# Build the optimizer
optimizer = paddle.optimizer.Adam(
    learning_rate=0.001,
    parameters=model.net.parameters()
)

# Train the model
model.train(
    num_epochs=100,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=val_dataset,
    optimizer=optimizer,
    save_interval_epochs=10,
    # Log once every this many iterations
    log_interval_steps=30,
    save_dir=EXP_DIR,
    # Whether to use early stopping (terminate training early when accuracy stops improving)
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Checkpoint to resume training from
    resume_checkpoint=None
)
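Because use_vdl=True is set above, the training curves (loss, mIoU, and so on) can be inspected with VisualDL during or after training. A minimal sketch, assuming the logs land in a vdl_log subdirectory of EXP_DIR (the exact location may differ by PaddleRS version); since the server blocks the cell, running it from a terminal or using the VisualDL panel built into AI Studio is usually more convenient:

!visualdl --logdir /home/aistudio/exp/vdl_log --port 8040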
(6) Training process output
2024-09-05 14:16:51 [INFO] Loading pretrained model from /home/aistudio/exp/pretrain/model.pdparams
2024-09-05 14:16:53 [WARNING] [SKIP] Shape of parameters head.decoder.conv.weight do not match. (pretrained: [19, 256, 1, 1] vs actual: [2, 256, 1, 1])
2024-09-05 14:16:53 [WARNING] [SKIP] Shape of parameters head.decoder.conv.bias do not match. (pretrained: [19] vs actual: [2])
2024-09-05 14:16:53 [INFO] There are 358/360 variables loaded into DeepLabV3P.
2024-09-05 14:17:46 [INFO] [TRAIN] Epoch=1/100, Step=30/90, loss=0.133503, lr=0.001000, time_each_step=1.77s, eta=4:24:32
2024-09-05 14:18:25 [INFO] [TRAIN] Epoch=1/100, Step=60/90, loss=0.181917, lr=0.001000, time_each_step=1.31s, eta=3:14:53
2024-09-05 14:19:02 [INFO] [TRAIN] Epoch=1/100, Step=90/90, loss=0.112567, lr=0.001000, time_each_step=1.22s, eta=3:2:6
2024-09-05 14:19:03 [INFO] [TRAIN] Epoch 1 finished, loss=0.15933047160506247 .
2024-09-05 14:19:44 [INFO] [TRAIN] Epoch=2/100, Step=30/90, loss=0.141528, lr=0.001000, time_each_step=1.36s, eta=3:22:2
2024-09-05 14:20:20 [INFO] [TRAIN] Epoch=2/100, Step=60/90, loss=0.165187, lr=0.001000, time_each_step=1.22s, eta=3:0:42
2024-09-05 14:20:57 [INFO] [TRAIN] Epoch=2/100, Step=90/90, loss=0.145009, lr=0.001000, time_each_step=1.22s, eta=2:59:1
2024-09-05 14:20:58 [INFO] [TRAIN] Epoch 2 finished, loss=0.1168842613697052 .
2024-09-05 14:21:39 [INFO] [TRAIN] Epoch=3/100, Step=30/90, loss=0.126603, lr=0.001000, time_each_step=1.38s, eta=3:22:13
2024-09-05 14:22:16 [INFO] [TRAIN] Epoch=3/100, Step=60/90, loss=0.117296, lr=0.001000, time_each_step=1.22s, eta=2:58:14
2024-09-05 14:22:53 [INFO] [TRAIN] Epoch=3/100, Step=90/90, loss=0.072859, lr=0.001000, time_each_step=1.23s, eta=2:58:46
2024-09-05 14:22:53 [INFO] [TRAIN] Epoch 3 finished, loss=0.10787189056475957 .
2024-09-05 14:23:34 [INFO] [TRAIN] Epoch=4/100, Step=30/90, loss=0.081685, lr=0.001000, time_each_step=1.37s, eta=3:18:39
2024-09-05 14:24:11 [INFO] [TRAIN] Epoch=4/100, Step=60/90, loss=0.087735, lr=0.001000, time_each_step=1.23s, eta=2:57:28
2024-09-05 14:24:48 [INFO] [TRAIN] Epoch=4/100, Step=90/90, loss=0.084795, lr=0.001000, time_each_step=1.22s, eta=2:55:44
2024-09-05 14:24:49 [INFO] [TRAIN] Epoch 4 finished, loss=0.10476481277081702 .
2024-09-05 14:25:30 [INFO] [TRAIN] Epoch=5/100, Step=30/90, loss=0.098625, lr=0.001000, time_each_step=1.37s, eta=3:16:59
2024-09-05 14:26:07 [INFO] [TRAIN] Epoch=5/100, Step=60/90, loss=0.078188, lr=0.001000, time_each_step=1.24s, eta=2:57:12
2024-09-05 14:26:43 [INFO] [TRAIN] Epoch=5/100, Step=90/90, loss=0.098015, lr=0.001000, time_each_step=1.21s, eta=2:52:11
2024-09-05 14:26:44 [INFO] [TRAIN] Epoch 5 finished, loss=0.10311256903741095 .
2024-09-05 14:27:25 [INFO] [TRAIN] Epoch=6/100, Step=30/90, loss=0.109136, lr=0.001000, time_each_step=1.38s, eta=3:16:8
...
2024-09-05 15:39:38 [INFO] Start to evaluate (total_samples=81, total_steps=81)...
2024-09-05 15:40:14 [INFO] [EVAL] Finished, Epoch=40, miou=0.716638, category_iou=[0.96831487 0.46496069], oacc=0.969164, category_acc=[0.97447995 0.81316509], kappa=0.619485, category_F1-score=[0.98390241 0.63477565] .
2024-09-05 15:40:14 [INFO] Current evaluated best model on eval_dataset is epoch_10, miou=0.7255623401044613
2024-09-05 15:40:18 [INFO] Model saved in /home/aistudio/exp/epoch_40.
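AI Studio sessions can be interrupted, so it is worth noting that the resume_checkpoint argument of model.train above can pick training back up from a saved epoch directory instead of restarting. A hedged sketch, reusing the objects defined in trans.py and the epoch_40 checkpoint shown in the log (the exact checkpoint layout may vary by PaddleRS version):

# Resume training from the checkpoint saved at epoch 40 (sketch, not from the original post)
model.train(
    num_epochs=100,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=val_dataset,
    optimizer=optimizer,
    save_interval_epochs=10,
    log_interval_steps=30,
    save_dir=EXP_DIR,
    use_vdl=True,
    resume_checkpoint=osp.join(EXP_DIR, 'epoch_40')
)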
(7) Test-set prediction results

# Build the test set
test_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TEST_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False
)

# Load the best weights from training into the model
state_dict = paddle.load(osp.join(EXP_DIR, 'best_model/model.pdparams'))
model.net.set_state_dict(state_dict)

# Run the evaluation on the test set
test_result = model.evaluate(test_dataset)
print(
    "Metrics on the test set: IoU={:.2f}, Acc={:.2f}, Kappa={:.2f}, F1={:.2f}".format(
        test_result['category_iou'][1],
        test_result['category_acc'][1],
        test_result['kappa'],
        test_result['category_F1-score'][1]
    )
)

2024-09-05 20:07:40 [INFO] 13 samples in file /home/aistudio/massroad/test.txt
2024-09-05 20:07:41 [INFO] Start to evaluate (total_samples=13, total_steps=13)...
Metrics on the test set: IoU=0.47, Acc=0.82, Kappa=0.62, F1=0.64
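The metrics above cover only the road class (index 1 in labels.txt). To see both classes, the dictionary returned by model.evaluate can be unpacked directly — a small sketch using the keys that also appear in the training log (category_iou, category_acc, category_F1-score, kappa):

# test_result is the dict returned by model.evaluate(test_dataset) above
for idx, name in enumerate(('background', 'road')):
    print('{}: IoU={:.4f}, Acc={:.4f}, F1={:.4f}'.format(
        name,
        test_result['category_iou'][idx],
        test_result['category_acc'][idx],
        test_result['category_F1-score'][idx]))
print('Kappa={:.4f}'.format(test_result['kappa']))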
(8) Prediction result visualization

# Visualize the prediction results
# Re-running this cell shows different samples

def read_image(path):
    im = cv2.imread(path)
    return im[..., ::-1]

def show_images_in_row(ims, fig, title, quantize=False):
    n = len(ims)
    fig.suptitle(title)
    axs = fig.subplots(nrows=1, ncols=n)
    for idx, (im, ax) in enumerate(zip(ims, axs)):
        # Remove the tick marks and borders
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])

        if isinstance(im, str):
            im = read_image(im)
        if quantize:
            im = (im * 255).astype('uint8')
        if im.ndim == 2:
            im = np.tile(im[..., np.newaxis], [1, 1, 3])
        ax.imshow(im)

# Number of samples to display
num_imgs_to_show = 4
# Randomly pick samples
chosen_indices = random.choices(range(len(test_dataset)), k=num_imgs_to_show)

# See https://stackoverflow.com/a/68209152
fig = plt.figure(constrained_layout=True)
fig.suptitle("Test Results")

subfigs = fig.subfigures(nrows=3, ncols=1)

# Read and display the input images
im_paths = [test_dataset.file_list[idx]['image'] for idx in chosen_indices]
show_images_in_row(im_paths, subfigs[0], title='Image')

# Get the model predictions
with paddle.no_grad():
    model.net.eval()
    preds = []
    for idx in chosen_indices:
        input, mask = test_dataset[idx]
        input = paddle.to_tensor(input['image']).unsqueeze(0)
        logits, *_ = model.net(input)
        pred = paddle.argmax(logits[0], axis=0)
        preds.append(pred.numpy())
show_images_in_row(preds, subfigs[1], title='Pred', quantize=True)

# Read and display the ground-truth labels
im_paths = [test_dataset.file_list[idx]['mask'] for idx in chosen_indices]
show_images_in_row(im_paths, subfigs[2], title='GT', quantize=True)

# Render the result
fig.canvas.draw()
Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
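When this is run outside a notebook, the PIL-based rendering at the end may not display anything; writing the figure to disk is a simple alternative. A minimal sketch with a hypothetical output path:

# Save the composited figure (Image / Pred / GT rows) to a file for inspection
fig.savefig('/home/aistudio/exp/test_visualization.png', dpi=150)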
(9) Export a static-graph model. The model saved after training is a dynamic-graph model, while the model published for deployment must be a static-graph model, so an export step is required.

import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import paddle
import paddlers as pdrs
from PIL import Image

import os
from paddlers.tasks import load_model

model_path = './exp/best_model'

img_14 = 'i:/cwgis_ai/cup/mass_road/road_segmentation_ideal/testing/input/img-14.png'
img_10 = 'i:/cwgis_ai/cup/mass_road/road_segmentation_ideal/testing/input/img-10.png'

# save_dir = './models/road_infer_model_100'
save_dir = './models/road_infer_model_100_custom'

# export model OK
# Set environment variables
os.environ['PADDLEX_EXPORT_STAGE'] = 'True'
os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'

# Load model from directory
model = load_model(model_path)

# fixed_input_shape = None
# fixed_input_shape = [1500, 1500]
fixed_input_shape = [17761, 25006]  # [w, h]

# Do dynamic-to-static cast
# XXX: Invoke a protected (single underscore) method outside of subclasses.
model.export_inference_model(save_dir, fixed_input_shape)
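To confirm that the export step actually wrote the static-graph artifacts, the target directory can simply be listed; the exact file names depend on the PaddleRS version, so this is just a quick check rather than part of the original workflow:

import os

save_dir = './models/road_infer_model_100_custom'
# Print every file the exporter produced along with its size
for name in sorted(os.listdir(save_dir)):
    size = os.path.getsize(os.path.join(save_dir, name))
    print(f'{name}\t{size} bytes')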
(10) Code for predicting a single image

import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import paddle
import paddlers as pdrs
from PIL import Image

import os
from paddlers.tasks import load_model

# sys.path may not be updated in time, so update it manually here
import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')

img_14 = './massroad/road_segmentation_ideal/testing/input/img-14.png'
img_10 = './massroad/road_segmentation_ideal/testing/input/img-10.png'
img_5 = './massroad/road_segmentation_ideal/testing/input/img-5.png'
customImg = './customImage/DeepLearning_Image.png'  # file converted from tif to png

# model_dir = './models/road_infer_model_100'
# model_dir = './models/road_infer_model_100_None'
model_dir = './models/road_infer_model_100_custom'

# model = pdrs.deploy.Predictor(model_dir)
model = pdrs.deploy.Predictor(model_dir, use_gpu=True)

# Read the input images
im_paths = [customImg]
im_lis = []
for name in im_paths:
    print(name)
    img = cv2.imread(name)
    print(img.shape)
    # img = paddle.to_tensor(img)  # .unsqueeze(0)  # tensor input
    im_lis.append(img)

# Get the model prediction output
img_file = img_10
preds = []
results = model.predict(im_lis)
# print(results)
label_map = results[0]['label_map']
# print(label_map)
label_map[label_map > 0] = 255
cv2.imwrite('./outImage/label_map_custom.png', label_map)

score_map = results[0]['score_map']
# cv2.imwrite('./outImage/score_map.png', score_map[0])
print(score_map)

print('Prediction finished')

Blog URL: https://blog.csdn.net/hsg77