OpenMMLab AI Hands-On Camp: Lesson 7 Notes


import os
import os.path as osp
import numpy as np
from PIL import Image
from tqdm import tqdm
import mmcv
import mmengine
import matplotlib.pyplot as plt
%matplotlib inline

# Dataset image and annotation paths
data_root = 'data/koto/'
img_dir = 'imgs'
ann_dir = 'annos'

# Classes and their corresponding colors
classes = ('bg', 'person')
palette = [[0, 0, 0], [255, 255, 255]]
# Alternative 8-class palette (commented out):
# palette = [[128, 128, 128], [129, 127, 38], [120, 69, 125], [53, 125, 34],
#            [0, 11, 123], [118, 20, 12], [122, 81, 25], [241, 134, 51]]
Image.open('data/koto/imgs/00001-125.jpg')





Image.open('data/koto/annos/00001-125.png')
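Before going further, a quick sanity check that each image has a mask of the same size can save a debugging session later. A minimal sketch using the paths defined above (the single-name list is just an illustration; extend it to the full file list):

import os.path as osp
from PIL import Image

# Spot check: verify image/mask pairs share the same spatial size.
for name in ['00001-125']:  # extend to the full file list as needed
    img = Image.open(osp.join(data_root, img_dir, name + '.jpg'))
    ann = Image.open(osp.join(data_root, ann_dir, name + '.png'))
    assert img.size == ann.size, f'{name}: image {img.size} vs mask {ann.size}'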





import matplotlib.patches as mpatches

img = Image.open('data/koto/annos/00001-125.png')
plt.figure(figsize=(8, 6))
im = plt.imshow(np.array(img.convert('RGB')))

# Legend patches, one per class
patches = [mpatches.Patch(color=np.array(palette[i]) / 255., label=classes[i])
           for i in range(len(classes))]
# Draw the legend
plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2,
           borderaxespad=0., fontsize='large')
plt.show()
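Since the palette maps `person` to white, it is also worth confirming what pixel values the annotation PNGs actually store. MMSeg expects class indices (0 and 1 here) and treats 255 as `ignore_index`, so masks saved as raw 0/255 grayscale would have every person pixel silently ignored, and any stray values can crash training. A hedged check-and-remap sketch (the remapping step assumes strictly binary masks):

import numpy as np
from PIL import Image

mask = np.array(Image.open('data/koto/annos/00001-125.png'))
print(mask.dtype, np.unique(mask))  # expect [0 1]; [0 255] means raw grayscale

# Assumption: if the mask stores 0/255, remap it to class indices 0/1.
if mask.max() == 255:
    fixed = (mask > 127).astype(np.uint8)
    Image.fromarray(fixed).save('data/koto/annos/00001-125.png')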





from mmseg.registry import DATASETS
from mmseg.datasets import BaseSegDataset

@DATASETS.register_module()
class StanfordBackgroundDataset(BaseSegDataset):
    METAINFO = dict(classes=classes, palette=palette)

    def __init__(self, **kwargs):
        super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)
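The config below points at `train_list.txt` and `valid_list.txt`, which these notes never show being created. One plausible way to generate them (the 80/20 split ratio is an assumption, not something the lesson specifies):

import os.path as osp
import mmengine

# Write split files listing sample names without suffixes.
filenames = [osp.splitext(f)[0]
             for f in mmengine.scandir(osp.join(data_root, img_dir), suffix='.jpg')]
train_len = int(len(filenames) * 0.8)  # assumed 80/20 train/val split
with open(osp.join(data_root, 'train_list.txt'), 'w') as f:
    f.write('\n'.join(filenames[:train_len]))
with open(osp.join(data_root, 'valid_list.txt'), 'w') as f:
    f.write('\n'.join(filenames[train_len:]))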
# Download the config file and the pretrained checkpoint
!mim download mmsegmentation --config pspnet_r50-d8_4xb2-40k_cityscapes-512x1024 --dest .
processing pspnet_r50-d8_4xb2-40k_cityscapes-512x1024...
pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth exists in /home/linxu/Desktop/OpenMMLab-Space/mmsegmentation
Successfully dumped pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py to /home/linxu/Desktop/OpenMMLab-Space/mmsegmentation
from mmengine import Config
cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_4xb2-40k_cityscapes-512x1024.py')

# On a single GPU, use BN instead of SyncBN
cfg.norm_cfg = dict(type='BN', requires_grad=True)
cfg.crop_size = (256, 256)
cfg.model.data_preprocessor.size = cfg.crop_size
cfg.model.backbone.norm_cfg = cfg.norm_cfg
cfg.model.decode_head.norm_cfg = cfg.norm_cfg
cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg

# Modify the number of classes in the decode/auxiliary heads.
# Note: the auxiliary head is left at 8 here, which is inconsistent with this
# 2-class dataset -- it should arguably be 2 as well.
cfg.model.decode_head.num_classes = 2
cfg.model.auxiliary_head.num_classes = 8

# Modify the dataset type and root
cfg.dataset_type = 'StanfordBackgroundDataset'
cfg.data_root = data_root
cfg.train_dataloader.batch_size = 8

cfg.train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='RandomResize', scale=(320, 240), ratio_range=(0.5, 2.0), keep_ratio=True),
    dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackSegInputs')
]

cfg.test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(320, 240), keep_ratio=True),
    # Load annotations after ``Resize`` because the ground truth
    # does not need the resize transform
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs')
]

cfg.train_dataloader.dataset.type = cfg.dataset_type
cfg.train_dataloader.dataset.data_root = cfg.data_root
cfg.train_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)
cfg.train_dataloader.dataset.pipeline = cfg.train_pipeline
cfg.train_dataloader.dataset.ann_file = 'data/koto/train_list.txt'

cfg.val_dataloader.dataset.type = cfg.dataset_type
cfg.val_dataloader.dataset.data_root = cfg.data_root
cfg.val_dataloader.dataset.data_prefix = dict(img_path=img_dir, seg_map_path=ann_dir)
cfg.val_dataloader.dataset.pipeline = cfg.test_pipeline
cfg.val_dataloader.dataset.ann_file = 'data/koto/valid_list.txtt'  # typo preserved from the run; should be 'valid_list.txt'

cfg.test_dataloader = cfg.val_dataloader

# Load the pretrained model weights
cfg.load_from = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
# Working directory
cfg.work_dir = './work_dirs/tutorial'
# Number of training iterations
cfg.train_cfg.max_iters = 800
# Evaluation interval
cfg.train_cfg.val_interval = 400
# Logging interval
cfg.default_hooks.logger.interval = 100
# Checkpoint saving interval
cfg.default_hooks.checkpoint.interval = 400
# Random seed
cfg['randomness'] = dict(seed=0)
print(cfg.pretty_text)
norm_cfg = dict(type='BN', requires_grad=True)
data_preprocessor = dict(
type='SegDataPreProcessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_val=0,
seg_pad_val=255,
size=(512, 1024))
model = dict(
type='EncoderDecoder',
data_preprocessor=dict(
type='SegDataPreProcessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_val=0,
seg_pad_val=255,
size=(256, 256)),
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='PSPHead',
in_channels=2048,
in_index=3,
channels=512,
pool_scales=(1, 2, 3, 6),
dropout_ratio=0.1,
num_classes=2,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=8,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
train_cfg=dict(),
test_cfg=dict(mode='whole'))
dataset_type = 'StanfordBackgroundDataset'
data_root = 'data/koto/'
crop_size = (256, 256)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(320, 240),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(320, 240), keep_ratio=True),
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=dict(backend='local')),
dict(
type='TestTimeAug',
transforms=[[{
'type': 'Resize',
'scale_factor': 0.5,
'keep_ratio': True
}, {
'type': 'Resize',
'scale_factor': 0.75,
'keep_ratio': True
}, {
'type': 'Resize',
'scale_factor': 1.0,
'keep_ratio': True
}, {
'type': 'Resize',
'scale_factor': 1.25,
'keep_ratio': True
}, {
'type': 'Resize',
'scale_factor': 1.5,
'keep_ratio': True
}, {
'type': 'Resize',
'scale_factor': 1.75,
'keep_ratio': True
}],
[{
'type': 'RandomFlip',
'prob': 0.0,
'direction': 'horizontal'
}, {
'type': 'RandomFlip',
'prob': 1.0,
'direction': 'horizontal'
}], [{
'type': 'LoadAnnotations'
}], [{
'type': 'PackSegInputs'
}]])
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type='StanfordBackgroundDataset',
data_root='data/koto/',
data_prefix=dict(img_path='imgs', seg_map_path='annos'),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(320, 240),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
],
ann_file='data/koto/train_list.txt'))
val_dataloader = dict(
batch_size=1,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='StanfordBackgroundDataset',
data_root='data/koto/',
data_prefix=dict(img_path='imgs', seg_map_path='annos'),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(320, 240), keep_ratio=True),
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
],
ann_file='data/koto/valid_list.txtt'))
test_dataloader = dict(
batch_size=1,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='StanfordBackgroundDataset',
data_root='data/koto/',
data_prefix=dict(img_path='imgs', seg_map_path='annos'),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(320, 240), keep_ratio=True),
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
],
ann_file='data/koto/valid_list.txtt'))
val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
default_scope = 'mmseg'
env_cfg = dict(
cudnn_benchmark=True,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='SegLocalVisualizer',
vis_backends=[dict(type='LocalVisBackend')],
name='visualizer')
log_processor = dict(by_epoch=False)
log_level = 'INFO'
load_from = 'pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
resume = False
tta_model = dict(type='SegTTAModel')
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005),
clip_grad=None)
param_scheduler = [
dict(
type='PolyLR',
eta_min=0.0001,
power=0.9,
begin=0,
end=40000,
by_epoch=False)
]
train_cfg = dict(type='IterBasedTrainLoop', max_iters=800, val_interval=400)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=100, log_metric_by_epoch=False),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=400),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='SegVisualizationHook'))
work_dir = './work_dirs/tutorial'
randomness = dict(seed=0)
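At this point the merged config can also be dumped to disk so the run is reproducible outside the notebook; mmengine's `Config.dump` writes it back out as a plain config file (the filename below is arbitrary):

import os.path as osp
import mmengine

# Persist the modified config alongside the training artifacts.
mmengine.mkdir_or_exist(cfg.work_dir)
cfg.dump(osp.join(cfg.work_dir, 'pspnet_r50_koto.py'))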
from mmengine.runner import Runner
from mmseg.utils import register_all_modules

# Register all modules in mmseg into the registries.
# Do not init the default scope here because the Runner will do it.
register_all_modules(init_default_scope=False)
runner = Runner.from_cfg(cfg)
02/11 22:55:46 - mmengine - INFO -
------------------------------------------------------------
System environment:
sys.platform: linux
Python: 3.8.16 (default, Jan 17 2023, 23:13:24) [GCC 11.2.0]
CUDA available: True
numpy_random_seed: 0
GPU 0: NVIDIA GeForce RTX 3060 Laptop GPU
CUDA_HOME: /usr/local/cuda-11.6
NVCC: Cuda compilation tools, release 11.6, V11.6.124
GCC: gcc (Uos 8.3.0.3-3+rebuild) 8.3.0
PyTorch: 1.13.1+cu116
PyTorch compiling details: PyTorch built with:
- GCC 9.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.6
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.6 (built against CUDA 11.8)
- Built with CuDNN 8.3.2
- Magma 2.6.1
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.6, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.13.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
TorchVision: 0.14.1+cu116
OpenCV: 4.7.0
MMEngine: 0.5.0

Runtime environment:
cudnn_benchmark: True
mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
dist_cfg: {'backend': 'nccl'}
seed: 0
Distributed launcher: none
Distributed training: False
GPU number: 1
------------------------------------------------------------
02/11 22:55:46 - mmengine - INFO - Config:
(config dump omitted -- identical to the `print(cfg.pretty_text)` output above)
02/11 22:55:46 - mmengine - WARNING - The "visualizer" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:46 - mmengine - WARNING - The "vis_backend" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:47 - mmengine - WARNING - The "model" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
/home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/backbones/resnet.py:431: UserWarning: DeprecationWarning: pretrained is a deprecated, please use "init_cfg" instead
  warnings.warn('DeprecationWarning: pretrained is a deprecated, '
/home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/decode_heads/decode_head.py:120: UserWarning: For binary segmentation, we suggest using`out_channels = 1` to define the outputchannels of segmentor, and use `threshold`to convert `seg_logits` into a predictionapplying a threshold
  warnings.warn('For binary segmentation, we suggest using'
/home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/builder.py:36: UserWarning: ``build_loss`` would be deprecated soon, please use ``mmseg.registry.MODELS.build()``
  warnings.warn('``build_loss`` would be deprecated soon, please use '
/home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:235: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.
  warnings.warn(
02/11 22:55:48 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.
02/11 22:55:48 - mmengine - WARNING - The "hook" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:48 - mmengine - INFO - Hooks will be executed in the following order:
before_run:
(VERY_HIGH ) RuntimeInfoHook
(BELOW_NORMAL) LoggerHook
--------------------
before_train:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(VERY_LOW ) CheckpointHook
--------------------
before_train_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(NORMAL ) DistSamplerSeedHook
--------------------
before_train_iter:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
--------------------
after_train_iter:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
after_train_epoch:
(NORMAL ) IterTimerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
before_val_epoch:
(NORMAL ) IterTimerHook
--------------------
before_val_iter:
(NORMAL ) IterTimerHook
--------------------
after_val_iter:
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
--------------------
after_val_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
(LOW ) ParamSchedulerHook
(VERY_LOW ) CheckpointHook
--------------------
before_test_epoch:
(NORMAL ) IterTimerHook
--------------------
before_test_iter:
(NORMAL ) IterTimerHook
--------------------
after_test_iter:
(NORMAL ) IterTimerHook
(NORMAL ) SegVisualizationHook
(BELOW_NORMAL) LoggerHook
--------------------
after_test_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
--------------------
after_run:
(BELOW_NORMAL) LoggerHook
--------------------
/home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/engine/hooks/visualization_hook.py:61: UserWarning: The draw is False, it means that the hook for visualization will not take effect. The results will NOT be visualized or stored.
  warnings.warn('The draw is False, it means that the '
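The last warning says the `SegVisualizationHook` was built with `draw=False`, so validation-time predictions will not be rendered or stored. If you want them, set the hook's `draw` flag in the config before `Runner.from_cfg` is called, e.g.:

# Optional: store validation visualizations under work_dir
# (must be set before building the Runner to take effect).
cfg.default_hooks.visualization = dict(
    type='SegVisualizationHook', draw=True, interval=1)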
runner.train()
02/11 22:55:54 - mmengine - WARNING - The "loop" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "dataset" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "transform" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "data sampler" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "optimizer wrapper constructor" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "optimizer" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "optim_wrapper" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "parameter scheduler" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - WARNING - The "metric" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
/home/linxu/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/evaluator/metric.py:47: UserWarning: The prefix is not set in metric class IoUMetric.
  warnings.warn('The prefix is not set in metric class '
02/11 22:55:54 - mmengine - WARNING - The "weight initializer" registry in mmseg did not set import location. Fallback to call `mmseg.utils.register_all_modules` instead.
02/11 22:55:54 - mmengine - INFO - load model from: open-mmlab://resnet50_v1c
02/11 22:55:54 - mmengine - INFO - Loads checkpoint by openmmlab backend from path: open-mmlab://resnet50_v1c
02/11 22:55:54 - mmengine - WARNING - The model and loaded state dict do not match exactly

unexpected key in source state_dict: fc.weight, fc.bias

Loads checkpoint by local backend from path: pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth
The model and loaded state dict do not match exactly

size mismatch for decode_head.conv_seg.weight: copying a param with shape torch.Size([19, 512, 1, 1]) from checkpoint, the shape in current model is torch.Size([2, 512, 1, 1]).
size mismatch for decode_head.conv_seg.bias: copying a param with shape torch.Size([19]) from checkpoint, the shape in current model is torch.Size([2]).
size mismatch for auxiliary_head.conv_seg.weight: copying a param with shape torch.Size([19, 256, 1, 1]) from checkpoint, the shape in current model is torch.Size([8, 256, 1, 1]).
size mismatch for auxiliary_head.conv_seg.bias: copying a param with shape torch.Size([19]) from checkpoint, the shape in current model is torch.Size([8]).
02/11 22:55:55 - mmengine - INFO - Load checkpoint from pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth
02/11 22:55:55 - mmengine - INFO - Checkpoints will be saved to /home/linxu/Desktop/OpenMMLab-Space/mmsegmentation/work_dirs/tutorial.

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[14], line 1
----> 1 runner.train()

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/runner/runner.py:1686, in Runner.train(self)
-> 1686 model = self.train_loop.run()  # type: ignore

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/runner/loops.py:264, in IterBasedTrainLoop.run(self)
-> 264 self.run_iter(data_batch)

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/runner/loops.py:287, in IterBasedTrainLoop.run_iter(self, data_batch)
-> 287 outputs = self.runner.model.train_step(
   288     data_batch, optim_wrapper=self.runner.optim_wrapper)

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py:114, in BaseModel.train_step(self, data, optim_wrapper)
-> 114 losses = self._run_forward(data, mode='loss')  # type: ignore

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py:326, in BaseModel._run_forward(self, data, mode)
-> 326 results = self(**data, mode=mode)

File ~/anaconda3/envs/mmlab2/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
-> 1194 return forward_call(*input, **kwargs)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/segmentors/base.py:94, in BaseSegmentor.forward(self, inputs, data_samples, mode)
-> 94 return self.loss(inputs, data_samples)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py:176, in EncoderDecoder.loss(self, inputs, data_samples)
-> 176 loss_decode = self._decode_head_forward_train(x, data_samples)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py:137, in EncoderDecoder._decode_head_forward_train(self, inputs, data_samples)
-> 137 loss_decode = self.decode_head.loss(inputs, data_samples,
   138                                     self.train_cfg)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/decode_heads/decode_head.py:262, in BaseDecodeHead.loss(self, inputs, batch_data_samples, train_cfg)
-> 262 losses = self.loss_by_feat(seg_logits, batch_data_samples)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/decode_heads/decode_head.py:336, in BaseDecodeHead.loss_by_feat(self, seg_logits, batch_data_samples)
-> 336 loss['acc_seg'] = accuracy(
   337     seg_logits, seg_label, ignore_index=self.ignore_index)

File ~/Desktop/OpenMMLab-Space/mmsegmentation/mmseg/models/losses/accuracy.py:49, in accuracy(pred, target, topk, thresh, ignore_index)
-> 49 correct = correct[:, target != ignore_index]

RuntimeError: CUDA error: an illegal memory access was encountered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
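The crash surfaces in `accuracy`, which indexes tensors with the ground-truth labels; an illegal memory access there is the classic symptom of label values outside `[0, num_classes)` that are not `ignore_index`. Given the white (255) palette, the masks' raw pixel values are the prime suspect here; the auxiliary head's `num_classes=8` is a separate inconsistency worth fixing regardless. Two diagnostic sketches (not fixes from the lesson): rerun with synchronous CUDA launches so the stack trace points at the real failing kernel, and audit every mask's value range on the CPU.

import os
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'  # set before any CUDA call, ideally at process start

import os.path as osp
import numpy as np
from PIL import Image
import mmengine

# Audit every mask: values must lie in {0, 1} (valid classes) or be 255 (ignore).
bad = []
for name in mmengine.scandir('data/koto/annos', suffix='.png'):
    vals = set(np.unique(np.array(Image.open(osp.join('data/koto/annos', name)))).tolist())
    if not vals <= {0, 1, 255}:
        bad.append((name, sorted(vals)))
print(bad[:10] if bad else 'all masks look valid')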
