1.代码修改
源码是针对3D单通道图像的，只需要简单改写为2D就行，修改nnMamba4cls.py代码如下：
# -*- coding: utf-8 -*-
# 作者: Mr Cun
# 文件名: nnMamba4cls.py
# 创建时间: 2024-10-25
# 文件描述：修改nnmamba，使…1.代码修改
源码是针对3D单通道图像的只需要简单改写为2D就行修改nnMamba4cls.py代码如下
# -*- coding: utf-8 -*-
# 作者: Mr Cun
# 文件名: nnMamba4cls.py
# 创建时间: 2024-10-25
# 文件描述：修改nnmamba，使其适应3通道2分类的DR分类任务
import torch
import torch.nn as nn
import torch.nn.functional as F
from mamba_ssm import Mamba


# NOTE(review): this whole module was reconstructed from a whitespace-stripped
# paste; '=', '+=', '==' and string quotes were restored from context.
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class BasicBlock(nn.Module):
    """Standard ResNet basic block (two 3x3 convs + identity shortcut)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out


def make_res_layer(inplanes, planes, blocks, stride=1):
    """Stack `blocks` BasicBlocks; the first one downsamples via 1x1 conv."""
    downsample = nn.Sequential(
        conv1x1(inplanes, planes, stride),
        nn.BatchNorm2d(planes),
    )
    layers = [BasicBlock(inplanes, planes, stride, downsample)]
    for _ in range(1, blocks):
        layers.append(BasicBlock(planes, planes))
    return nn.Sequential(*layers)


class MambaLayer(nn.Module):
    """Channel-preserving Mamba block over flattened spatial tokens."""

    def __init__(self, dim, d_state=8, d_conv=4, expand=2):
        super().__init__()
        self.dim = dim
        self.nin = conv1x1(dim, dim)
        self.nin2 = conv1x1(dim, dim)
        self.norm2 = nn.BatchNorm2d(dim)  # LayerNorm in the original 3D code
        self.relu2 = nn.ReLU(inplace=True)
        self.relu3 = nn.ReLU(inplace=True)
        self.norm = nn.BatchNorm2d(dim)  # LayerNorm in the original 3D code
        self.relu = nn.ReLU(inplace=True)
        self.mamba = Mamba(
            d_model=dim,      # Model dimension d_model
            d_state=d_state,  # SSM state expansion factor
            d_conv=d_conv,    # Local convolution width
            expand=expand,    # Block expansion factor
        )

    def forward(self, x):
        B, C = x.shape[:2]
        x = self.nin(x)
        x = self.norm(x)
        x = self.relu(x)
        act_x = x
        assert C == self.dim
        # Flatten H*W into a token sequence for the SSM, then restore the map.
        n_tokens = x.shape[2:].numel()
        img_dims = x.shape[2:]
        x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2)
        x_mamba = self.mamba(x_flat)
        out = x_mamba.transpose(-1, -2).reshape(B, C, *img_dims)
        # act_x = self.relu3(x)
        out += act_x
        out = self.nin2(out)
        out = self.norm2(out)
        out = self.relu2(out)
        return out


class MambaSeq(nn.Module):
    """Mamba over an already-1D feature sequence (used on pooled features)."""

    def __init__(self, dim, d_state=16, d_conv=4, expand=2):
        super().__init__()
        self.dim = dim
        self.relu = nn.ReLU(inplace=True)
        self.mamba = Mamba(
            d_model=dim,      # Model dimension d_model
            d_state=d_state,  # SSM state expansion factor
            d_conv=d_conv,    # Local convolution width
            expand=expand,    # Block expansion factor
        )

    def forward(self, x):
        B, C = x.shape[:2]
        x = self.relu(x)
        assert C == self.dim
        n_tokens = x.shape[2:].numel()
        img_dims = x.shape[2:]
        x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2)
        x_mamba = self.mamba(x_flat)
        out = x_mamba.transpose(-1, -2).reshape(B, C, *img_dims)
        return out


class DoubleConv(nn.Module):
    """Conv-BN-ReLU twice; first conv may stride-downsample."""

    def __init__(self, in_ch, out_ch, stride=1, kernel_size=3):
        super(DoubleConv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride,
                      padding=int(kernel_size / 2)),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, dilation=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, input):
        return self.conv(input)


class SingleConv(nn.Module):
    """Single Conv-BN-ReLU unit."""

    def __init__(self, in_ch, out_ch):
        super(SingleConv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, input):
        return self.conv(input)


class nnMambaEncoder(nn.Module):
    """nnMamba classifier: Mamba-augmented stem, 3 residual stages,
    multi-scale pooled features fused by a sequence Mamba, then an MLP head."""

    def __init__(self, in_ch=3, channels=32, blocks=3, number_classes=2):
        super(nnMambaEncoder, self).__init__()
        self.in_conv = DoubleConv(in_ch, channels, stride=2, kernel_size=3)
        self.mamba_layer_stem = MambaLayer(
            dim=channels,  # Model dimension d_model
            d_state=8,     # SSM state expansion factor
            d_conv=4,      # Local convolution width
            expand=2,      # Block expansion factor
        )
        self.layer1 = make_res_layer(channels, channels * 2, blocks, stride=2)
        self.layer2 = make_res_layer(channels * 2, channels * 4, blocks, stride=2)
        self.layer3 = make_res_layer(channels * 4, channels * 8, blocks, stride=2)
        self.pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.mamba_seq = MambaSeq(
            dim=channels * 2,  # Model dimension d_model
            d_state=8,         # SSM state expansion factor
            d_conv=2,          # Local convolution width
            expand=2,          # Block expansion factor
        )
        # 14 = 2 + 4 + 8 channel multiples packed into a (channels*2, 7) sequence.
        self.mlp = nn.Sequential(
            nn.Linear(channels * 14, channels),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(channels, number_classes),
        )

    def forward(self, x):
        c1 = self.in_conv(x)
        c1_s = self.mamba_layer_stem(c1) + c1
        c2 = self.layer1(c1_s)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        pooled_c2_s = self.pooling(c2)
        pooled_c3_s = self.pooling(c3)
        pooled_c4_s = self.pooling(c4)
        # Pack the three pooled stages into one (B, channels*2, 1+2+4) sequence.
        h_feature = torch.cat(
            (pooled_c2_s.reshape(c1.shape[0], c1.shape[1] * 2, 1),
             pooled_c3_s.reshape(c1.shape[0], c1.shape[1] * 2, 2),
             pooled_c4_s.reshape(c1.shape[0], c1.shape[1] * 2, 4)),
            dim=2)
        h_feature_att = self.mamba_seq(h_feature) + h_feature
        h_feature = h_feature_att.reshape(c1.shape[0], -1)
        return self.mlp(h_feature)


if __name__ == "__main__":
    model = nnMambaEncoder().cuda()
    input = torch.zeros((8, 3, 224, 224)).cuda()
    output = model(input)
    print(output.shape)
2.增加训练代码和数据集代码
dr_dataset.py
# -*- coding: utf-8 -*-
# 作者: Mr.Cun
# 文件名: dr_dataset.py
# 创建时间: 2024-10-25
# 文件描述：视网膜数据处理
import torch
import numpy as np
import os
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms, datasets

# NOTE(review): '=' signs and string quotes were stripped by the paste; restored.
root_path = '/home/aic/deep_learning_data/retino_data'
batch_size = 64  # adjust to your hardware
class_labels = {0: 'Diabetic Retinopathy', 1: 'No Diabetic Retinopathy'}
# Fix random seeds for reproducibility
torch.manual_seed(42)
np.random.seed(42)


class RetinaDataset:
    """Wraps the retina ImageFolder splits (train/valid/test) with transforms
    and ready-made DataLoaders."""

    def __init__(self, root_path, batch_size, class_labels):
        self.root_path = root_path
        self.batch_size = batch_size
        self.class_labels = class_labels
        self.transform = self._set_transforms()
        self.train_dataset = self._load_dataset('train')
        self.val_dataset = self._load_dataset('valid')
        self.test_dataset = self._load_dataset('test')
        # Only the training loader shuffles.
        self.train_loader = DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)
        self.valid_loader = DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False)
        self.test_loader = DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)

    def _set_transforms(self):
        """Augmentation + ImageNet normalization applied to every split."""
        return transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.RandomRotation(30),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

    def _load_dataset(self, split):
        """Load one split ('train' / 'valid' / 'test') as an ImageFolder."""
        root = os.path.join(self.root_path, split)
        return datasets.ImageFolder(root=root, transform=self.transform)

    def visualize_samples(self, loader):
        """Plot a 4x4 grid of random samples with their class labels."""
        figure = plt.figure(figsize=(12, 12))
        cols, rows = 4, 4
        for i in range(1, cols * rows + 1):
            sample_idx = np.random.randint(len(loader.dataset))
            img, label = loader.dataset[sample_idx]
            figure.add_subplot(rows, cols, i)
            plt.title(self.class_labels[label])
            plt.axis('off')
            # CHW tensor -> HWC numpy; clip because normalization pushes values
            # outside [0, 1].
            img_np = img.numpy().transpose((1, 2, 0))
            img_valid_range = np.clip(img_np, 0, 1)
            plt.imshow(img_valid_range)
        plt.show()


if __name__ == '__main__':
    processor = RetinaDataset(root_path, batch_size, class_labels)
    processor.visualize_samples(processor.train_loader)

train.py
# -*- coding: utf-8 -*-
# 作者: Mr Cun
# 文件名: train.py
# 创建时间: 2024-10-25
# 文件描述模型训练
import json
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from timm.utils import accuracy, AverageMeter, ModelEma
from sklearn.metrics import classification_report
from timm.data.mixup import Mixup
from nnMamba4cls import *
from torchvision import datasets
torch.backends.cudnn.benchmark = False
import warnings
from dr_dataset import RetinaDataset

warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Fix random seeds
def seed_everything(seed=42):
    """Seed every RNG source (hash seed, torch CPU and CUDA) and force
    deterministic cuDNN, for reproducible runs."""
    # Bug fix: was 'PYHTONHASHSEED' (typo), so the env var had no effect.
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
# Global parameters
# Global training configuration (reconstructed: '=' and quotes were stripped).
model_lr = 3e-4
BATCH_SIZE = 64
EPOCHS = 300
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
use_amp = False          # use mixed precision
use_dp = False           # enable DataParallel multi-GPU training
classes = 2
resume = None            # path to a checkpoint to resume from, or None
CLIP_GRAD = 5.0
Best_ACC = 0             # best validation accuracy so far
use_ema = False
use_mixup = False
model_ema_decay = 0.9998
start_epoch = 1
seed = 1
seed_everything(seed)
# Data preprocessing
# Training augmentation; Normalize uses dataset-specific channel statistics.
transform = transforms.Compose([
    transforms.RandomRotation(10),
    transforms.GaussianBlur(kernel_size=(5, 5), sigma=(0.1, 3.0)),
    transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.41593555, 0.22245076, 0.075719066],
                         std=[0.23819199, 0.13202211, 0.05282707])
])
# Evaluation: resize + normalize only, no augmentation.
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.41593555, 0.22245076, 0.075719066],
                         std=[0.23819199, 0.13202211, 0.05282707])
])
# Mixup/CutMix augmentation (only applied when use_mixup is True).
mixup_fn = Mixup(mixup_alpha=0.8, cutmix_alpha=1.0, cutmix_minmax=None,
                 prob=0.1, switch_prob=0.5, mode='batch',
                 label_smoothing=0.1, num_classes=classes)
# Load the datasets
root_path = '/home/aic/deep_learning_data/retino_data'
train_path = os.path.join(root_path, 'train')
valid_path = os.path.join(root_path, 'valid')
test_path = os.path.join(root_path, 'test')
dataset_train = datasets.ImageFolder(train_path, transform=transform)
dataset_test = datasets.ImageFolder(test_path, transform=transform_test)
class_labels = {0: 'Diabetic Retinopathy', 1: 'No Diabetic Retinopathy'}
# Data loaders
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE,
                                           num_workers=8, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)
# Loss functions
# criterion_train = SoftTargetCrossEntropy()  # use together with mixup_fn
criterion_train = torch.nn.CrossEntropyLoss()
criterion_val = torch.nn.CrossEntropyLoss()
# Build the model
model_ft = nnMambaEncoder()
print(model_ft)
if resume:
    # Resume from a checkpoint dict saved by val(): restores weights,
    # best accuracy and the epoch counter.
    model = torch.load(resume)
    print(model['state_dict'].keys())
    model_ft.load_state_dict(model['state_dict'])
    Best_ACC = model['Best_ACC']
    start_epoch = model['epoch'] + 1
model_ft.to(DEVICE)
print(model_ft)
# Plain AdamW with a low learning rate.
optimizer = optim.AdamW(model_ft.parameters(), lr=model_lr)
cosine_schedule = optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=20, eta_min=1e-6)
# Mixed precision and EMA setup
if use_amp:
    scaler = torch.cuda.amp.GradScaler()
if torch.cuda.device_count() > 1 and use_dp:
    print("Lets use", torch.cuda.device_count(), "GPUs!")
    model_ft = torch.nn.DataParallel(model_ft)
if use_ema:
    model_ema = ModelEma(model_ft, decay=model_ema_decay, device=DEVICE, resume=resume)
else:
    model_ema = None
# Training loop definition
def train(model, device, train_loader, optimizer, epoch, model_ema):
    """Run one training epoch.

    Returns (average_loss, average_top1_accuracy) over the epoch.
    Relies on module globals: use_mixup, mixup_fn, use_amp, scaler,
    criterion_train, CLIP_GRAD.
    """
    model.train()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)
        if use_mixup:
            samples, targets = mixup_fn(data, target)
        else:
            samples, targets = data, target
        output = model(samples)
        optimizer.zero_grad()
        if use_amp:
            with torch.cuda.amp.autocast():
                loss = torch.nan_to_num(criterion_train(output, targets))
            scaler.scale(loss).backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_GRAD)
            # Unscales gradients and calls or skips optimizer.step()
            scaler.step(optimizer)
            # Updates the scale for next iteration
            scaler.update()
        else:
            loss = criterion_train(output, targets)
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_GRAD)
            optimizer.step()
        if model_ema is not None:
            model_ema.update(model)
        torch.cuda.synchronize()
        lr = optimizer.state_dict()['param_groups'][0]['lr']
        acc1 = accuracy(output, target)[0]
        # Bug fix: loss_meter.update was called twice per batch in the
        # original, double-counting each batch in the running average.
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        if (batch_idx + 1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR:{:.9f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item(), lr))
    ave_loss = loss_meter.avg
    acc = acc1_meter.avg
    print('epoch:{}\tloss:{:.2f}\tacc:{:.2f}'.format(epoch, ave_loss, acc))
    return ave_loss, acc
# Validation loop
@torch.no_grad()  # Bug fix: the stripped paste lost the '@' — without it this
# line was a no-op and validation ran with gradients enabled.
def val(model, device, test_loader):
    """Evaluate on the test loader; checkpoint the model.

    Returns (true_labels, predicted_labels, average_loss, top1_accuracy).
    Saves 'best.pth' when accuracy improves and a per-epoch checkpoint dict.
    Relies on module globals: criterion_val, use_ema, file_dir, epoch.
    """
    global Best_ACC
    model.eval()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    # acc5_meter = AverageMeter()
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    val_list = []
    pred_list = []
    for data, target in test_loader:
        for t in target:
            val_list.append(t.data.item())
        data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)
        output = model(data)
        loss = criterion_val(output, target)
        _, pred = torch.max(output.data, 1)
        for p in pred:
            pred_list.append(p.data.item())
        acc1 = accuracy(output, target)[0]
        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
    acc = acc1_meter.avg
    print('\nVal set: Average loss: {:.4f}\tAcc1:{:.3f}%\t'.format(loss_meter.avg, acc))
    # Save the full model whenever accuracy improves.
    if acc > Best_ACC:
        if isinstance(model, torch.nn.DataParallel):
            torch.save(model.module, file_dir + '/' + 'best.pth')
        else:
            torch.save(model, file_dir + '/' + 'best.pth')
        Best_ACC = acc
    # Always save a resumable per-epoch checkpoint dict.
    if isinstance(model, torch.nn.DataParallel):
        state = {'epoch': epoch, 'state_dict': model.module.state_dict(), 'Best_ACC': Best_ACC}
        if use_ema:
            state['state_dict_ema'] = model.module.state_dict()
        torch.save(state, file_dir + '/' + 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth')
    else:
        state = {'epoch': epoch, 'state_dict': model.state_dict(), 'Best_ACC': Best_ACC}
        if use_ema:
            state['state_dict_ema'] = model.state_dict()
        torch.save(state, file_dir + '/' + 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth')
    return val_list, pred_list, loss_meter.avg, acc
# Plot training/validation loss and accuracy curves
def plot_training_curves(file_dir,epoch_list,train_loss_list,val_loss_list,train_acc_list,val_acc_list):fig plt.figure(1)plt.plot(epoch_list, train_loss_list, r-, labeluTrain Loss)# 显示图例plt.plot(epoch_list, val_loss_list, b-, labeluVal Loss)plt.legend([Train Loss, Val Loss], locupper right)plt.xlabel(uepoch)plt.ylabel(uloss)plt.title(Model Loss )plt.savefig(file_dir /loss.png)plt.close(1)fig2 plt.figure(2)plt.plot(epoch_list, train_acc_list, r-, labeluTrain Acc)plt.plot(epoch_list, val_acc_list, b-, labeluVal Acc)plt.legend([Train Acc, Val Acc], loclower right)plt.title(Model Acc)plt.ylabel(acc)plt.xlabel(epoch)plt.savefig(file_dir /acc.png)plt.close(2)if __name__ __main__:# 创建保存模型的文件夹file_dir checkpoints/EfficientVMamba/if os.path.exists(file_dir):print(true)os.makedirs(file_dir, exist_okTrue)else:os.makedirs(file_dir)# 训练与验证is_set_lr Falselog_dir {}train_loss_list, val_loss_list, train_acc_list, val_acc_list, epoch_list [], [], [], [], []if resume and os.path.isfile(file_dirresult.json):with open(file_dirresult.json, r, encodingutf-8) as file:logs json.load(file)train_acc_list logs[train_acc]train_loss_list logs[train_loss]val_acc_list logs[val_acc]val_loss_list logs[val_loss]epoch_list logs[epoch_list]for epoch in range(start_epoch, EPOCHS 1):epoch_list.append(epoch)log_dir[epoch_list] epoch_listtrain_loss, train_acc train(model_ft,DEVICE,train_loader,optimizer,epoch,model_ema)train_loss_list.append(train_loss)train_acc_list.append(train_acc)log_dir[train_acc] train_acc_listlog_dir[train_loss] train_loss_listif use_ema:val_list, pred_list, val_loss, val_acc val(model_ema.ema, DEVICE, test_loader)else:val_list, pred_list, val_loss, val_acc val(model_ft, DEVICE, test_loader)val_loss_list.append(val_loss)val_acc_list.append(val_acc)log_dir[val_acc] val_acc_listlog_dir[val_loss] val_loss_listlog_dir[best_acc] Best_ACCwith open(file_dir /result.json, w, encodingutf-8) as file:file.write(json.dumps(log_dir))print(classification_report(val_list, pred_list, 
target_namesdataset_train.class_to_idx))if epoch 600:cosine_schedule.step()else:if not is_set_lr:for param_group in optimizer.param_groups:param_group[lr] 1e-6is_set_lr True# 绘制训练和验证的损失和准确率曲线plot_training_curves(file_dir,epoch_list,train_loss_list,val_loss_list,train_acc_list,val_acc_list)3.效果 对比之前的几种mamba针对糖尿病视网膜病变数据集采用同样的训练参数300 Epochs32 Batch Size。
| 序号 | 模型 | 验证集最高准确率 | 显存占用 | 训练时间 |
| --- | --- | --- | --- | --- |
| 1 | Vision Mamba | 94% | 约12GB | 约3小时 |
| 2 | VMamba | 98.12% | 约24GB | 约2小时 |
| 3 | EfficientVMamba | 95.23% | 约20GB | 约2小时 |
| 4 | MedMamba | 92.3% | 约20GB | 约2小时 |
| 5 | MambaVision | 95.4% | 约20GB | 约2小时 |
| 6 | nnMamba | 96.53% | 约6GB | 约30分钟 |
4.修改代码试试看
这里我只是增加了一层Residual Block提取，验证集最好的ACC是96.53%。
class nnMambaEncoder(nn.Module):def __init__(self, in_ch3, channels32, blocks3, number_classes2):super(nnMambaEncoder, self).__init__()self.in_conv DoubleConv(in_ch, channels, stride2, kernel_size3)self.mamba_layer_stem MambaLayer(dimchannels, # Model dimension d_modeld_state8, # SSM state expansion factord_conv4, # Local convolution widthexpand2 # Block expansion factor)self.layer1 make_res_layer(channels, channels * 2, blocks, stride2)self.layer2 make_res_layer(channels * 2, channels * 4, blocks, stride2)self.layer3 make_res_layer(channels * 4, channels * 8, blocks, stride2)self.layer4 make_res_layer(channels * 8, channels * 16, blocks, stride2)self.pooling nn.AdaptiveAvgPool2d((1, 1))self.mamba_seq MambaSeq(dimchannels*2, # Model dimension d_modeld_state8, # SSM state expansion factord_conv2, # Local convolution widthexpand2 # Block expansion factor)self.mlp nn.Sequential(nn.Linear(channels*30, channels), nn.ReLU(), nn.Dropout(0.5), nn.Linear(channels, number_classes))def forward(self, x):c1 self.in_conv(x)c1_s self.mamba_layer_stem(c1) c1c2 self.layer1(c1_s)c3 self.layer2(c2)c4 self.layer3(c3)c5 self.layer4(c4)pooled_c2_s self.pooling(c2)pooled_c3_s self.pooling(c3)pooled_c4_s self.pooling(c4)pooled_c5_s self.pooling(c5)h_feature torch.cat((pooled_c2_s.reshape(c1.shape[0], c1.shape[1]*2, 1),pooled_c3_s.reshape(c1.shape[0], c1.shape[1]*2, 2),pooled_c4_s.reshape(c1.shape[0], c1.shape[1]*2, 4),pooled_c5_s.reshape(c1.shape[0], c1.shape[1]*2, 8)), dim2)h_feature_att self.mamba_seq(h_feature) h_feature # B 64 15h_feature h_feature_att.reshape(c1.shape[0], -1) # B 960return self.mlp(h_feature)如果继续优化层的设置应该会有更好的提升这里就不继续做了 文章转载自: http://www.morning.hmxrs.cn.gov.cn.hmxrs.cn http://www.morning.lwcqh.cn.gov.cn.lwcqh.cn http://www.morning.gklxm.cn.gov.cn.gklxm.cn http://www.morning.rlkgc.cn.gov.cn.rlkgc.cn http://www.morning.zrnph.cn.gov.cn.zrnph.cn http://www.morning.bpmfr.cn.gov.cn.bpmfr.cn http://www.morning.bpmdh.cn.gov.cn.bpmdh.cn 
http://www.morning.dpflt.cn.gov.cn.dpflt.cn http://www.morning.zbmcz.cn.gov.cn.zbmcz.cn http://www.morning.qflcb.cn.gov.cn.qflcb.cn http://www.morning.njpny.cn.gov.cn.njpny.cn http://www.morning.mldrd.cn.gov.cn.mldrd.cn http://www.morning.trrpb.cn.gov.cn.trrpb.cn http://www.morning.jlxqx.cn.gov.cn.jlxqx.cn http://www.morning.gsyns.cn.gov.cn.gsyns.cn http://www.morning.qwdlj.cn.gov.cn.qwdlj.cn http://www.morning.bncrx.cn.gov.cn.bncrx.cn http://www.morning.tkflb.cn.gov.cn.tkflb.cn http://www.morning.jpdbj.cn.gov.cn.jpdbj.cn http://www.morning.amlutsp.cn.gov.cn.amlutsp.cn http://www.morning.rwlsr.cn.gov.cn.rwlsr.cn http://www.morning.jthjr.cn.gov.cn.jthjr.cn http://www.morning.xqwq.cn.gov.cn.xqwq.cn http://www.morning.nba1on1.com.gov.cn.nba1on1.com http://www.morning.lzsxp.cn.gov.cn.lzsxp.cn http://www.morning.clccg.cn.gov.cn.clccg.cn http://www.morning.chzqy.cn.gov.cn.chzqy.cn http://www.morning.bgqqr.cn.gov.cn.bgqqr.cn http://www.morning.bphqd.cn.gov.cn.bphqd.cn http://www.morning.rfrnc.cn.gov.cn.rfrnc.cn http://www.morning.ybgyz.cn.gov.cn.ybgyz.cn http://www.morning.jrsgs.cn.gov.cn.jrsgs.cn http://www.morning.trrhj.cn.gov.cn.trrhj.cn http://www.morning.grzpc.cn.gov.cn.grzpc.cn http://www.morning.tcxzn.cn.gov.cn.tcxzn.cn http://www.morning.lmxrt.cn.gov.cn.lmxrt.cn http://www.morning.wmglg.cn.gov.cn.wmglg.cn http://www.morning.bpmtq.cn.gov.cn.bpmtq.cn http://www.morning.hfxks.cn.gov.cn.hfxks.cn http://www.morning.fnczn.cn.gov.cn.fnczn.cn http://www.morning.txlxr.cn.gov.cn.txlxr.cn http://www.morning.gwtgt.cn.gov.cn.gwtgt.cn http://www.morning.nzlsm.cn.gov.cn.nzlsm.cn http://www.morning.zdqsc.cn.gov.cn.zdqsc.cn http://www.morning.kfsfm.cn.gov.cn.kfsfm.cn http://www.morning.rnnwd.cn.gov.cn.rnnwd.cn http://www.morning.wqbfd.cn.gov.cn.wqbfd.cn http://www.morning.dyght.cn.gov.cn.dyght.cn http://www.morning.hlppp.cn.gov.cn.hlppp.cn http://www.morning.hgtr.cn.gov.cn.hgtr.cn http://www.morning.yxmcx.cn.gov.cn.yxmcx.cn http://www.morning.khclr.cn.gov.cn.khclr.cn 
http://www.morning.kdxzy.cn.gov.cn.kdxzy.cn http://www.morning.rxcqt.cn.gov.cn.rxcqt.cn http://www.morning.kdrly.cn.gov.cn.kdrly.cn http://www.morning.lffgs.cn.gov.cn.lffgs.cn http://www.morning.rhdln.cn.gov.cn.rhdln.cn http://www.morning.zknjy.cn.gov.cn.zknjy.cn http://www.morning.pmptm.cn.gov.cn.pmptm.cn http://www.morning.xlmgq.cn.gov.cn.xlmgq.cn http://www.morning.mmtjk.cn.gov.cn.mmtjk.cn http://www.morning.tmrjb.cn.gov.cn.tmrjb.cn http://www.morning.rlwcs.cn.gov.cn.rlwcs.cn http://www.morning.tsxg.cn.gov.cn.tsxg.cn http://www.morning.btwlp.cn.gov.cn.btwlp.cn http://www.morning.gsdbg.cn.gov.cn.gsdbg.cn http://www.morning.junyaod.com.gov.cn.junyaod.com http://www.morning.tzjqm.cn.gov.cn.tzjqm.cn http://www.morning.vvbsxm.cn.gov.cn.vvbsxm.cn http://www.morning.pqfbk.cn.gov.cn.pqfbk.cn http://www.morning.brkrt.cn.gov.cn.brkrt.cn http://www.morning.hkng.cn.gov.cn.hkng.cn http://www.morning.gydth.cn.gov.cn.gydth.cn http://www.morning.pwgzh.cn.gov.cn.pwgzh.cn http://www.morning.qfmcm.cn.gov.cn.qfmcm.cn http://www.morning.gthc.cn.gov.cn.gthc.cn http://www.morning.lwgrf.cn.gov.cn.lwgrf.cn http://www.morning.tgfjm.cn.gov.cn.tgfjm.cn http://www.morning.gtkyr.cn.gov.cn.gtkyr.cn http://www.morning.fnssm.cn.gov.cn.fnssm.cn