1. train_val.prototxt

name: "CIFAR10_quick"
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
# mirror: true
# mean_file: "examples/cifar10/mean.binaryproto"uu
mean_file: "myself/00b/00bmean.binaryproto"
}
data_param {
# source: "examples/cifar10/cifar10_train_lmdb"
source: "myself/00b/00b_train_lmdb"
batch_size:
backend: LMDB
}
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
# mean_file: "examples/cifar10/mean.binaryproto"
mean_file: "myself/00b/00bmean.binaryproto"
}
data_param {
# source: "examples/cifar10/cifar10_test_lmdb"
source: "myself/00b/00b_val_lmdb"
batch_size:
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output:
# pad:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output:
# pad:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output:
# pad:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "conv4"
type: "Convolution"
bottom: "pool3"
top: "conv4"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output:
# pad:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "pool4"
type: "Pooling"
bottom: "conv4"
top: "pool4"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool4"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.1
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.1
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
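
Once the blank values above are filled in, a quick sanity check is to load the net with pycaffe and print the shape each layer produces; a minimal sketch, assuming the file lives at the path used above:

import caffe

caffe.set_mode_cpu()
# Load the training-phase net definition only (no weights): this verifies the
# prototxt parses and shows the blob shape flowing out of every layer.
net = caffe.Net('myself/00b/train_val.prototxt', caffe.TRAIN)
for name, blob in net.blobs.items():
    print(name, blob.data.shape)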

2. solver.prototxt

# reduce the learning rate after a set number of epochs by the factor gamma

# The train/test net protocol buffer definition
net: "myself/00b/train_val.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter:
# Carry out testing every test_interval training iterations.
test_interval:
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# lr_policy: "step"
gamma: 0.1
stepsize:
# How often (in iterations) to display results
display:
# The maximum number of iterations
max_iter:
# snapshot intermediate results
# snapshot:
# snapshot_format: HDF5
snapshot_prefix: "myself/00b/00b"
# solver mode: CPU or GPU
solver_mode: CPU
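
To train with this solver you can call the caffe binary (as in the references below), or drive it from pycaffe; a minimal sketch, again assuming the paths above and that the blank values are filled in:

import caffe

caffe.set_mode_cpu()  # matches solver_mode: CPU above
solver = caffe.get_solver('myself/00b/solver.prototxt')
solver.solve()      # runs the full max_iter schedule, testing every test_interval
# solver.step(100)  # alternative: advance 100 iterations at a time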

3. deploy.prototxt

name: "CIFAR10_quick"
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 1 dim: 3 dim: 32 dim: 32 } }
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output:
kernel_size:
stride:
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
convolution_param {
num_output:
kernel_size:
stride:
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
convolution_param {
num_output:
kernel_size:
stride:
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "conv4"
type: "Convolution"
bottom: "pool3"
top: "conv4"
convolution_param {
num_output:
kernel_size:
stride:
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "pool4"
type: "Pooling"
bottom: "conv4"
top: "pool4"
pooling_param {
pool: AVE
kernel_size:
stride:
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool4"
top: "ip1"
inner_product_param {
num_output:
}
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
inner_product_param {
num_output:
}
}
layer {
#name: "loss" name: "prob"
type: "Softmax"
bottom: "ip2"
top: "prob" #top: "loss"
}
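
A minimal forward-pass sketch through this deploy file with pycaffe, assuming the 1x3x32x32 input shape above and a trained snapshot; the snapshot name 00b_iter_4000.caffemodel is hypothetical, use whatever training actually produced:

import numpy as np
import caffe

caffe.set_mode_cpu()
net = caffe.Net('myself/00b/deploy.prototxt',
                'myself/00b/00b_iter_4000.caffemodel',  # hypothetical snapshot name
                caffe.TEST)
# Feed one mean-subtracted 3x32x32 image; random data here just exercises the net.
net.blobs['data'].data[...] = np.random.rand(1, 3, 32, 32).astype(np.float32)
out = net.forward()
print(out['prob'].argmax())  # index of the most probable class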

Reference 1:

The model is the caffenet model that ships with Caffe, located under models/bvlc_reference_caffenet/. Copy the two configuration files we need into the myfile folder:

# sudo cp models/bvlc_reference_caffenet/solver.prototxt examples/myfile/
# sudo cp models/bvlc_reference_caffenet/train_val.prototxt examples/myfile/

Edit train_val.prototxt: only the data layers of the two phases need changing; everything else can be left alone.


name: "CaffeNet"
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "examples/myfile/mean.binaryproto"
}
data_param {
source: "examples/myfile/img_train_lmdb"
batch_size: 256
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 227
mean_file: "examples/myfile/mean.binaryproto"
}
data_param {
source: "examples/myfile/img_test_lmdb"
batch_size: 50
backend: LMDB
}
}
 

In effect, only two things change, the mean_file and source of the two data layers; everything else stays the same.

Then edit solver.prototxt:

# sudo vi examples/myfile/solver.prototxt
net: "examples/myfile/train_val.prototxt"
test_iter: 2
test_interval: 50
base_lr: 0.001
lr_policy: "step"
gamma: 0.1
stepsize: 100
display: 20
max_iter: 500
momentum: 0.9
weight_decay: 0.005
solver_mode: GPU

There are 100 test images and the batch_size is 50, so test_iter: 2 covers the full test set (batch_size × test_iter ≥ number of test images). During training, the learning rate is stepped down gradually (lr_policy: "step" with gamma: 0.1 shrinks it by 10× every stepsize iterations).
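The same arithmetic as a quick check (plain Python, nothing Caffe-specific):

import math

num_test_images = 100
batch_size = 50
test_iter = int(math.ceil(num_test_images / float(batch_size)))
print(test_iter)  # 2 -> one test pass covers all 100 images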

Reference 2:

With the lmdb files and the mean file prepared earlier, we now take GoogLeNet as an example of modifying a network and training a model.

Copy the bvlc_googlenet folder from caffe-master\models into caffe-master\examples\imagenet. (Our lmdb files and mean file are already there, so it is convenient to keep everything together.)

Open train_val.prototxt and modify it:

1. Modify the data layers:

layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 224
mean_file: "examples/imagenet/mydata_mean.binaryproto" # mean file
#mean_value: 104 # comment these out
#mean_value: 117
#mean_value: 123
}
data_param {
source: "examples/imagenet/mydata_train_lmdb" # training-set lmdb
batch_size: 32 # adjust to your GPU memory
backend: LMDB
}
}

layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 224
mean_file: "examples/imagenet/mydata_mean.binaryproto" # mean file
#mean_value: 104
#mean_value: 117
#mean_value: 123
}
data_param {
source: "examples/imagenet/mydata_val_lmdb" # validation-set lmdb
batch_size: 50 # batch_size times the solver's test_iter should roughly equal the validation-set size
backend: LMDB
}
}

2. Modify the outputs:

Because GoogLeNet has three outputs, three places must be changed. Most other networks have a single output, so one change suffices.

For fine-tuning, the output layers must also be renamed. (Weights are initialized from the pretrained model by layer name; since num_output has changed, the pretrained parameters for these layers no longer fit, so renaming them forces a fresh initialization.)

layer {
name: "loss1/classifier"
type: "InnerProduct"
bottom: "loss1/fc"
top: "loss1/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000 # change to the number of classes in your dataset
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss2/classifier"
type: "InnerProduct"
bottom: "loss2/fc"
top: "loss2/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000 # change to the number of classes in your dataset
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss3/classifier"
type: "InnerProduct"
bottom: "pool5/7x7_s1"
top: "loss3/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000 # change to the number of classes in your dataset
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}

3. Open deploy.prototxt and modify:

layer {
name: "loss3/classifier"
type: "InnerProduct"
bottom: "pool5/7x7_s1"
top: "loss3/classifier"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000 # change to the number of classes in your dataset
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}

For fine-tuning, rename this layer to match the name used in train_val.prototxt.

Next, open the solver and modify it:

net: "examples/imagenet/bvlc_googlenet/train_val.prototxt" #路径不要错
test_iter: 1000 # explained earlier
test_interval: 4000 # run a test every 4000 iterations
test_initialization: false
display: 40
average_loss: 40
base_lr: 0.01
lr_policy: "step"
stepsize: 320000 # change the learning rate every 320000 iterations
gamma: 0.96
max_iter: 10000000 # total number of iterations
momentum: 0.9
weight_decay: 0.0002
snapshot: 40000
snapshot_prefix: "examples/imagenet/bvlc_googlenet" # caffemodels are saved under imagenet/ with names like bvlc_googlenet_iter_***.caffemodel
solver_mode: GPU

Now go back to caffe-master\examples\imagenet, open train_caffenet.sh, and modify it:

(For fine-tuning, add -weights **/**/**.caffemodel to the script, i.e., the path of the caffemodel to fine-tune from.)

#!/usr/bin/env sh

./build/tools/caffe train \
-solver examples/imagenet/bvlc_googlenet/solver.prototxt -gpu 0

(If you have several GPUs, choose whichever you like.) Then execute the script from the caffe-master directory to start training:

./examples/imagenet/train_caffenet.sh
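
For reference, here is the pycaffe equivalent of the -weights flag; a sketch, with the pretrained caffemodel path as an assumption. It also makes the renaming rule from step 2 concrete: weights are copied by layer name, so renamed classifier layers keep their fresh initialization.

import caffe

caffe.set_mode_gpu()
solver = caffe.SGDSolver('examples/imagenet/bvlc_googlenet/solver.prototxt')
# copy_from matches layers by name: renamed layers (e.g. the three classifiers)
# are skipped and keep their random initialization.
solver.net.copy_from('models/bvlc_googlenet/bvlc_googlenet.caffemodel')  # assumed path
solver.solve()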

The trained caffemodel can then be used for image classification. Four files are needed: (1) the labels.txt obtained earlier, (2) the mydata_mean.binaryproto obtained earlier, (3) the trained caffemodel, and (4) the modified deploy.prototxt. For the detailed procedure, see: http://blog.csdn.net/sinat_30071459/article/details/50974695
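
A hedged sketch of that classification step using the four files; the labels.txt location, the image path, and the snapshot iteration number are assumptions:

import numpy as np
import caffe
from caffe.proto import caffe_pb2

# (1) labels, (2) mean, (3) trained weights, (4) deploy definition
labels = [line.strip() for line in open('examples/imagenet/labels.txt')]  # assumed location

blob = caffe_pb2.BlobProto()
blob.ParseFromString(open('examples/imagenet/mydata_mean.binaryproto', 'rb').read())
mu = caffe.io.blobproto_to_array(blob)[0].mean(1).mean(1)  # per-channel mean (BGR)

net = caffe.Net('examples/imagenet/bvlc_googlenet/deploy.prototxt',
                'examples/imagenet/bvlc_googlenet_iter_40000.caffemodel',  # hypothetical snapshot
                caffe.TEST)

t = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
t.set_transpose('data', (2, 0, 1))     # HxWxC -> CxHxW
t.set_mean('data', mu)
t.set_raw_scale('data', 255)           # load_image returns [0,1]; the net expects [0,255]
t.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR

img = caffe.io.load_image('some_image.jpg')  # placeholder path
net.blobs['data'].data[...] = t.preprocess('data', img)
prob = net.forward()['prob'][0]
print(labels[prob.argmax()], prob.max())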

Reference 3:

Notes on writing the *_train_test.prototxt, *_deploy.prototxt, and *_solver.prototxt files

1. The *_train_test.prototxt file

This is the network configuration file for training and testing.

(1) In the data layers, the include parameter:

include {
phase: TRAIN/TEST
}

TRAIN and TEST must not be wrapped in quotation marks ("..."), otherwise an error is raised. Fortunately, the error message reports which line went wrong; a reported 8, for example, means the error is on line 8 of the configuration file.

(2) Convolution layers and fully connected layers are similar. A convolution layer (Convolution) and a fully connected layer (InnerProduct, easily mistranslated as an "inner product layer") resemble each other in two respects. First, both carry two param blocks, one for the weights and one for the bias:

param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}

Second, the parameters inside convolution_param{} and inner_product_param{} are similar, in places even identical.


(3) The mean file *_mean.binaryproto belongs inside transform_param{}, while the training and test data sources belong inside data_param{}.

2. The *_deploy.prototxt file

[1] *_deploy.prototxt is structured slightly differently from *_train_test.prototxt. To begin with, the TRAIN/TEST data layers of the train/test network are gone.

[2] The data layer is also written differently, and more concisely:

input: "data" input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 32

Note the input: "data" line: it names the data blob. Without it, the first convolution layer cannot find its input (leaving it out raises an error; I hit this myself at first). The four numbers below it play the same roles as the four dimensions of a batch, (1, 3, 32, 32): batch size, channels, height, width.

[3] The weight_filler{} and bias_filler{} parameters of the convolution and fully connected layers no longer need to be written, because those values are supplied by the trained *.caffemodel file.
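
One way to see this is to load the trained model and inspect the parameter blobs the caffemodel supplies; a sketch with placeholder file names:

import caffe

net = caffe.Net('deploy.prototxt', 'trained.caffemodel', caffe.TEST)  # placeholder paths
for name, params in net.params.items():
    # each entry is [weights, biases]; these come from the caffemodel,
    # which is why deploy files need no weight_filler/bias_filler
    print(name, [p.data.shape for p in params])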

[4] Changes to the output: (1) there is no test module reporting test accuracy; (2) the output layer itself changes.

The *_train_test.prototxt file:

layer{   name: "loss"   type: "SoftmaxWithLoss"#注意此处与下面的不同   bottom: "ip2"   bottom: "label"#注意标签项在下面没有了,因为下面的预测属于哪个标签,因此不能提供标签   top: "loss" }

The *_deploy.prototxt file:

layer {   name: "prob"   type: "Softmax"   bottom: "ip2"   top: "prob" }

Note that the output layer's type differs between the two files: SoftmaxWithLoss versus Softmax. And to keep training output and deployment output apart, the top is loss during training and prob at deployment.

3. The *_solver.prototxt file

net: "test.prototxt" #训练网络的配置文件 test_iter: 100 #test_iter 指明在测试阶段有多上个前向过程(也就是有多少图片)被执行。 在MNIST例子里,在网络配置文件里已经设置test网络的batch size=100,这里test_iter 设置为100,那在测试阶段共有100*100=10000 图片被处理 test_interval: 500 #每500次训练迭代后,执行一次test base_lr: 0.01 #学习率初始化为0.01 momentum:0.9 #u=0.9 weight_decay:0.0005 # lr_policy: "inv" gamma: 0.0001 power: 0.75 #以上三个参数都和降低学习率有关,详细的学习策略和计算公式见下面 // The learning rate decay policy. The currently implemented learning rate

// policies are as follows:

//    - fixed: always return base_lr.

//    - step: return base_lr * gamma ^ (floor(iter / step))

//    - exp: return base_lr * gamma ^ iter

//    - inv: return base_lr * (1 + gamma * iter) ^ (- power)

//    - multistep: similar to step but it allows non uniform steps defined by

//      stepvalue

//    - poly: the effective learning rate follows a polynomial decay, to be

//      zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power)

//    - sigmoid: the effective learning rate follows a sigmod decay

//      return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize))))

// where base_lr, max_iter, gamma, step, stepvalue and power are defined

// in the solver parameter protocol buffer, and iter is the current iteration. display:100 #每100次迭代,显示结果 snapshot: 5000 #每5000次迭代,保存一次快照 snapshot_prefix: "path_prefix" #快照保存前缀:更准确的说是快照保存路径+前缀,应为文件名后的名字是固定的 solver_mode:GPU #选择解算器是用cpu还是gpu
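
The policies above are easy to reproduce; here is a small standalone Python sketch of the formulas listed in the comment (no Caffe required):

import math

def caffe_lr(policy, it, base_lr=0.01, gamma=0.0001, power=0.75,
             stepsize=500, max_iter=10000, stepvalues=()):
    if policy == 'fixed':
        return base_lr
    if policy == 'step':
        return base_lr * gamma ** math.floor(it / stepsize)
    if policy == 'exp':
        return base_lr * gamma ** it
    if policy == 'inv':
        return base_lr * (1 + gamma * it) ** (-power)
    if policy == 'multistep':
        # like step, but the change points are listed explicitly in stepvalues
        return base_lr * gamma ** sum(1 for sv in stepvalues if it >= sv)
    if policy == 'poly':
        return base_lr * (1 - it / float(max_iter)) ** power
    if policy == 'sigmoid':
        return base_lr / (1 + math.exp(-gamma * (it - stepsize)))
    raise ValueError('unknown policy: ' + policy)

# e.g. the "inv" policy used above decays smoothly with iteration:
for it in (0, 100, 500, 10000):
    print(it, caffe_lr('inv', it))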

Writing the batch file (on Windows):

F:/caffe/caffe-windows-master/bin/caffe.exe train --solver=C:/Users/Administrator/Desktop/caffe_test/cifar-10/cifar10_solver.prototxt --gpu=all
pause

Reference 4:


Converting train_val.prototxt into deploy.prototxt


1. Delete the input data layers (the type: "Data" layers with include { phase: TRAIN/TEST }) and add an input-dimension description in their place:

  1. input: "data"
  2. input_dim: 1
  3. input_dim: 3
  4. input_dim: 224
  5. input_dim: 224
  6. force_backward: true
input: "data"
input_dim: 1
input_dim: 3
input_dim: 224
input_dim: 224
force_backward: true

2. Remove the final "loss" and "accuracy" layers and add a "prob" layer. (The listing below uses the old V1 prototxt syntax, layers/SOFTMAX; in the current format it would be layer { ... type: "Softmax" ... }.)

layers {
name: "prob"
type: SOFTMAX
bottom: "fc8"
top: "prob"
}

3. If the train_val file contains other preprocessing layers, things are slightly more involved. Suppose, as below, that a layer computing the mean of the input data sits between the 'data' layer and the 'conv1' layer (with bottom: "data" / top: "conv1"):

layer {
name: "mean"
type: "Convolution"
bottom: "data"
top: "data"
param {
lr_mult: 0
decay_mult: 0
}
…}
In the deploy.prototxt file the "mean" layer must be kept; only its top changes (it no longer operates in-place), and 'conv1' must change accordingly (bottom: "mean" / top: "conv1"):
layer {
name: "mean"
type: "Convolution"
bottom: "data"
top: "mean"
param {
lr_mult: 0
decay_mult: 0
}
…}
