Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
8a6cd51
Modified some example conf files for consistent benchmarking.
Feb 23, 2016
2204dd7
LayerProto in job.proto specifies that extensions should use 1001 to …
Feb 23, 2016
bcbcffb
Created more sample conf files
Feb 24, 2016
0c54a87
Merge remote-tracking branch 'upstream/master'
Feb 27, 2016
32a663e
Modified ClusterProto to support GPU information.
undisputed-seraphim Mar 11, 2016
dddac21
Modified common.proto to add enum for DeviceType.
undisputed-seraphim Mar 11, 2016
8cc7fa5
Slightly modified worker.h for now.
undisputed-seraphim Mar 11, 2016
681ebba
Added DeviceType across Driver, Worker and JobProto.
undisputed-seraphim Mar 12, 2016
64b43e2
Fix the worker sleep hack into a more proper solution.
undisputed-seraphim Mar 12, 2016
884fc35
Added 2x2 conf file for mnist.
undisputed-seraphim Mar 12, 2016
92bda99
C++ Linting.
undisputed-seraphim Mar 12, 2016
9837faa
Implemented thread pool.
undisputed-seraphim Mar 13, 2016
c84c087
ActivateDevice in Context has been removed because now we can always …
undisputed-seraphim Mar 13, 2016
3dbc22b
Revert "ActivateDevice in Context has been removed because now we can…
undisputed-seraphim Mar 14, 2016
b3d00aa
Replaced SetupDevice to chain call ActivateDevice.
undisputed-seraphim Mar 15, 2016
831ca21
Merge branch 'reorganize' of github.com:undisputed-seraphim/incubator…
undisputed-seraphim Mar 15, 2016
04bc140
Removed commented out dead code.
undisputed-seraphim Mar 15, 2016
b81bd52
Transplanted worker setup logic from Driver to Worker. Errors exist.
undisputed-seraphim Mar 15, 2016
f3a1a0f
Merge remote-tracking branch 'upstream/master'
Mar 19, 2016
fbf9c12
Merge branch 'master' into experimental
Mar 19, 2016
1acee36
Removed useless conf files.
undisputed-seraphim Mar 22, 2016
864f4e5
Merge branch 'experimental' of github.com:undisputed-seraphim/incubat…
undisputed-seraphim Mar 22, 2016
f22b24d
Work for the night, going to try another strategy.
undisputed-seraphim Mar 22, 2016
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions examples/cifar10/cudnn_hybrid.conf
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
name: "cifar10-convnet"
train_steps: 10000
train_steps: 1000
test_steps: 0
test_freq: 200
#validate_steps: 100
#validate_freq: 300
disp_freq: 200
disp_freq: 50
gpu: 0
gpu: 1
#debug: true
Expand Down Expand Up @@ -43,7 +43,6 @@ neuralnet {
shape: 32
}
include: kTrain
partition_dim: 0
}
# layer{
# name: "data"
Expand Down Expand Up @@ -73,7 +72,6 @@ neuralnet {
shape: 32
}
include: kTest
partition_dim: 0
}

layer {
Expand Down
282 changes: 282 additions & 0 deletions examples/cifar10/gpu.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,282 @@
name: "cifar10-convnet"
train_steps: 1000
test_steps: 100
test_freq: 200
#validate_steps: 100
#validate_freq: 300
disp_freq: 50
gpu:0


#checkpoint_path: "examples/cifar10/checkpoint/step1000-worker0"
train_one_batch {
alg: kBP
}
updater{
type: kSGD
weight_decay:0.004
momentum:0.9
learning_rate {
type: kFixedStep
fixedstep_conf:{
step:0
step:60000
step:65000
step_lr:0.001
step_lr:0.0001
step_lr:0.00001
}
}
}
neuralnet {
layer{
name: "data"
type: kRecordInput
store_conf {
backend: "kvfile"
path: "examples/cifar10/train_data.bin"
mean_file: "examples/cifar10/image_mean.bin"
batchsize: 100
#random_skip: 5000
shape: 3
shape: 32
shape: 32
}
include: kTrain
}
# layer{
# name: "data"
# type: kRecordInput
# store_conf {
# backend: "kvfile"
# path: "examples/cifar10/val_data.bin"
# mean_file: "examples/cifar10/image_mean.bin"
# batchsize: 64
# random_skip: 5000
# shape: 3
# shape: 32
# shape: 32
# }
# include: kVal
# }
layer{
name: "data"
type: kRecordInput
store_conf {
backend: "kvfile"
path: "examples/cifar10/test_data.bin"
mean_file: "examples/cifar10/image_mean.bin"
batchsize: 100
shape: 3
shape: 32
shape: 32
}
include: kTest
}

layer {
name: "conv1"
type: kCConvolution
srclayers: "data"
convolution_conf {
num_filters: 32
kernel: 5
stride: 1
pad:2
}
param {
name: "w1"
init {
type:kGaussian
std:0.0001
}
}
param {
name: "b1"
lr_scale:2.0
init {
type: kConstant
value:0
}
}
}

layer {
name: "pool1"
type: kCPooling
srclayers: "conv1"
pooling_conf {
pool: MAX
kernel: 3
stride: 2
}
}
layer {
name: "relu1"
type: kReLU
srclayers:"pool1"
}
layer {
name: "norm1"
type: kLRN
lrn_conf {
local_size: 3
alpha: 5e-05
beta: 0.75
}
srclayers:"relu1"
}
layer {
name: "conv2"
type: kCConvolution
srclayers: "norm1"
convolution_conf {
num_filters: 32
kernel: 5
stride: 1
pad:2
}
param {
name: "w2"
init {
type:kGaussian
std:0.01
}
}
param {
name: "b2"
lr_scale:2.0
init {
type: kConstant
value:0
}
}
}
layer {
name: "relu2"
type: kReLU
srclayers:"conv2"
}
layer {
name: "pool2"
type: kCPooling
srclayers: "relu2"
pooling_conf {
pool: AVG
kernel: 3
stride: 2
}
}
layer {
name: "norm2"
type: kLRN
lrn_conf {
local_size: 3
alpha: 5e-05
beta: 0.75
}
srclayers:"pool2"
}
layer {
name: "conv3"
type: kCConvolution
srclayers: "norm2"
convolution_conf {
num_filters: 64
kernel: 5
stride: 1
pad:2
}
param {
name: "w3"
init {
type:kGaussian
std:0.01
}
}
param {
name: "b3"
init {
type: kConstant
value:0
}
}
}
layer {
name: "relu3"
type: kReLU
srclayers:"conv3"
}
layer {
name: "pool3"
type: kCPooling
srclayers: "relu3"
pooling_conf {
pool: AVG
kernel: 3
stride: 2
}
}
layer {
name: "ip1"
type: kInnerProduct
srclayers:"pool3"
innerproduct_conf {
num_output: 10
}
param {
name: "w4"
wd_scale:250
init {
type:kGaussian
std:0.01
}
}
param {
name: "b4"
lr_scale:2.0
wd_scale:0
init {
type: kConstant
value:0
}
}
}
# layer {
# name : "softmax"
# type: kSoftmax
# srclayers: "ip1"
# }
#
# layer {
# name : "argsort"
# type: kArgSort
# srclayers: "softmax"
# }
layer{
name: "loss"
type: kSoftmaxLoss
softmaxloss_conf{
topk:1
}
srclayers:"ip1"
srclayers: "data"
}
# uncomment "softmax", "argsort", "output" layer and comment "loss" layer
# to extract features from argsort
# layer {
# name : "output"
# type: kCSVOutput
# srclayers: "argsort"
# store_conf {
# path: "examples/cifar10/out.csv"
# }
# }
}
cluster {
nworker_groups: 1
nserver_groups: 1
nworkers_per_group: 1
nworkers_per_procs: 1
workspace: "examples/cifar10"
}
4 changes: 3 additions & 1 deletion examples/cifar10/hybrid.conf
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@ test_steps: 0
test_freq: 200
#validate_steps: 100
#validate_freq: 300
disp_freq: 30
disp_freq: 50


#debug: true
#checkpoint_path: "examples/cifar10/checkpoint/step1000-worker0"
train_one_batch {
Expand Down
3 changes: 3 additions & 0 deletions examples/cifar10/job.conf
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ test_freq: 200
#validate_steps: 100
#validate_freq: 300
disp_freq: 50



#checkpoint_path: "examples/cifar10/checkpoint/step1000-worker0"
train_one_batch {
alg: kBP
Expand Down
Loading