skull stripping


//Checking a skullstrip
  //using tkmedit
  tkmedit  brainmask.mgz -aux T1.mgz -surfs

  //using freeview
cd /Applications/freesurfer/subjects/<subject>   //cd into the subject's directory

  freeview -v mri/T1.mgz \
  mri/brainmask.mgz \
  -f surf/lh.white:edgecolor=yellow \
  surf/lh.pial:edgecolor=red \
  surf/rh.white:edgecolor=yellow \
  surf/rh.pial:edgecolor=red


//fix using gcut
recon-all -skullstrip -clean-bm -gcut -subjid 
  //view gcut results
    tkmedit  T1.mgz -segmentation brainmask.gcuts.mgz
    //OR
    tkmedit brainmask.gcuts.mgz -aux T1.mgz
  

//fix using watershed parameters
setenv SUBJECTS_DIR /path/to/your/data          //or 'export SUBJECTS_DIR=/path/to/your/data' in bash
setenv WATERSHED_PREFLOOD_HEIGHTS '20 30 40 50' //set the heights only if changing the default
//watershed values (defaults are: 5, 10, 20, 30)
recon-all -multistrip -clean-bm -s  -no-isrunning


//manually edit
//Fix the skullstripping in the brainmask.mgz file
tkmedit  brainmask.mgz -aux T1.mgz
/*
tkmedit setup
Tools > Configure Brush info > Radius 4 > Circle > Close
Tools > Configure Volume Brush > Clone > Aux volume (for brush 2) >
  Close and then click “Edit voxel tools” square button
Option + Click = fill in
Cmd + Click = erase
Ctrl+1/Ctrl+2 = toggle to/from auxiliary
*/


//rerun autorecon
recon-all -autorecon-pial -subjid   //use when the edits only removed leftover non-brain tissue (skull strip left too much behind); regenerates the pial surfaces
//OR
recon-all -autorecon2 -autorecon3 -subjid   //use when part of the brain had been cut out and was added back

glmnet


> install.packages("glmnet", repos = "http://cran.us.r-project.org")
> library(glmnet)
> data(QuickStartExample)
> fit = glmnet(x, y)
> plot(fit)

[Rplot: coefficient paths from plot(fit)]


> coef(fit,s=0.1)

21 x 1 sparse Matrix of class "dgCMatrix"
                       1
(Intercept)  0.150928072
V1           1.320597195
V2           .          
V3           0.675110234
V4           .          
V5          -0.817411518
V6           0.521436671
V7           0.004829335
V8           0.319415917
V9           .          
V10          .          
V11          0.142498519
V12          .          
V13          .          
V14         -1.059978702
V15          .          
V16          .          
V17          .          
V18          .          
V19          .          
V20         -1.021873704

> nx = matrix(rnorm(10*20),10,20)
> predict(fit,newx=nx,s=c(0.1,0.05))
               1           2
 [1,] -5.5637700 -5.83388941
 [2,]  0.1583885 -0.07526616
 [3,]  1.0761634  1.05031528
 [4,]  0.7341100  0.58998483
 [5,] -4.7027890 -4.82573938
 [6,] -2.7505943 -2.87508310
 [7,]  4.6762363  4.70497532
 [8,] -1.7977951 -1.83678975
 [9,]  2.1365770  2.10036218
[10,]  0.6937184  0.87952438
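
The s argument is the penalty value lambda; by default, s values that are not on the fitted path are handled by linear interpolation between the fitted solutions. A quick sketch continuing the session above, reusing the fit and x objects (output omitted):

> fit$lambda[1:5]               # the lambda sequence glmnet actually fitted
> coef(fit, s = c(0.1, 0.05))   # coefficient matrix with one column per requested lambda
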
> cvfit = cv.glmnet(x, y)
> plot(cvfit)

[Rplot01: cross-validation curve from plot(cvfit)]


> cvfit$lambda.min
[1] 0.06896889
> coef(cvfit, s = "lambda.min")
21 x 1 sparse Matrix of class "dgCMatrix"
                       1
(Intercept)  0.147927056
V1           1.337393911
V2           .          
V3           0.704086480
V4           .          
V5          -0.842853149
V6           0.549403480
V7           0.032703914
V8           0.342430521
V9           .          
V10          0.001206607
V11          0.178995989
V12          .          
V13          .          
V14         -1.079993473
V15          .          
V16          .          
V17          .          
V18          .          
V19          .          
V20         -1.061382443

> predict(cvfit, newx = x[1:5,], s = "lambda.min")
              1
[1,] -1.3626977
[2,]  2.5736774
[3,]  0.5767335
[4,]  2.0062604
[5,]  1.5410061
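
cv.glmnet also reports lambda.1se, the largest lambda whose cross-validation error is within one standard error of the minimum; it usually gives a sparser, more regularized model than lambda.min. A sketch continuing the session above (output omitted):

> cvfit$lambda.1se
> coef(cvfit, s = "lambda.1se")
> predict(cvfit, newx = x[1:5,], s = "lambda.1se")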

Logistic Regression


> data(BinomialExample)
> fit = glmnet(x, y, family = "binomial")
> plot(fit, xvar = "dev", label = TRUE)

[Rplot02: coefficient paths vs. deviance explained from plot(fit, xvar = "dev")]


> predict(fit, newx = x[1:5,], type = "class", s = c(0.05, 0.01))
     1   2  
[1,] "0" "0"
[2,] "1" "1"
[3,] "1" "1"
[4,] "0" "0"
[5,] "1" "1"
> cvfit = cv.glmnet(x, y, family = "binomial", type.measure = "class")
> plot(cvfit)

[Rplot03: cross-validation misclassification curve from plot(cvfit)]


> cvfit$lambda.min
[1] 0.03741031
> cvfit$lambda.1se
[1] 0.05427596
> coef(cvfit, s = "lambda.min")
31 x 1 sparse Matrix of class "dgCMatrix"
                      1
(Intercept)  0.21531808
V1           .         
V2           0.35204197
V3          -0.25501389
V4          -0.77948728
V5          -0.07772289
V6          -0.54451004
V7           .         
V8          -0.29072606
V9           0.35493516
V10         -0.88664559
V11          .         
V12          .         
V13          .         
V14          .         
V15          .         
V16          0.01632705
V17          .         
V18          .         
V19          .         
V20          .         
V21          .         
V22          0.13882866
V23          0.15552657
V24          .         
V25          0.34738315
V26         -0.20631840
V27          .         
V28          0.05213348
V29         -0.07602940
V30          .         
> predict(cvfit, newx = x[1:10,], s = "lambda.min", type = "class")
      1  
 [1,] "0"
 [2,] "1"
 [3,] "1"
 [4,] "0"
 [5,] "1"
 [6,] "1"
 [7,] "0"
 [8,] "0"
 [9,] "1"
[10,] "1"

Cox Model


> data(CoxExample)
> y[1:5,]
           time status
[1,] 1.76877757      1
[2,] 0.54528404      1
[3,] 0.04485918      0
[4,] 0.85032298      0
[5,] 0.61488426      1
> fit = glmnet(x, y, family = "cox")
> plot(fit)

[Rplot04: coefficient paths for the Cox fit from plot(fit)]


> coef(fit, s = 0.05)
30 x 1 sparse Matrix of class "dgCMatrix"
              1
V1   0.37693638
V2  -0.09547797
V3  -0.13595972
V4   0.09814146
V5  -0.11437545
V6  -0.38898545
V7   0.24291400
V8   0.03647596
V9   0.34739813
V10  0.03865115
V11  .         
V12  .         
V13  .         
V14  .         
V15  .         
V16  .         
V17  .         
V18  .         
V19  .         
V20  .         
V21  .         
V22  .         
V23  .         
V24  .         
V25  .         
V26  .         
V27  .         
V28  .         
V29  .         
V30  .         
> cvfit = cv.glmnet(x, y, family = "cox")
> plot(cvfit)

[Rplot05: cross-validation curve for the Cox fit from plot(cvfit)]


> cvfit$lambda.1se
[1] 0.04436439
> coef.min = coef(cvfit, s = "lambda.min")
> active.min = which(coef.min != 0)
> index.min = coef.min[active.min]
> index.min
 [1]  0.496699041 -0.177917576 -0.222445759  0.178507885 -0.190073472 -0.494708144
 [7]  0.339530800  0.093719350  0.454738096  0.119110422  0.019623132 -0.020382328
[13] -0.004743974 -0.003861639 -0.025818950  0.004390694 -0.010499361
> coef.min
30 x 1 sparse Matrix of class "dgCMatrix"
               1
V1   0.496699041
V2  -0.177917576
V3  -0.222445759
V4   0.178507885
V5  -0.190073472
V6  -0.494708144
V7   0.339530800
V8   0.093719350
V9   0.454738096
V10  0.119110422
V11  .          
V12  .          
V13  0.019623132
V14  .          
V15  .          
V16  .          
V17 -0.020382328
V18  .          
V19  .          
V20  .          
V21 -0.004743974
V22 -0.003861639
V23  .          
V24  .          
V25 -0.025818950
V26  .          
V27  0.004390694
V28  .          
V29  .          
V30 -0.010499361
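
index.min above holds only the values of the nonzero coefficients; the matching variable names can be read off the row names of the sparse coefficient matrix. A sketch (output omitted):

> rownames(coef.min)[active.min]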

 

apply, lapply, sapply, tapply

apply

> d <- matrix(1:9, ncol=3)
> d
     [,1] [,2] [,3]
[1,]    1    4    7
[2,]    2    5    8
[3,]    3    6    9
> apply(d, 1, sum) - apply the function sum over the rows
[1] 12 15 18
> apply(d,2,sum) - apply the function sum over the columns
[1]  6 15 24

> head(iris,3) - look at the first 3 rows of the iris dataset
  Sepal.Length Sepal.Width Petal.Length Petal.Width Species
1          5.1         3.5          1.4         0.2  setosa
2          4.9         3.0          1.4         0.2  setosa
3          4.7         3.2          1.3         0.2  setosa
> apply(iris[,1:4], 2, sum) - sum each of columns 1-4 of the iris dataset
Sepal.Length  Sepal.Width Petal.Length  Petal.Width 
       876.5        458.6        563.7        179.9 
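
The function handed to apply can also be an anonymous function returning several values per column, in which case the results are assembled into a matrix; a sketch (output omitted):

> apply(iris[,1:4], 2, function(x) c(mean = mean(x), sd = sd(x)))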

lapply


> lapply(iris[,1:4], mean)
$Sepal.Length
[1] 5.843333

$Sepal.Width
[1] 3.057333

$Petal.Length
[1] 3.758

$Petal.Width
[1] 1.199333
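
lapply always returns a list; wrapping it in unlist collapses the result into the same named vector that sapply produces below (output omitted):

> unlist(lapply(iris[,1:4], mean))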

sapply


> sapply(iris[,1:4], mean)
Sepal.Length  Sepal.Width Petal.Length  Petal.Width 
    5.843333     3.057333     3.758000     1.199333 
> y <- sapply(iris[,1:4], function(x){x > 3})
> head(y,3)
     Sepal.Length Sepal.Width Petal.Length Petal.Width
[1,]         TRUE        TRUE        FALSE       FALSE
[2,]         TRUE       FALSE        FALSE       FALSE
[3,]         TRUE        TRUE        FALSE       FALSE
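
When the return type should be enforced, vapply is a stricter variant of sapply that errors unless each result matches the supplied template; a sketch (output omitted):

> vapply(iris[,1:4], mean, numeric(1))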

tapply


> tapply(1:10, rep(1,10), sum)
 1 
55 
> levels(iris$Species)
[1] "setosa"     "versicolor" "virginica" 
> tapply(iris$Sepal.Length, iris$Species, mean)
    setosa versicolor  virginica 
     5.006      5.936      6.588 
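
tapply also accepts a list of grouping factors and returns a table with one dimension per factor; a sketch using the built-in warpbreaks dataset (output omitted):

> with(warpbreaks, tapply(breaks, list(wool, tension), mean))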

plaidml


> virtualenv plaidml
> source plaidml/bin/activate
> pip install plaidml-keras plaidbench 
> plaidml-setup
PlaidML Setup (0.7.0)

Thanks for using PlaidML!

The feedback we have received from our users indicates an ever-increasing need
for performance, programmability, and portability. During the past few months,
we have been restructuring PlaidML to address those needs.  To make all the
changes we need to make while supporting our current user base, all development
of PlaidML has moved to a branch — plaidml-v1. We will continue to maintain and
support the master branch of PlaidML and the stable 0.7.0 release.

Read more here: https://github.com/plaidml/plaidml 

Some Notes:
  * Bugs and other issues: https://github.com/plaidml/plaidml/issues
  * Questions: https://stackoverflow.com/questions/tagged/plaidml
  * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev
  * PlaidML is licensed under the Apache License 2.0
 

Default Config Devices:
   llvm_cpu.0 : CPU (via LLVM)
   metal_amd_radeon_hd_-_firepro_d700.1 : AMD Radeon HD - FirePro D700 (Metal)
   metal_amd_radeon_hd_-_firepro_d700.0 : AMD Radeon HD - FirePro D700 (Metal)

Experimental Config Devices:
   llvm_cpu.0 : CPU (via LLVM)
   opencl_amd_radeon_hd_-_firepro_d700_compute_engine.1 : AMD AMD Radeon HD - FirePro D700 Compute Engine (OpenCL)
   opencl_amd_radeon_hd_-_firepro_d700_compute_engine.0 : AMD AMD Radeon HD - FirePro D700 Compute Engine (OpenCL)
   metal_amd_radeon_hd_-_firepro_d700.1 : AMD Radeon HD - FirePro D700 (Metal)
   metal_amd_radeon_hd_-_firepro_d700.0 : AMD Radeon HD - FirePro D700 (Metal)

Using experimental devices can cause poor performance, crashes, and other nastiness.

Enable experimental device support? (y,n)[n]:y

Multiple devices detected (You can override by setting PLAIDML_DEVICE_IDS).
Please choose a default device:

   1 : llvm_cpu.0
   2 : opencl_amd_radeon_hd_-_firepro_d700_compute_engine.1
   3 : opencl_amd_radeon_hd_-_firepro_d700_compute_engine.0
   4 : metal_amd_radeon_hd_-_firepro_d700.1
   5 : metal_amd_radeon_hd_-_firepro_d700.0

Default device? (1,2,3,4,5)[1]:0
Invalid choice: 0

Default device? (1,2,3,4,5)[1]:1

Selected device:
    llvm_cpu.0

Almost done. Multiplying some matrices...
Tile code:
  function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }
Whew. That worked.

Save settings to /Users/taeylee/.plaidml? (y,n)[y]:y
Success!

> plaidbench keras mobilenet
Running 1024 examples with mobilenet, batch size 1, on backend plaid
INFO:plaidml:Opening device "llvm_cpu.0"
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.6/mobilenet_1_0_224_tf.h5
17227776/17225924 [==============================] - 3s 0us/step
Compiling network... Warming up... Running...
Example finished, elapsed: 4.677s (compile), 95.086s (execution)

-----------------------------------------------------------------------------------------
Network Name         Inference Latency         Time / FPS          
-----------------------------------------------------------------------------------------
mobilenet            92.86 ms                  87.92 ms / 11.37 fps
Correctness: PASS, max_error: 2.0418785425135866e-05, max_abs_error: 1.5869736671447754e-06, fail_ratio: 0.0
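
//second run of the same benchmark below, this time with the Metal device selected (assumption: the device was switched by re-running plaidml-setup or via PLAIDML_DEVICE_IDS)
> plaidbench keras mobilenet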


Running 1024 examples with mobilenet, batch size 1, on backend plaid
INFO:plaidml:Opening device "metal_amd_radeon_hd_-_firepro_d700.0"
Compiling network... Warming up... Running...
Example finished, elapsed: 0.637s (compile), 10.064s (execution)

-----------------------------------------------------------------------------------------
Network Name         Inference Latency         Time / FPS          
-----------------------------------------------------------------------------------------
mobilenet            9.83 ms                   0.00 ms / 1000000000.00 fps
Correctness: PASS, max_error: 1.681717185419984e-05, max_abs_error: 7.674098014831543e-07, fail_ratio: 0.0