@@ -6,24 +6,28 @@ var argv = require('yargs')
   .usage('Angular e2e/perf test options.')
   .options({
     'sample-size': {
-      describe: 'sample size',
+      describe: 'Used for perf: sample size.',
       default: 20
     },
     'force-gc': {
-      describe: 'force gc.',
+      describe: 'Used for perf: force gc.',
       default: false,
       type: 'boolean'
     },
     'benchmark': {
-      describe: 'whether to run the benchmarks',
+      describe: 'If true, run only the performance benchmarks. If false, run only the e2e tests.',
+      default: false
+    },
+    'dryrun': {
+      describe: 'If true, only run performance benchmarks once.',
       default: false
     },
     'browsers': {
-      describe: 'comma separated list of preconfigured browsers to use',
+      describe: 'Comma separated list of preconfigured browsers to use.',
       default: 'ChromeDesktop'
     },
     'spec': {
-      describe: 'comma separated file patterns to test',
+      describe: 'Comma separated file patterns to test. By default, globs all test/perf files.',
       default: false
     }
   })
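Taken together, the new flags split the suite into separate e2e and perf runs. A sketch of the resulting invocations (the config file name here is an assumption, not from the diff):

    // node_modules/.bin/protractor perf.conf.js                      -> e2e tests only
    // node_modules/.bin/protractor perf.conf.js --benchmark          -> benchmarks, 20 samples each
    // node_modules/.bin/protractor perf.conf.js --benchmark --dryrun -> each benchmark runs once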
@@ -107,7 +111,7 @@ var BROWSER_CAPS = {
   }
 };
 
-var getBenchmarkFiles = function (benchmark, spec) {
+var getTestFiles = function (benchmark, spec) {
   var specFiles = [];
   var perfFiles = [];
   if (spec.length) {
@@ -119,11 +123,14 @@ var getBenchmarkFiles = function (benchmark, spec) {
     specFiles.push('dist/js/cjs/**/e2e_test/**/*_spec.js');
     perfFiles.push('dist/js/cjs/**/e2e_test/**/*_perf.js');
   }
-  return benchmark ? perfFiles : specFiles.concat(perfFiles);
+  return benchmark ? perfFiles : specFiles;
 };
 
 var config = exports.config = {
   onPrepare: function() {
+    // TODO(juliemr): remove this hack and use the config option
+    // restartBrowserBetweenTests once that is not hanging.
+    // See https://github.com/angular/protractor/issues/1983
     patchProtractorWait(browser);
     // During benchmarking, we need to open a new browser
     // for every benchmark, otherwise the numbers can get skewed
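The return change above means getTestFiles now selects exactly one suite instead of always appending the perf files. A minimal sketch of the new behavior with the default globs (values taken from this hunk):

    getTestFiles(true, []);   // -> ['dist/js/cjs/**/e2e_test/**/*_perf.js']
    getTestFiles(false, []);  // -> ['dist/js/cjs/**/e2e_test/**/*_spec.js']
    // Before this change, the false case returned specFiles.concat(perfFiles).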
@@ -139,13 +146,13 @@ var config = exports.config = {
         global.$$ = global.browser.$$;
       });
       afterEach(function() {
-        global.browser.quit();
-        global.browser = originalBrowser;
+        global.browser.quit();
+        global.browser = originalBrowser;
       });
     }
   },
 
-  specs: getBenchmarkFiles(argv['benchmark'], argv['spec']),
+  specs: getTestFiles(argv['benchmark'], argv['spec']),
 
   exclude: [
     'dist/js/cjs/**/node_modules/**',
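The context lines show that during benchmarking a fresh browser is opened for each test and the original restored afterwards. A minimal sketch of that pattern, assuming Protractor's browser.forkNewDriverInstance() (the surrounding variable names are illustrative, not from the diff):

    var originalBrowser = browser;
    beforeEach(function() {
      // Fork a clean driver so earlier benchmarks cannot skew the numbers.
      global.browser = originalBrowser.forkNewDriverInstance();
      global.element = global.browser.element;
      global.$ = global.browser.$;
      global.$$ = global.browser.$$;
    });
    afterEach(function() {
      global.browser.quit();
      global.browser = originalBrowser;
    });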
@@ -164,7 +171,7 @@ var config = exports.config = {
164171
165172 jasmineNodeOpts : {
166173 showColors : true ,
167- defaultTimeoutInterval : argv [ 'benchmark' ] ? 1200000 : 30000
174+ defaultTimeoutInterval : argv [ 'benchmark' ] ? 1200000 : 60000
168175 } ,
169176 params : {
170177 benchmark : {
@@ -225,7 +232,7 @@ exports.createBenchpressRunner = function(options) {
225232 benchpress . JsonFileReporter . BINDINGS ,
226233 benchpress . bind ( benchpress . JsonFileReporter . PATH ) . toValue ( resultsFolder )
227234 ] ;
228- if ( argv [ 'benchmark ' ] ) {
235+ if ( ! argv [ 'dryrun ' ] ) {
229236 bindings . push ( benchpress . Validator . bindTo ( benchpress . RegressionSlopeValidator ) ) ;
230237 bindings . push ( benchpress . bind ( benchpress . RegressionSlopeValidator . SAMPLE_SIZE ) . toValue ( argv [ 'sample-size' ] ) ) ;
231238 } else {
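With the condition flipped to !argv['dryrun'], the slope-based regression validator now applies to every non-dry run, and the else branch (cut off by the hunk boundary) would handle the dry-run case. A plausible sketch of that branch, assuming benchpress's SizeValidator, which accepts a sample once it reaches a fixed size:

    // Assumption: the dry-run branch validates after a single sample.
    bindings.push(benchpress.Validator.bindTo(benchpress.SizeValidator));
    bindings.push(benchpress.bind(benchpress.SizeValidator.SAMPLE_SIZE).toValue(1));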