# .github/workflows/ci.yml
name: Run all checks

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: 'The git ref to build the package for'
        required: false
        default: ''
        type: string
      use_lkg:
        description: 'Whether to use the last known good versions of dependencies'
        required: false
        default: True
        type: boolean
  # nightly
  schedule:
    - cron: '0 0 * * *'
# Only run once per PR, canceling any previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
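# (github.head_ref is only set for pull_request events, so PR runs share a group and supersede
# each other, while other runs fall back to the unique run_id and are never cancelled)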
# Precompute the ref if the workflow was triggered by a workflow dispatch rather than copying this logic repeatedly
env:
  ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
  use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
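# Note: env values are stored as strings, so downstream expressions such as
# `${{ env.use_lkg && '-r lkg.txt' }}` and `if: ${{ !env.use_lkg }}` depend on GitHub's
# type coercion of those strings rather than on real booleans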
jobs:
  eval:
    name: Evaluate changes
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
      name: Checkout repository
      with:
        ref: ${{ env.ref }}
        fetch-depth: 2
    # We want to enforce the following rules for PRs:
    # * if all modifications are to README.md
    #     no testing is needed
    # * if there are modifications to doc/* or to any code
    #     then docs need to be built to verify consistency
    # * if there are modifications to notebooks/* or to any code
    #     then notebooks need to be run to verify consistency
    # * for any code changes (or changes to metadata files)
    #     linting and testing should be run
    # For a PR build, HEAD will be the merge commit, and we want to diff against the base branch,
    #   which will be the first parent: HEAD^
    # (For non-PR changes, we will always perform all CI tasks)
    # Note that GitHub Actions provides path filters, but they operate at the workflow level, not the job level
    - run: |
        if ($env:GITHUB_EVENT_NAME -eq 'pull_request') {
          $editedFiles = git diff HEAD^ --name-only
          $editedFiles # echo edited files to enable easier debugging
          $codeChanges = $false
          $docChanges = $false
          $nbChanges = $false
          $changeType = "none"
          foreach ($file in $editedFiles) {
            switch -Wildcard ($file) {
              "README.md" { Continue }
              ".gitignore" { Continue }
              "econml/_version.py" { Continue }
              "prototypes/*" { Continue }
              "images/*" { Continue }
              "doc/*" { $docChanges = $true; Continue }
              "notebooks/*" { $nbChanges = $true; Continue }
              default { $codeChanges = $true; Continue }
            }
          }
        }
        echo "buildDocs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($docChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
        echo "buildNbs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($nbChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
        echo "testCode=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or $codeChanges)" >> $env:GITHUB_OUTPUT
      shell: pwsh
      name: Determine type of code change
      id: eval
    outputs:
      buildDocs: ${{ steps.eval.outputs.buildDocs }}
      buildNbs: ${{ steps.eval.outputs.buildNbs }}
      testCode: ${{ steps.eval.outputs.testCode }}
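  # The eval step runs under PowerShell, which serializes booleans as the strings 'True' and
  # 'False'; that is why the `if:` conditions below compare the outputs against 'True'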
  lint:
    name: Lint code
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
      name: Checkout repository
      with:
        ref: ${{ env.ref }}
    - uses: actions/setup-python@v4
      name: Setup Python
      with:
        python-version: '3.9'
    - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
      name: Ensure latest pip and setuptools
    - run: 'pip install pycodestyle && pycodestyle econml'
  notebooks:
    name: Run notebooks
    needs: [eval]
    if: ${{ needs.eval.outputs.buildNbs == 'True' }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        kind: [except-customer-scenarios, customer-scenarios]
        include:
        - kind: "except-customer-scenarios"
          extras: "[tf,plt]"
          pattern: "(?!CustomerScenarios)"
          install_graphviz: true
          version: '3.8' # no supported version of tensorflow for 3.9
        - kind: "customer-scenarios"
          extras: "[plt,dowhy]"
          pattern: "CustomerScenarios"
          version: '3.9'
          install_graphviz: false
      fail-fast: false
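    # (each `include` entry above matches one of the `kind` values, augmenting that matrix
    # combination with the extras to install, the notebook pattern to run, the Python version,
    # and whether graphviz is needed)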
    steps:
    - uses: actions/checkout@v3
      name: Checkout repository
      with:
        ref: ${{ env.ref }}
    - uses: actions/setup-python@v4
      name: Setup Python
      with:
        python-version: ${{ matrix.version }}
    - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
      name: Ensure latest pip and setuptools
    - run: sudo apt-get -yq install graphviz
      name: Install graphviz
      if: ${{ matrix.install_graphviz }}
    # Add verbose flag to pip installation if in debug mode
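    # (runner.debug is '1' only when debug logging is enabled, so the fromJSON indexing selects
    # '-v' for debug runs and the empty string otherwise; when use_lkg is set, the pinned
    # last-known-good requirements in lkg-notebook.txt are installed alongside the package)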
    - run: pip install -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg-notebook.txt' }}
      name: Install econml
    # Install notebook requirements (if not already done as part of lkg)
    - run: pip install jupyter jupyter-client nbconvert nbformat seaborn xgboost tqdm
      name: Install notebook requirements
      if: ${{ !env.use_lkg }}
    - run: pip freeze --exclude-editable > notebooks-${{ matrix.version }}-${{ matrix.kind }}-requirements.txt
      name: Save installed packages
    - uses: actions/upload-artifact@v3
      name: Upload installed packages
      with:
        name: requirements
        path: notebooks-${{ matrix.version }}-${{ matrix.kind }}-requirements.txt
    - run: pip install pytest pytest-runner coverage
      name: Install pytest
    - run: python setup.py pytest
      name: Run notebook tests
      id: run_tests
      env:
        PYTEST_ADDOPTS: '-m "notebook"'
        NOTEBOOK_DIR_PATTERN: ${{ matrix.pattern }}
        COVERAGE_PROCESS_START: 'setup.cfg'
    - run: mv .coverage .coverage.${{ matrix.kind }}
      # Run whether or not the tests passed, but only if they ran at all
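      # (`&&` binds more tightly than `||`, so this is: success, or failure where the test step actually ran)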
      if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      name: Make coverage filename unique
    - uses: actions/upload-artifact@v3
      name: Upload coverage report
      if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      with:
        name: coverage
        path: .coverage.${{ matrix.kind }}
  tests:
    name: "Run tests"
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.7', '3.8', '3.9', '3.10']
        kind: [serial, other, dml, main, treatment]
        exclude:
        # Serial tests fail randomly on mac sometimes, so we don't run them there
        - os: macos-latest
          kind: serial
        # Python 3.7 is broken on the mac runner image, see https://github.com/actions/runner-images/issues/7764
        - os: macos-latest
          python-version: '3.7'
        # Assign the correct package and testing options for each kind of test
        include:
        - kind: serial
          opts: '-m "serial" -n 1'
          extras: "[tf,plt]"
        - kind: other
          opts: '-m "cate_api" -n auto'
          extras: "[tf,plt]"
        - kind: dml
          opts: '-m "dml"'
          extras: "[tf,plt]"
        - kind: main
          opts: '-m "not (notebook or automl or dml or serial or cate_api or treatment_featurization)" -n 2'
          extras: "[tf,plt,dowhy]"
        - kind: treatment
          opts: '-m "treatment_featurization" -n auto'
          extras: "[tf,plt]"
      fail-fast: false
    runs-on: ${{ matrix.os }}
    steps:
    - uses: actions/checkout@v3
      name: Checkout repository
      with:
        ref: ${{ env.ref }}
    - uses: actions/setup-python@v4
      name: Setup Python
      with:
        python-version: ${{ matrix.python-version }}
    - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
      name: Ensure latest pip and setuptools
    # Add verbose flag to pip installation if in debug mode
    - run: pip install -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg.txt' }}
      name: Install econml
    - run: pip freeze --exclude-editable > tests-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}-requirements.txt
      name: Save installed packages
    - uses: actions/upload-artifact@v3
      name: Upload installed packages
      with:
        name: requirements
        path: tests-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}-requirements.txt
    - run: pip install pytest pytest-runner coverage
      name: Install pytest
    - run: python setup.py pytest
      name: Run tests
      id: run_tests
      env:
        PYTEST_ADDOPTS: ${{ matrix.opts }}
        COVERAGE_PROCESS_START: 'setup.cfg'
    - run: mv .coverage .coverage.${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}
      # Run whether or not the tests passed, but only if they ran at all
      if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      name: Make coverage filename unique
    - uses: actions/upload-artifact@v3
      name: Upload coverage report
      if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
      with:
        name: coverage
        path: .coverage.${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}
  coverage-report:
    name: "Coverage report"
    needs: [tests, notebooks]
    if: success() || failure()
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
      name: Checkout repository
      with:
        ref: ${{ env.ref }}
    - uses: actions/download-artifact@v3
      name: Get coverage reports
      with:
        name: coverage
        path: coverage
    - uses: actions/setup-python@v4
      name: Setup Python
      with:
        python-version: '3.8'
    - run: pip install coverage
      name: Install coverage
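    # `coverage combine` merges all of the .coverage.* data files downloaded into coverage/ into
    # a single .coverage file; the unique filenames generated by each job keep them from colliding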
    - run: coverage combine coverage/
      name: Combine coverage reports
    - run: coverage report -m --format=markdown > $GITHUB_STEP_SUMMARY
      name: Generate coverage report
    - run: coverage html --fail-under=86
      name: Generate coverage html
    - uses: actions/upload-artifact@v3
      name: Upload coverage report
      with:
        name: coverage
        path: htmlcov
  build:
    name: Build package
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    uses: ./.github/workflows/publish-package.yml
    with:
      publish: false
      repository: testpypi
      # the env context is not available to `jobs.<job_id>.with` when calling reusable workflows, so repeat the logic here
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
  docs:
    name: Build documentation
    needs: [eval]
    if: ${{ needs.eval.outputs.buildDocs == 'True' }}
    uses: ./.github/workflows/publish-documentation.yml
    with:
      publish: false
      environment: test
      # as above, the env context is not available to reusable workflow inputs, so repeat the logic here
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
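  # Aggregate job, presumably so that branch protection can require this single check even though
  # the set of jobs that actually run varies with the files that changed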
  verify:
    name: Verify CI checks
    needs: [lint, notebooks, tests, build, docs]
    if: always()
    runs-on: ubuntu-latest
    steps:
    - run: exit 1
      name: At least one check failed or was cancelled
      if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
    - run: exit 0
      name: All checks passed
      if: ${{ !(contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) }}