terraform.go
package pkg

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"os"
	"text/template"

	"github.com/app-sre/terraform-repo-executor/pkg/vaultutil"
	"github.com/hashicorp/terraform-exec/tfexec"
)

// TfCreds is made up of AWS credentials and configuration for using an S3 backend with Terraform
type TfCreds struct {
	AccessKey string
	SecretKey string
	Region    string
	Key       string // set when initializing backend
	Bucket    string
}

// standardized AppSRE terraform secret keys
const (
	AwsAccessKeyID     = "aws_access_key_id"
	AwsSecretAccessKey = "aws_secret_access_key"
	AwsRegion          = "region"
	AwsBucket          = "bucket"
)
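
// Illustrative only (not part of the original source): extractTfCreds below expects a Vault KV
// payload keyed by the constants above, e.g. (placeholder values):
//
//	{
//	  "aws_access_key_id": "AKIA...",
//	  "aws_secret_access_key": "...",
//	  "bucket": "my-terraform-state-bucket",
//	  "region": "us-east-1"
//	}
//
// bucket and region may be omitted when the Repo already defines them.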

// extractTfCreds pulls the AWS credentials (and, when the repo does not already define them,
// the state bucket and region) required for the S3 backend out of a Vault KV secret
func extractTfCreds(secret vaultutil.VaultKvData, repo Repo) (TfCreds, error) {
	secretKeys := []string{AwsAccessKeyID, AwsSecretAccessKey}
	errStr := "Required terraform key `%s` missing from Vault secret."

	// handle cases where a bucket & region are already defined for the AWS account via terraform-state-1.yml
	if len(repo.Bucket) > 0 && len(repo.Region) > 0 {
		for _, key := range secretKeys {
			if secret[key] == nil {
				return TfCreds{}, fmt.Errorf(errStr, key)
			}
		}
		return TfCreds{
			AccessKey: secret[AwsAccessKeyID].(string),
			SecretKey: secret[AwsSecretAccessKey].(string),
			Bucket:    repo.Bucket,
			Region:    repo.Region,
		}, nil
	}

	secretKeys = append(secretKeys, []string{AwsBucket, AwsRegion}...)
	for _, key := range secretKeys {
		if secret[key] == nil {
			return TfCreds{}, fmt.Errorf(errStr, key)
		}
	}
	return TfCreds{
		AccessKey: secret[AwsAccessKeyID].(string),
		SecretKey: secret[AwsSecretAccessKey].(string),
		Bucket:    secret[AwsBucket].(string),
		Region:    secret[AwsRegion].(string),
	}, nil
}

// terraform specific filenames
// the "auto" vars files will automatically be loaded by the tf binary
const (
	AWSVarsFile   = "aws.auto.tfvars"
	InputVarsFile = "input.auto.tfvars"
	BackendFile   = "s3.tfbackend"
)

// generates a .tfbackend file to be used as a partial backend config input file
// the generated backend file provides credentials for an S3 backend config
func (e *Executor) generateBackendFile(creds TfCreds, repo Repo) error {
	backendTemplate := `access_key = "{{.AccessKey}}"
{{- "\n"}}secret_key = "{{.SecretKey}}"
{{- "\n"}}region = "{{.Region}}"
{{- "\n"}}key = "{{.Key}}"
{{- "\n"}}bucket = "{{.Bucket}}"`

	err := WriteTemplate(creds, backendTemplate, fmt.Sprintf("%s/%s/%s/%s", e.workdir, repo.Name, repo.Path, BackendFile))
	if err != nil {
		return err
	}
	return nil
}
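
// Illustrative only (not part of the original source): with the template above, the rendered
// s3.tfbackend is a plain key/value partial backend configuration consumed during terraform init,
// along the lines of (placeholder values):
//
//	access_key = "AKIA..."
//	secret_key = "..."
//	region = "us-east-1"
//	key = "<state object key>"
//	bucket = "<state bucket>"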

// TfVars holds the S3 backend credentials and Vault access credentials that are rendered into
// the credentials *.tfvars file
type TfVars struct {
	AccessKey     string
	SecretKey     string
	Region        string
	VaultAddress  string
	VaultRoleID   string
	VaultSecretID string
}

// generates a .tfvars file including Vault & S3 backend credentials
// TODO: add test case around this function
func (e *Executor) generateCredVarsFile(creds TfCreds, repo Repo) error {
	// create a *.tfvars file for the S3 backend and Vault credentials
	body := `access_key = "{{.AccessKey}}"
{{- "\n"}}secret_key = "{{.SecretKey}}"
{{- "\n"}}region = "{{.Region}}"
{{- "\n"}}vault_addr = "{{.VaultAddress}}"
{{- "\n"}}vault_role_id = "{{.VaultRoleID}}"
{{- "\n"}}vault_secret_id = "{{.VaultSecretID}}"`

	tfVars := TfVars{
		AccessKey:     creds.AccessKey,
		SecretKey:     creds.SecretKey,
		Region:        creds.Region,
		VaultAddress:  e.vaultAddr,
		VaultRoleID:   e.vaultRoleID,
		VaultSecretID: e.vaultSecretID,
	}

	err := WriteTemplate(tfVars, body, fmt.Sprintf("%s/%s/%s/%s", e.workdir, repo.Name, repo.Path, AWSVarsFile))
	if err != nil {
		return err
	}
	return nil
}

// generates a .tfvars file including input variables from Vault
func (e *Executor) generateInputVarsFile(data vaultutil.VaultKvData, repo Repo) error {
	body := `{{ range $k, $v := . }}{{ $k }} = "{{ $v }}"{{- "\n"}}{{ end }}`

	err := WriteTemplate(data, body, fmt.Sprintf("%s/%s/%s/%s", e.workdir, repo.Name, repo.Path, InputVarsFile))
	if err != nil {
		return err
	}
	return nil
}
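
// Illustrative only (not part of the original source): given a Vault KV payload such as
// {"cluster_name": "prod", "replicas": "3"}, the range template above renders input.auto.tfvars as:
//
//	cluster_name = "prod"
//	replicas = "3"
//
// Every value is emitted as a quoted string regardless of its original type in Vault.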

// WriteTemplate is responsible for templating a file and writing it to the location specified at out
// note that this is not a struct method as generics are incompatible with methods
func WriteTemplate[T TfVars | vaultutil.VaultKvData | TfCreds | StateVars](inputs T, body string, out string) error {
	tmpl, err := template.New(out).Parse(body)
	if err != nil {
		return err
	}

	f, err := os.Create(out)
	if err != nil {
		return err
	}
	defer f.Close()

	err = tmpl.Execute(f, inputs)
	if err != nil {
		return err
	}
	return nil
}
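
// Illustrative only (not part of the original source): because WriteTemplate is generic over the
// structs above, each file generator reduces to a single call and the type parameter T is inferred
// from the first argument, e.g. (hypothetical values and path):
//
//	creds := TfCreds{AccessKey: "AKIA...", SecretKey: "...", Region: "us-east-1"}
//	err := WriteTemplate(creds, `region = "{{.Region}}"`, "/tmp/example.tfvars")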

// checks the generated terraform plan file to ensure that the FIPS endpoint is enabled in the AWS provider configuration
func (e *Executor) fipsComplianceCheck(repo Repo, planFile string, tf *tfexec.Terraform) error {
	out, err := tf.ShowPlanFile(context.Background(), planFile)
	if err != nil {
		log.Println("Unable to determine FIPS compatibility")
		return err
	}

	compliant := false
	for _, provider := range out.Config.ProviderConfigs {
		if provider.Name == "aws" {
			for k, v := range provider.Expressions {
				if k == "use_fips_endpoint" && v.ConstantValue == true {
					compliant = true
				}
			}
		}
	}

	if !compliant {
		return fmt.Errorf("repository '%s' is not using 'use_fips_endpoint = true' for the AWS provider despite the repo requiring fips", repo.Name)
	}
	return nil
}
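
// Illustrative only (not part of the original source): a repository passes this check when its AWS
// provider sets the flag as a constant value, e.g.:
//
//	provider "aws" {
//	  region            = "us-east-1"
//	  use_fips_endpoint = true
//	}
//
// A value derived from a variable or other expression would not surface as a ConstantValue in the
// plan JSON, so in practice the flag has to be the literal `true`.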

// performs a terraform show without the `-json` flag to work around the fact that the tfexec package
// only supports outputting the state as JSON, which exposes sensitive values
func (e *Executor) showRaw(dir string, tfBinaryLocation string) (string, error) {
	out, err := executeCommand(dir, tfBinaryLocation, []string{"show"})
	if err != nil {
		return "", err
	}
	return out, nil
}

// performs a terraform plan, and then an apply if not running in dry run mode
// additionally captures any terraform outputs if necessary
func (e *Executor) processTfPlan(repo Repo, dryRun bool, envVars map[string]string) (map[string]tfexec.OutputMeta, error) {
	dir := fmt.Sprintf("%s/%s/%s", e.workdir, repo.Name, repo.Path)
	// each repo can use a different version of the TF binary, specified in App Interface
	tfBinaryLocation := fmt.Sprintf("/usr/bin/Terraform/%s/terraform", repo.TfVersion)
	tf, err := tfexec.NewTerraform(dir, tfBinaryLocation)
	if err != nil {
		return nil, err
	}

	log.Printf("Initializing terraform config for %s\n", repo.Name)
	err = tf.Init(
		context.Background(),
		tfexec.BackendConfig(BackendFile),
	)
	if err != nil {
		return nil, err
	}

	var stdout, stderr, blackhole bytes.Buffer
	tf.SetStdout(&stdout)
	tf.SetStderr(&stderr)

	// supply the AWS access key and secret key variables to the terraform executable for remote_backend_state
	err = tf.SetEnv(envVars)
	if err != nil {
		return nil, err
	}

	planFile := fmt.Sprintf("%s/%s-plan", e.workdir, repo.Name)
	var output map[string]tfexec.OutputMeta
	if dryRun {
		log.Printf("Performing terraform plan for %s", repo.Name)
		_, err = tf.Plan(
			context.Background(),
			tfexec.Destroy(repo.Delete),
			tfexec.Out(planFile), // this plan file will also be useful to have in a later improvement
		)
	} else {
		// the tfexec Destroy flag cannot be passed to tf.Apply in the same fashion as the Plan() call above
		if repo.Delete {
			log.Printf("Performing terraform destroy for %s", repo.Name)
			err = tf.Destroy(
				context.Background(),
			)
		} else {
			log.Printf("Performing terraform apply for %s", repo.Name)
			err = tf.Apply(
				context.Background(),
			)
			// only capture outputs when the apply succeeded so an apply error is not masked by the output call
			if err == nil && repo.TfVariables.Outputs.Path != "" {
				log.Printf("Capturing output values to save to %s in Vault", repo.TfVariables.Outputs.Path)
				// don't log the results of `terraform output -json` as that can leak sensitive credentials
				tf.SetStdout(&blackhole)
				tf.SetStderr(&blackhole)
				output, err = tf.Output(
					context.Background(),
				)
			}
		}
	}
	if err != nil {
		return nil, errors.New(stderr.String())
	}

	if !dryRun {
		rawState, err := e.showRaw(dir, tfBinaryLocation)
		if err != nil {
			return nil, err
		}
		err = e.commitAndPushState(repo, rawState)
		if err != nil {
			log.Printf("Unable to commit state file to Git, error: %s", err)
		}
	}

	log.Printf("Output for %s\n", repo.Name)
	log.Println(RemoveUndeclaredWarnings(stdout.String()))

	if repo.RequireFips && dryRun {
		err = e.fipsComplianceCheck(repo, planFile, tf)
		if err != nil {
			return nil, err
		}
	}
	return output, nil
}