@@ -30,16 +30,16 @@ module.exports.insert = insert
30
30
// Record a content entry for `key` in the cache's index and resolve with the
// formatted entry. Best-effort: if the index bucket vanishes out from under
// us mid-write (ENOENT race), we still resolve as though the write succeeded,
// because a cache is allowed to lie about having remembered something.
function insert (cache, key, digest, opts) {
  opts = opts || {}
  const indexFile = bucketPath(cache, key)
  // Build the entry up front so the final .then() can return it even after
  // the append/chownr steps have been skipped by the ENOENT catch below.
  const entry = {
    key: key,
    digest: digest,
    hashAlgorithm: opts.hashAlgorithm || 'sha512',
    time: Date.now(),
    metadata: opts.metadata
  }
  const appendEntry = () => {
    const stringified = JSON.stringify(entry)
    // NOTE - Cleverness ahoy! Each index line is appended as
    // `\n<hashEntry(json)>\t<json>`, so concurrent writers only ever add
    // whole lines and a reader can presumably validate each line against
    // its leading hash. Thanks to @isaacs for the whiteboarding session
    // that ended up with this.
    return appendFileAsync(
      indexFile, `\n${hashEntry(stringified)}\t${stringified}`
    )
  }
  return fixOwner.mkdirfix(
    path.dirname(indexFile), opts.uid, opts.gid
  ).then(appendEntry).then(
    () => fixOwner.chownr(indexFile, opts.uid, opts.gid)
  ).catch({ code: 'ENOENT' }, () => {
    // There's a class of race conditions that happen when things get deleted
    // during fixOwner, or between the mkdirfix/append/chownr calls.
    //
    // It's perfectly fine to just not bother in those cases and lie
    // that the index entry was written. Because it's a cache.
    //
    // NOTE(review): the object-predicate form of .catch is a Bluebird
    // extension — this chain must be Bluebird promises; confirm upstream.
  }).then(() => formatEntry(cache, entry))
}
60
66
61
67
module . exports . find = find
0 commit comments