/*
 * crypto_test.go - tests for the crypto package
 *
 * Copyright 2017 Google Inc.
 * Author: Joe Richey (joerichey@google.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package crypto

import (
	"bytes"
	"compress/zlib"
	"crypto/aes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"testing"

	"github.com/google/fscrypt/metadata"
)

// ConstReader is a Reader that always returns the same byte.
type ConstReader byte

// Read fills b entirely with the constant byte; it never returns an error.
func (r ConstReader) Read(b []byte) (n int, err error) {
	for i := range b {
		b[i] = byte(r)
	}
	return len(b), nil
}

// makeKey makes a key of length n consisting of the same repeating byte b.
func makeKey(b byte, n int) (*Key, error) {
	return NewFixedLengthKeyFromReader(ConstReader(b), n)
}

var (
	fakeSalt              = bytes.Repeat([]byte{'a'}, metadata.SaltLen)
	fakePassword          = []byte("password")
	fakeValidPolicyKey, _ = makeKey(42, metadata.PolicyKeyLen)
	fakeWrappingKey, _    = makeKey(17, metadata.InternalKeyLen)
)

// As the passphrase hashing function clears the passphrase, we need to make
// a new passphrase key for each test.
func fakePassphraseKey() (*Key, error) {
	return NewFixedLengthKeyFromReader(bytes.NewReader(fakePassword), len(fakePassword))
}

// Values for test cases pulled from argon2 command line tool.
// To generate run:
//
//	echo "password" | argon2 "aaaaaaaaaaaaaaaa" -id -t <t> -m <m> -p <p> -l 32
//
// where costs.Time = <t>, costs.Memory = 2^<m>, and costs.Parallelism = <p>.
// (NOTE(review): the placeholder flags above were garbled in this copy of the
// file; reconstructed from the field names used below — confirm against the
// argon2 CLI usage.)
type hashTestCase struct {
	costs   *metadata.HashingCosts
	hexHash string
}

var hashTestCases = []hashTestCase{
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 10, Parallelism: 1},
		hexHash: "a66f5398e33761bf161fdf1273e99b148f07d88d12d85b7673fddd723f95ec34",
	},
	// Make sure we maintain our backwards compatible behavior, where
	// Parallelism is truncated to 8-bits unless TruncationFixed is true.
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 10, Parallelism: 257},
		hexHash: "a66f5398e33761bf161fdf1273e99b148f07d88d12d85b7673fddd723f95ec34",
	},
	{
		costs:   &metadata.HashingCosts{Time: 10, Memory: 1 << 10, Parallelism: 1},
		hexHash: "5fa2cb89db1f7413ba1776258b7c8ee8c377d122078d28fe1fd645c353787f50",
	},
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 15, Parallelism: 1},
		hexHash: "f474a213ed14d16ead619568000939b938ddfbd2ac4a82d253afa81b5ebaef84",
	},
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 10, Parallelism: 10},
		hexHash: "b7c3d7a0be222680b5ea3af3fb1a0b7b02b92cbd7007821dc8b84800c86c7783",
	},
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 11, Parallelism: 255},
		hexHash: "d51af3775bbdd0cba31d96fd6d921d9de27f521ceffe667618cd7624f6643071",
	},
	// Adding TruncationFixed shouldn't matter if Parallelism < 256.
	{
		costs:   &metadata.HashingCosts{Time: 1, Memory: 1 << 11, Parallelism: 255, TruncationFixed: true},
		hexHash: "d51af3775bbdd0cba31d96fd6d921d9de27f521ceffe667618cd7624f6643071",
	},
}

// lengthCheck checks that len(array) == expected.
func lengthCheck(name string, array []byte, expected int) error {
	if len(array) != expected {
		return fmt.Errorf("length of %s should be %d", name, expected)
	}
	return nil
}

// Tests the two ways of making keys.
func TestMakeKeys(t *testing.T) {
	data := []byte("1234\n6789")

	// NewKeyFromReader should keep all of the reader's data.
	key1, err := NewKeyFromReader(bytes.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	defer key1.Wipe()
	if !bytes.Equal(data, key1.data) {
		t.Error("Key from reader contained incorrect data")
	}

	// NewFixedLengthKeyFromReader should keep exactly the requested prefix.
	key2, err := NewFixedLengthKeyFromReader(bytes.NewReader(data), 6)
	if err != nil {
		t.Fatal(err)
	}
	defer key2.Wipe()
	if !bytes.Equal([]byte("1234\n6"), key2.data) {
		t.Error("Fixed length key from reader contained incorrect data")
	}
}

// Tests that wipe succeeds.
func TestWipe(t *testing.T) {
	key, err := makeKey(1, 1000)
	if err != nil {
		t.Fatal(err)
	}
	if err := key.Wipe(); err != nil {
		t.Error(err)
	}
}

// Making keys with negative length should fail.
func TestInvalidLength(t *testing.T) {
	key, err := NewFixedLengthKeyFromReader(ConstReader(1), -1)
	if err == nil {
		key.Wipe()
		t.Error("Negative lengths should cause failure")
	}
}

// Test making keys of zero length.
func TestZeroLength(t *testing.T) {
	// A zero-length fixed key reads nothing, even from an open reader.
	key1, err := NewFixedLengthKeyFromReader(os.Stdin, 0)
	if err != nil {
		t.Fatal(err)
	}
	defer key1.Wipe()
	if key1.data != nil {
		t.Error("Fixed length key from reader contained data")
	}

	key2, err := NewKeyFromReader(bytes.NewReader(nil))
	if err != nil {
		t.Fatal(err)
	}
	defer key2.Wipe()
	if key2.data != nil {
		t.Error("Key from empty reader contained data")
	}
}

// Test that enabling then disabling memory locking succeeds even if a key is
// active when the variable changes.
func TestEnableDisableMemoryLocking(t *testing.T) { // Mlock on for creation, off for wiping key, err := NewRandomKey(metadata.InternalKeyLen) UseMlock = false defer func() { UseMlock = true }() if err != nil { t.Fatal(err) } if err := key.Wipe(); err != nil { t.Error(err) } } // Test that disabling then enabling memory locking succeeds even if a key is // active when the variable changes. func TestDisableEnableMemoryLocking(t *testing.T) { // Mlock off for creation, on for wiping UseMlock = false key2, err := NewRandomKey(metadata.InternalKeyLen) UseMlock = true if err != nil { t.Fatal(err) } if err := key2.Wipe(); err != nil { t.Error(err) } } // Test making keys long enough that the keys will have to resize func TestKeyResize(t *testing.T) { // Key will have to resize once r := io.LimitReader(ConstReader(1), int64(os.Getpagesize())+1) key, err := NewKeyFromReader(r) if err != nil { t.Fatal(err) } defer key.Wipe() for i, b := range key.data { if b != 1 { t.Fatalf("Byte %d contained invalid data %q", i, b) } } } // Test making keys so long that many resizes are necessary func TestKeyLargeResize(t *testing.T) { // Key will have to resize 7 times r := io.LimitReader(ConstReader(1), int64(os.Getpagesize())*65) // Turn off Mlocking as the key will exceed the limit on some systems. UseMlock = false key, err := NewKeyFromReader(r) UseMlock = true if err != nil { t.Fatal(err) } defer key.Wipe() for i, b := range key.data { if b != 1 { t.Fatalf("Byte %d contained invalid data %q", i, b) } } } // Check that we can create random keys. All this test does to test the // "randomness" is generate a page of random bytes and attempts compression. // If the data can be compressed it is probably not very random. This isn't // intended to be a sufficient test for randomness (which is impossible), but a // way to catch simple regressions (key is all zeros or contains a repeating // pattern). 
func TestRandomKeyGen(t *testing.T) { key, err := NewRandomKey(os.Getpagesize()) if err != nil { t.Fatal(err) } defer key.Wipe() if didCompress(key.data) { t.Errorf("Random key (%d bytes) should not be compressible", key.Len()) } } func TestBigKeyGen(t *testing.T) { key, err := NewRandomKey(4096 * 4096) switch err { case nil: key.Wipe() return case ErrMlockUlimit: // Don't fail just because "ulimit -l" is too low. return default: t.Fatal(err) } } // didCompress checks if the given data can be compressed. Specifically, it // returns true if running zlib on the provided input produces a shorter output. func didCompress(input []byte) bool { var output bytes.Buffer w := zlib.NewWriter(&output) _, err := w.Write(input) w.Close() return err == nil && len(input) > output.Len() } // Checks that the input arrays are all distinct func buffersDistinct(buffers ...[]byte) bool { for i := 0; i < len(buffers); i++ { for j := i + 1; j < len(buffers); j++ { if bytes.Equal(buffers[i], buffers[j]) { // Different entry, but equal arrays return false } } } return true } // Checks that our cryptographic operations all produce distinct data func TestKeysAndOutputsDistinct(t *testing.T) { data, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } encKey, authKey := stretchKey(fakeWrappingKey) defer encKey.Wipe() defer authKey.Wipe() if !buffersDistinct(fakeWrappingKey.data, fakeValidPolicyKey.data, encKey.data, authKey.data, data.IV, data.EncryptedKey, data.Hmac) { t.Error("Key wrapping produced duplicate data") } } // Check that Wrap() works with fixed keys func TestWrapSucceeds(t *testing.T) { data, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } if err = lengthCheck("IV", data.IV, aes.BlockSize); err != nil { t.Error(err) } if err = lengthCheck("Encrypted Key", data.EncryptedKey, metadata.PolicyKeyLen); err != nil { t.Error(err) } if err = lengthCheck("HMAC", data.Hmac, sha256.Size); err != nil { t.Error(err) } } // Checks that 
applying Wrap then Unwrap gives the original data func testWrapUnwrapEqual(wrappingKey *Key, secretKey *Key) error { data, err := Wrap(wrappingKey, secretKey) if err != nil { return err } secret, err := Unwrap(wrappingKey, data) if err != nil { return err } defer secret.Wipe() if !bytes.Equal(secretKey.data, secret.data) { return fmt.Errorf("Got %x after wrap/unwrap with w=%x and s=%x", secret.data, wrappingKey.data, secretKey.data) } return nil } // Check that Unwrap(Wrap(x)) == x with fixed keys func TestWrapUnwrapEqual(t *testing.T) { if err := testWrapUnwrapEqual(fakeWrappingKey, fakeValidPolicyKey); err != nil { t.Error(err) } } // Check that Unwrap(Wrap(x)) == x with random keys func TestRandomWrapUnwrapEqual(t *testing.T) { for i := 0; i < 10; i++ { wk, err := NewRandomKey(metadata.InternalKeyLen) if err != nil { t.Fatal(err) } sk, err := NewRandomKey(metadata.InternalKeyLen) if err != nil { t.Fatal(err) } if err = testWrapUnwrapEqual(wk, sk); err != nil { t.Error(err) } wk.Wipe() sk.Wipe() } } // Check that Unwrap(Wrap(x)) == x with differing lengths of secret key func TestDifferentLengthSecretKey(t *testing.T) { wk, err := makeKey(1, metadata.InternalKeyLen) if err != nil { t.Fatal(err) } defer wk.Wipe() for i := 0; i < 100; i++ { sk, err := makeKey(2, i) if err != nil { t.Fatal(err) } if err = testWrapUnwrapEqual(wk, sk); err != nil { t.Error(err) } sk.Wipe() } } // Wrong length of wrapping key should fail func TestWrongWrappingKeyLength(t *testing.T) { _, err := Wrap(fakeValidPolicyKey, fakeWrappingKey) if err == nil { t.Fatal("using a policy key for wrapping should fail") } } // Wrong length of unwrapping key should fail func TestWrongUnwrappingKeyLength(t *testing.T) { data, err := Wrap(fakeWrappingKey, fakeWrappingKey) if err != nil { t.Fatal(err) } if k, err := Unwrap(fakeValidPolicyKey, data); err == nil { k.Wipe() t.Fatal("using a policy key for unwrapping should fail") } } // Wrapping twice with the same keys should give different components func 
TestWrapTwiceDistinct(t *testing.T) { data1, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } data2, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } if !buffersDistinct(data1.IV, data1.EncryptedKey, data1.Hmac, data2.IV, data2.EncryptedKey, data2.Hmac) { t.Error("Wrapping same keys twice should give distinct results") } } // Attempts to Unwrap data with key after altering tweak, should fail func testFailWithTweak(key *Key, data *metadata.WrappedKeyData, tweak []byte) error { tweak[0]++ key, err := Unwrap(key, data) if err == nil { key.Wipe() } tweak[0]-- return err } // Wrapping then unwrapping with different components altered func TestUnwrapWrongKey(t *testing.T) { data, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } if testFailWithTweak(fakeWrappingKey, data, fakeWrappingKey.data) == nil { t.Error("using a different wrapping key should make unwrap fail") } } func TestUnwrapWrongData(t *testing.T) { data, err := Wrap(fakeWrappingKey, fakeValidPolicyKey) if err != nil { t.Fatal(err) } if testFailWithTweak(fakeWrappingKey, data, data.EncryptedKey) == nil { t.Error("changing encryption key should make unwrap fail") } if testFailWithTweak(fakeWrappingKey, data, data.IV) == nil { t.Error("changing IV should make unwrap fail") } if testFailWithTweak(fakeWrappingKey, data, data.Hmac) == nil { t.Error("changing HMAC should make unwrap fail") } } func TestComputeKeyDescriptorV1(t *testing.T) { descriptor, err := ComputeKeyDescriptor(fakeValidPolicyKey, 1) if err != nil { t.Fatal(err) } if descriptor != "8290608a029c5aae" { t.Errorf("wrong v1 descriptor: %s", descriptor) } } func TestComputeKeyDescriptorV2(t *testing.T) { descriptor, err := ComputeKeyDescriptor(fakeValidPolicyKey, 2) if err != nil { t.Fatal(err) } if descriptor != "2139f52bf8386ee99845818ac7e91c4a" { t.Errorf("wrong v2 descriptor: %s", descriptor) } } func TestComputeKeyDescriptorBadVersion(t *testing.T) { _, 
err := ComputeKeyDescriptor(fakeValidPolicyKey, 0) if err == nil { t.Error("computing key descriptor with bad version should fail") } } // Run our test cases for passphrase hashing func TestPassphraseHashing(t *testing.T) { pk, err := fakePassphraseKey() if err != nil { t.Fatal(err) } defer pk.Wipe() for i, testCase := range hashTestCases { if err := testCase.costs.CheckValidity(); err != nil { t.Errorf("Hash test %d: for costs=%+v hashing failed: %v", i, testCase.costs, err) continue } hash, err := PassphraseHash(pk, fakeSalt, testCase.costs) if err != nil { t.Errorf("Hash test %d: for costs=%+v hashing failed: %v", i, testCase.costs, err) continue } defer hash.Wipe() actual := hex.EncodeToString(hash.data) if actual != testCase.hexHash { t.Errorf("Hash test %d: for costs=%+v expected hash of %q got %q", i, testCase.costs, testCase.hexHash, actual) } } } var badCosts = []*metadata.HashingCosts{ // Bad Time costs {Time: 0, Memory: 1 << 11, Parallelism: 1}, {Time: 1 << 33, Memory: 1 << 11, Parallelism: 1}, // Bad Memory costs {Time: 1, Memory: 5, Parallelism: 1}, {Time: 1, Memory: 1 << 33, Parallelism: 1}, // Bad Parallelism costs {Time: 1, Memory: 1 << 11, Parallelism: 0, TruncationFixed: false}, {Time: 1, Memory: 1 << 11, Parallelism: 0, TruncationFixed: true}, {Time: 1, Memory: 1 << 11, Parallelism: 256, TruncationFixed: false}, {Time: 1, Memory: 1 << 11, Parallelism: 256, TruncationFixed: true}, {Time: 1, Memory: 1 << 11, Parallelism: 257, TruncationFixed: true}, } func TestBadParameters(t *testing.T) { for i, costs := range badCosts { if costs.CheckValidity() == nil { t.Errorf("Hash test %d: expected error for costs=%+v", i, costs) } } } func BenchmarkWrap(b *testing.B) { for n := 0; n < b.N; n++ { Wrap(fakeWrappingKey, fakeValidPolicyKey) } } func BenchmarkUnwrap(b *testing.B) { b.StopTimer() data, _ := Wrap(fakeWrappingKey, fakeValidPolicyKey) b.StartTimer() for n := 0; n < b.N; n++ { key, err := Unwrap(fakeWrappingKey, data) if err != nil { b.Fatal(err) } 
key.Wipe() } } func BenchmarkUnwrapNolock(b *testing.B) { b.StopTimer() UseMlock = false defer func() { UseMlock = true }() data, _ := Wrap(fakeWrappingKey, fakeValidPolicyKey) b.StartTimer() for n := 0; n < b.N; n++ { key, err := Unwrap(fakeWrappingKey, data) if err != nil { b.Fatal(err) } key.Wipe() } } func BenchmarkRandomWrapUnwrap(b *testing.B) { for n := 0; n < b.N; n++ { wk, _ := NewRandomKey(metadata.InternalKeyLen) sk, _ := NewRandomKey(metadata.InternalKeyLen) testWrapUnwrapEqual(wk, sk) // Must manually call wipe here, or test will use too much memory. wk.Wipe() sk.Wipe() } } func benchmarkPassphraseHashing(b *testing.B, costs *metadata.HashingCosts) { b.StopTimer() pk, err := fakePassphraseKey() if err != nil { b.Fatal(err) } defer pk.Wipe() b.StartTimer() for n := 0; n < b.N; n++ { hash, err := PassphraseHash(pk, fakeSalt, costs) hash.Wipe() if err != nil { b.Fatal(err) } } } func BenchmarkPassphraseHashing_1MB_1Thread(b *testing.B) { benchmarkPassphraseHashing(b, &metadata.HashingCosts{Time: 1, Memory: 1 << 10, Parallelism: 1}) } func BenchmarkPassphraseHashing_1GB_1Thread(b *testing.B) { benchmarkPassphraseHashing(b, &metadata.HashingCosts{Time: 1, Memory: 1 << 20, Parallelism: 1}) } func BenchmarkPassphraseHashing_128MB_1Thread(b *testing.B) { benchmarkPassphraseHashing(b, &metadata.HashingCosts{Time: 1, Memory: 1 << 17, Parallelism: 1}) } func BenchmarkPassphraseHashing_128MB_8Thread(b *testing.B) { benchmarkPassphraseHashing(b, &metadata.HashingCosts{Time: 1, Memory: 1 << 17, Parallelism: 8}) } func BenchmarkPassphraseHashing_128MB_8Pass(b *testing.B) { benchmarkPassphraseHashing(b, &metadata.HashingCosts{Time: 8, Memory: 1 << 17, Parallelism: 1}) }