Add support for DNSSEC types from RFC4034 (#38)

* Add support for DNSSEC types from RFC4034

* Fix bug with wrong signature length decoded

* Sort RRTypes in the Readme, and add new ones

* Add support for nsec3
This commit is contained in:
Nick Johnson 2018-06-01 08:25:07 +01:00 committed by silverwind
parent eddb7ab7e4
commit 4e540801ab
5 changed files with 598 additions and 65 deletions

189
README.md
View File

@ -146,15 +146,72 @@ Currently the different available records are
} }
``` ```
#### `TXT` #### `CAA`
``` js ``` js
{ {
data: 'text' || Buffer || [ Buffer || 'text' ] flags: 128, // octet
tag: 'issue|issuewild|iodef',
value: 'ca.example.net'
} }
``` ```
When encoding, scalar values are converted to an array and strings are converted to UTF-8 encoded Buffers. When decoding, the return value will always be an array of Buffer. #### `CNAME`
``` js
{
data: 'cname.to.another.record'
}
```
#### `DNAME`
``` js
{
data: 'dname.to.another.record'
}
```
#### `DNSKEY`
``` js
{
flags: 257, // 16 bits
algorithm: 1, // octet
key: buffer
}
```
#### `DS`
``` js
{
keyTag: 12345,
algorithm: 8,
digestType: 1,
digest: buffer
}
```
#### `HINFO`
``` js
{
data: {
cpu: 'cpu info',
os: 'os info'
}
}
```
#### `MX`
``` js
{
preference: 10,
exchange: 'mail.example.net'
}
```
#### `NS` #### `NS`
@ -164,6 +221,28 @@ When encoding, scalar values are converted to an array and strings are converted
} }
``` ```
#### `NSEC`
``` js
{
nextDomain: 'a.domain',
rrtypes: ['A', 'TXT', 'RRSIG']
}
```
#### `NSEC3`
``` js
{
algorithm: 1,
flags: 0,
iterations: 2,
salt: buffer,
nextDomain: buffer, // Hashed per RFC5155
rrtypes: ['A', 'TXT', 'RRSIG']
}
```
#### `NULL` #### `NULL`
``` js ``` js
@ -172,6 +251,45 @@ When encoding, scalar values are converted to an array and strings are converted
} }
``` ```
#### `OPT`
``` js
{
type: 'OPT',
name: '.',
udpPayloadSize: 4096,
flags: packet.DNSSEC_OK,
options: [{
code: 12,
data: Buffer.alloc(31)
}]
}
```
#### `PTR`
``` js
{
data: 'points.to.another.record'
}
```
#### `RRSIG`
``` js
{
typeCovered: 'A',
algorithm: 8,
labels: 1,
originalTTL: 3600,
expiration: timestamp,
inception: timestamp,
keyTag: 12345,
signersName: 'a.name',
signature: buffer
}
```
#### `SOA` #### `SOA`
``` js ``` js
@ -202,74 +320,15 @@ When encoding, scalar values are converted to an array and strings are converted
} }
``` ```
#### `HINFO` #### `TXT`
``` js ``` js
{ {
data: { data: 'text' || Buffer || [ Buffer || 'text' ]
cpu: 'cpu info',
os: 'os info'
}
} }
``` ```
#### `PTR` When encoding, scalar values are converted to an array and strings are converted to UTF-8 encoded Buffers. When decoding, the return value will always be an array of Buffer.
``` js
{
data: 'points.to.another.record'
}
```
#### `CNAME`
``` js
{
data: 'cname.to.another.record'
}
```
#### `DNAME`
``` js
{
data: 'dname.to.another.record'
}
```
#### `CAA`
``` js
{
flags: 128, // octet
tag: 'issue|issuewild|iodef',
value: 'ca.example.net'
}
```
#### `MX`
``` js
{
preference: 10,
exchange: 'mail.example.net'
}
```
#### `OPT`
``` js
{
type: 'OPT',
name: '.',
udpPayloadSize: 4096,
flags: packet.DNSSEC_OK,
options: [{
code: 12,
data: Buffer.alloc(31)
}]
}
```
If you need another one, open an issue and we'll try to add it. If you need another one, open an issue and we'll try to add it.

397
index.js
View File

@ -743,6 +743,398 @@ ropt.encodingLength = function (options) {
return 2 + encodingLengthList(options || [], roption) return 2 + encodingLengthList(options || [], roption)
} }
// DNSKEY record (RFC4034 section 2): { flags, algorithm, key }.
// Wire format: rdlength (2) | flags (2) | protocol (1, always 3) |
// algorithm (1) | public key material (rest of rdata).
const rdnskey = exports.dnskey = {}

rdnskey.PROTOCOL_DNSSEC = 3
rdnskey.ZONE_KEY = 0x80
rdnskey.SECURE_ENTRYPOINT = 0x8000

rdnskey.encode = function (key, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(rdnskey.encodingLength(key))
  if (!offset) offset = 0
  const start = offset
  const material = key.key
  if (!Buffer.isBuffer(material)) {
    throw new Error('Key must be a Buffer')
  }
  offset += 2 // rdlength slot, back-filled once the total is known
  buf.writeUInt16BE(key.flags, offset)
  offset += 2
  buf.writeUInt8(rdnskey.PROTOCOL_DNSSEC, offset)
  offset += 1
  buf.writeUInt8(key.algorithm, offset)
  offset += 1
  material.copy(buf, offset, 0, material.length)
  offset += material.length
  rdnskey.encode.bytes = offset - start
  buf.writeUInt16BE(rdnskey.encode.bytes - 2, start)
  return buf
}
rdnskey.encode.bytes = 0

rdnskey.decode = function (buf, offset) {
  if (!offset) offset = 0
  const start = offset
  const key = {}
  const rdlength = buf.readUInt16BE(offset)
  offset += 2
  key.flags = buf.readUInt16BE(offset)
  offset += 2
  if (buf.readUInt8(offset) !== rdnskey.PROTOCOL_DNSSEC) {
    throw new Error('Protocol must be 3')
  }
  offset += 1
  key.algorithm = buf.readUInt8(offset)
  offset += 1
  // Key material runs to the end of the rdata; rdlength does not count
  // its own two length octets, hence the +2.
  key.key = buf.slice(offset, start + rdlength + 2)
  offset += key.key.length
  rdnskey.decode.bytes = offset - start
  return key
}
rdnskey.decode.bytes = 0

// 2 (rdlength) + 2 (flags) + 1 (protocol) + 1 (algorithm) + key bytes.
rdnskey.encodingLength = function (key) {
  return 6 + Buffer.byteLength(key.key)
}
// RRSIG record (RFC4034 section 3): a signature covering another RRset.
// Wire format: rdlength (2) | type covered (2) | algorithm (1) | labels (1) |
// original TTL (4) | expiration (4) | inception (4) | key tag (2) |
// signer's name | signature (rest of rdata).
const rrrsig = exports.rrsig = {}

rrrsig.encode = function (sig, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(rrrsig.encodingLength(sig))
  if (!offset) offset = 0
  const start = offset
  const sigdata = sig.signature
  if (!Buffer.isBuffer(sigdata)) {
    throw new Error('Signature must be a Buffer')
  }
  offset += 2 // rdlength slot, back-filled once the total is known
  buf.writeUInt16BE(types.toType(sig.typeCovered), offset)
  offset += 2
  buf.writeUInt8(sig.algorithm, offset)
  offset += 1
  buf.writeUInt8(sig.labels, offset)
  offset += 1
  buf.writeUInt32BE(sig.originalTTL, offset)
  offset += 4
  buf.writeUInt32BE(sig.expiration, offset)
  offset += 4
  buf.writeUInt32BE(sig.inception, offset)
  offset += 4
  buf.writeUInt16BE(sig.keyTag, offset)
  offset += 2
  name.encode(sig.signersName, buf, offset)
  offset += name.encode.bytes
  sigdata.copy(buf, offset, 0, sigdata.length)
  offset += sigdata.length
  rrrsig.encode.bytes = offset - start
  buf.writeUInt16BE(rrrsig.encode.bytes - 2, start)
  return buf
}
rrrsig.encode.bytes = 0

rrrsig.decode = function (buf, offset) {
  if (!offset) offset = 0
  const start = offset
  const record = {}
  const rdlength = buf.readUInt16BE(offset)
  offset += 2
  record.typeCovered = types.toString(buf.readUInt16BE(offset))
  offset += 2
  record.algorithm = buf.readUInt8(offset)
  offset += 1
  record.labels = buf.readUInt8(offset)
  offset += 1
  record.originalTTL = buf.readUInt32BE(offset)
  offset += 4
  record.expiration = buf.readUInt32BE(offset)
  offset += 4
  record.inception = buf.readUInt32BE(offset)
  offset += 4
  record.keyTag = buf.readUInt16BE(offset)
  offset += 2
  record.signersName = name.decode(buf, offset)
  offset += name.decode.bytes
  // The signature fills whatever rdata remains after the signer's name.
  // Bounding by rdlength keeps trailing bytes in buf out of the signature.
  record.signature = buf.slice(offset, start + rdlength + 2)
  offset += record.signature.length
  rrrsig.decode.bytes = offset - start
  return record
}
rrrsig.decode.bytes = 0

// 2 (rdlength) + 18 fixed octets + signer's name + signature bytes.
rrrsig.encodingLength = function (sig) {
  return 20 +
    name.encodingLength(sig.signersName) +
    Buffer.byteLength(sig.signature)
}
// NSEC/NSEC3 type bitmap (RFC4034 section 4.1.2, RFC5155 section 3.2.1).
// The 16-bit RR type space is split into 256 windows of 256 types each;
// every non-empty window is encoded as:
//   window number (1) | bitmap length (1) | bitmap (1-32 octets)
// Bit 0 of the first bitmap octet is the lowest type in the window
// (network bit order), so type T maps to bit (7 - (T & 7)) of octet
// ((T >> 3) & 0x1F).
const typebitmap = {}

typebitmap.encode = function (typelist, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(typebitmap.encodingLength(typelist))
  if (!offset) offset = 0
  const oldOffset = offset
  // Group the requested types into per-window octet arrays.
  var typesByWindow = []
  for (var i = 0; i < typelist.length; i++) {
    var typeid = types.toType(typelist[i])
    if (typesByWindow[typeid >> 8] === undefined) {
      typesByWindow[typeid >> 8] = []
    }
    typesByWindow[typeid >> 8][(typeid >> 3) & 0x1F] |= 1 << (7 - (typeid & 0x7))
  }
  for (i = 0; i < typesByWindow.length; i++) {
    if (typesByWindow[i] !== undefined) {
      // Holes in the sparse octet array become 0x00 via Buffer.from.
      var windowBuf = Buffer.from(typesByWindow[i])
      buf.writeUInt8(i, offset)
      offset += 1
      buf.writeUInt8(windowBuf.length, offset)
      offset += 1
      windowBuf.copy(buf, offset)
      offset += windowBuf.length
    }
  }
  typebitmap.encode.bytes = offset - oldOffset
  return buf
}
typebitmap.encode.bytes = 0

typebitmap.decode = function (buf, offset, length) {
  if (!offset) offset = 0
  const oldOffset = offset
  var typelist = []
  while (offset - oldOffset < length) {
    var window = buf.readUInt8(offset)
    offset += 1
    var windowLength = buf.readUInt8(offset)
    offset += 1
    for (var i = 0; i < windowLength; i++) {
      var b = buf.readUInt8(offset + i)
      for (var j = 0; j < 8; j++) {
        if (b & (1 << (7 - j))) {
          var typeid = types.toString((window << 8) | (i << 3) | j)
          typelist.push(typeid)
        }
      }
    }
    offset += windowLength
  }
  typebitmap.decode.bytes = offset - oldOffset
  return typelist
}
typebitmap.decode.bytes = 0

typebitmap.encodingLength = function (typelist) {
  // Track the highest type value seen per window; that window's bitmap is
  // Math.ceil((highest + 1) / 8) octets.
  var extents = []
  for (var i = 0; i < typelist.length; i++) {
    var typeid = types.toType(typelist[i])
    // Plain assignment here: the previous `|=` OR-ed successive maxima
    // together, which can exceed the true maximum (e.g. types 8 and 16
    // combined to 24). That over-estimated the length, so encode() filled
    // fewer bytes than Buffer.allocUnsafe() reserved, leaving uninitialized
    // trailing bytes in the returned buffer.
    extents[typeid >> 8] = Math.max(extents[typeid >> 8] || 0, typeid & 0xFF)
  }
  var len = 0
  for (i = 0; i < extents.length; i++) {
    if (extents[i] !== undefined) {
      len += 2 + Math.ceil((extents[i] + 1) / 8)
    }
  }
  return len
}
// NSEC record (RFC4034 section 4): next owner name plus a type bitmap of
// the RR types present at this name.
const rnsec = exports.nsec = {}

rnsec.encode = function (record, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(rnsec.encodingLength(record))
  if (!offset) offset = 0
  const start = offset
  offset += 2 // rdlength slot, back-filled once the total is known
  name.encode(record.nextDomain, buf, offset)
  offset += name.encode.bytes
  typebitmap.encode(record.rrtypes, buf, offset)
  offset += typebitmap.encode.bytes
  rnsec.encode.bytes = offset - start
  buf.writeUInt16BE(rnsec.encode.bytes - 2, start)
  return buf
}
rnsec.encode.bytes = 0

rnsec.decode = function (buf, offset) {
  if (!offset) offset = 0
  const start = offset
  const record = {}
  const rdlength = buf.readUInt16BE(offset)
  offset += 2
  record.nextDomain = name.decode(buf, offset)
  offset += name.decode.bytes
  // NOTE(review): (offset - start) includes the 2 rdlength octets, so this
  // bound is 2 bytes short of the true remaining rdata. Harmless as long as
  // the final bitmap window is non-empty (RFC4034 forbids zero-length
  // windows) — confirm before relying on it for malformed input.
  record.rrtypes = typebitmap.decode(buf, offset, rdlength - (offset - start))
  offset += typebitmap.decode.bytes
  rnsec.decode.bytes = offset - start
  return record
}
rnsec.decode.bytes = 0

// 2 (rdlength) + next-domain name + type bitmap.
rnsec.encodingLength = function (record) {
  return 2 +
    name.encodingLength(record.nextDomain) +
    typebitmap.encodingLength(record.rrtypes)
}
// NSEC3 record (RFC5155 section 3): hashed authenticated denial of
// existence. Both salt and nextDomain are raw Buffers (nextDomain is the
// hashed owner name, not a presentation-format domain).
const rnsec3 = exports.nsec3 = {}

rnsec3.encode = function (record, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(rnsec3.encodingLength(record))
  if (!offset) offset = 0
  const start = offset
  const saltData = record.salt
  if (!Buffer.isBuffer(saltData)) {
    throw new Error('salt must be a Buffer')
  }
  const nextHash = record.nextDomain
  if (!Buffer.isBuffer(nextHash)) {
    throw new Error('nextDomain must be a Buffer')
  }
  offset += 2 // rdlength slot, back-filled once the total is known
  buf.writeUInt8(record.algorithm, offset)
  offset += 1
  buf.writeUInt8(record.flags, offset)
  offset += 1
  buf.writeUInt16BE(record.iterations, offset)
  offset += 2
  // Salt and hashed next owner are each prefixed by a one-octet length.
  buf.writeUInt8(saltData.length, offset)
  offset += 1
  saltData.copy(buf, offset, 0, saltData.length)
  offset += saltData.length
  buf.writeUInt8(nextHash.length, offset)
  offset += 1
  nextHash.copy(buf, offset, 0, nextHash.length)
  offset += nextHash.length
  typebitmap.encode(record.rrtypes, buf, offset)
  offset += typebitmap.encode.bytes
  rnsec3.encode.bytes = offset - start
  buf.writeUInt16BE(rnsec3.encode.bytes - 2, start)
  return buf
}
rnsec3.encode.bytes = 0

rnsec3.decode = function (buf, offset) {
  if (!offset) offset = 0
  const start = offset
  const record = {}
  const rdlength = buf.readUInt16BE(offset)
  offset += 2
  record.algorithm = buf.readUInt8(offset)
  offset += 1
  record.flags = buf.readUInt8(offset)
  offset += 1
  record.iterations = buf.readUInt16BE(offset)
  offset += 2
  const saltLength = buf.readUInt8(offset)
  offset += 1
  record.salt = buf.slice(offset, offset + saltLength)
  offset += saltLength
  const hashLength = buf.readUInt8(offset)
  offset += 1
  record.nextDomain = buf.slice(offset, offset + hashLength)
  offset += hashLength
  // NOTE(review): as in nsec.decode, this bound is 2 bytes short of the
  // true remaining rdata (it counts the rdlength octets as consumed rdata);
  // harmless while the final bitmap window is non-empty.
  record.rrtypes = typebitmap.decode(buf, offset, rdlength - (offset - start))
  offset += typebitmap.decode.bytes
  rnsec3.decode.bytes = offset - start
  return record
}
rnsec3.decode.bytes = 0

// 2 (rdlength) + 4 fixed octets + 2 length prefixes + salt + hash + bitmap.
rnsec3.encodingLength = function (record) {
  return 8 +
    record.salt.length +
    record.nextDomain.length +
    typebitmap.encodingLength(record.rrtypes)
}
// DS record (RFC4034 section 5): delegation signer — a digest of a child
// zone's DNSKEY. Wire format: rdlength (2) | key tag (2) | algorithm (1) |
// digest type (1) | digest (rest of rdata).
const rds = exports.ds = {}

rds.encode = function (digest, buf, offset) {
  if (!buf) buf = Buffer.allocUnsafe(rds.encodingLength(digest))
  if (!offset) offset = 0
  const start = offset
  const hash = digest.digest
  if (!Buffer.isBuffer(hash)) {
    throw new Error('Digest must be a Buffer')
  }
  offset += 2 // rdlength slot, back-filled once the total is known
  buf.writeUInt16BE(digest.keyTag, offset)
  offset += 2
  buf.writeUInt8(digest.algorithm, offset)
  offset += 1
  buf.writeUInt8(digest.digestType, offset)
  offset += 1
  hash.copy(buf, offset, 0, hash.length)
  offset += hash.length
  rds.encode.bytes = offset - start
  buf.writeUInt16BE(rds.encode.bytes - 2, start)
  return buf
}
rds.encode.bytes = 0

rds.decode = function (buf, offset) {
  if (!offset) offset = 0
  const start = offset
  const record = {}
  const rdlength = buf.readUInt16BE(offset)
  offset += 2
  record.keyTag = buf.readUInt16BE(offset)
  offset += 2
  record.algorithm = buf.readUInt8(offset)
  offset += 1
  record.digestType = buf.readUInt8(offset)
  offset += 1
  // Digest runs to the end of the rdata; rdlength excludes its own two
  // length octets, hence the +2.
  record.digest = buf.slice(offset, start + rdlength + 2)
  offset += record.digest.length
  rds.decode.bytes = offset - start
  return record
}
rds.decode.bytes = 0

// 2 (rdlength) + 2 (key tag) + 1 (algorithm) + 1 (digest type) + digest.
rds.encodingLength = function (digest) {
  return 6 + Buffer.byteLength(digest.digest)
}
const renc = exports.record = function (type) { const renc = exports.record = function (type) {
switch (type.toUpperCase()) { switch (type.toUpperCase()) {
case 'A': return ra case 'A': return ra
@ -759,6 +1151,11 @@ const renc = exports.record = function (type) {
case 'SOA': return rsoa case 'SOA': return rsoa
case 'MX': return rmx case 'MX': return rmx
case 'OPT': return ropt case 'OPT': return ropt
case 'DNSKEY': return rdnskey
case 'RRSIG': return rrrsig
case 'NSEC': return rnsec
case 'NSEC3': return rnsec3
case 'DS': return rds
} }
return runknown return runknown
} }

View File

@ -13,6 +13,7 @@
"test": "eslint --color *.js && tape test.js" "test": "eslint --color *.js && tape test.js"
}, },
"dependencies": { "dependencies": {
"ip": "^1.1.5", "ip": "^1.1.5",
"safe-buffer": "^5.1.1" "safe-buffer": "^5.1.1"
}, },

75
test.js
View File

@ -359,6 +359,81 @@ tape('opt', function (t) {
t.end() t.end()
}) })
tape('dnskey', function (t) {
  // Round-trip a DNSKEY with both RFC4034 flag bits set.
  const sample = {
    flags: packet.dnskey.SECURE_ENTRYPOINT | packet.dnskey.ZONE_KEY,
    algorithm: 1,
    key: Buffer.from([0, 1, 2, 3, 4, 5])
  }
  testEncoder(t, packet.dnskey, sample)
  t.end()
})
tape('rrsig', function (t) {
  const sample = {
    typeCovered: 'A',
    algorithm: 1,
    labels: 2,
    originalTTL: 3600,
    expiration: 1234,
    inception: 1233,
    keyTag: 2345,
    signersName: 'foo.com',
    signature: Buffer.from([0, 1, 2, 3, 4, 5])
  }
  testEncoder(t, packet.rrsig, sample)

  // Decoding must bound the signature by rdlength, so junk bytes after the
  // record are not swallowed into it.
  const padded = Buffer.allocUnsafe(packet.rrsig.encodingLength(sample) + 4)
  packet.rrsig.encode(sample, padded)
  const decoded = packet.rrsig.decode(padded)
  t.ok(compare(t, sample, decoded))
  t.end()
})
tape('nsec', function (t) {
  testEncoder(t, packet.nsec, {
    nextDomain: 'foo.com',
    rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
  })

  // Round-trip the sample NSEC from https://tools.ietf.org/html/rfc4034#section-4.3
  const wire = Buffer.from('003704686f7374076578616d706c6503636f6d00' +
    '0006400100000003041b000000000000000000000000000000000000000000000' +
    '000000020', 'hex')
  const record = packet.nsec.decode(wire)
  t.ok(compare(t, record, {
    nextDomain: 'host.example.com',
    rrtypes: ['A', 'MX', 'RRSIG', 'NSEC', 'UNKNOWN_1234']
  }))
  const roundTrip = packet.nsec.encode(record)
  t.same(wire.length, roundTrip.length)
  t.same(wire, roundTrip)
  t.end()
})
tape('nsec3', function (t) {
  // Round-trip an NSEC3 record; salt and nextDomain are raw Buffers
  // (nextDomain is the hashed owner name per RFC5155).
  // Buffer.from replaces the deprecated `new Buffer(...)` constructor,
  // which is removed/throwing in modern Node and inconsistent with the
  // rest of this test file.
  testEncoder(t, packet.nsec3, {
    algorithm: 1,
    flags: 0,
    iterations: 257,
    salt: Buffer.from([42, 42, 42]),
    nextDomain: Buffer.from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
    rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
  })
  t.end()
})
tape('ds', function (t) {
  // Round-trip a DS (delegation signer) record.
  const sample = {
    keyTag: 1234,
    algorithm: 1,
    digestType: 1,
    digest: Buffer.from([0, 1, 2, 3, 4, 5])
  }
  testEncoder(t, packet.ds, sample)
  t.end()
})
tape('unpack', function (t) { tape('unpack', function (t) {
const buf = Buffer.from([ const buf = Buffer.from([
0x00, 0x79, 0x00, 0x79,

View File

@ -98,5 +98,6 @@ exports.toType = function (name) {
case 'ANY': return 255 case 'ANY': return 255
case '*': return 255 case '*': return 255
} }
if (name.toUpperCase().startsWith('UNKNOWN_')) return parseInt(name.slice(8))
return 0 return 0
} }