Implement hash encoding

parent 3ca182db50
commit 9c5afcd34d

@@ -77,11 +77,11 @@ tests = [
   { %([1, 2, 3]), "83 01 02 03" },
   { %([1, [2, 3], [4, 5]]), "83 01 82 02 03 82 04 05" },
   { %([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), "98 19 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 18 18 19" },
-  # { %({}), "a0" },
-  # { %({1: 2, 3: 4}), "a2 01 02 03 04" },
-  # { %({"a": 1, "b": [2, 3]}), "a2 61 61 01 61 62 82 02 03" },
-  # { %(["a", {"b": "c"}]), "82 61 61 a1 61 62 61 63" },
-  # { %({"a": "A", "b": "B", "c": "C", "d": "D", "e": "E"}), "a5 61 61 61 41 61 62 61 42 61 63 61 43 61 64 61 44 61 65 61 45" },
+  { %({}), "a0" },
+  { %({1: 2, 3: 4}), "a2 01 02 03 04" },
+  { %({"a": 1, "b": [2, 3]}), "a2 61 61 01 61 62 82 02 03" },
+  { %(["a", {"b": "c"}]), "82 61 61 a1 61 62 61 63" },
+  { %({"a": "A", "b": "B", "c": "C", "d": "D", "e": "E"}), "a5 61 61 61 41 61 62 61 42 61 63 61 43 61 64 61 44 61 65 61 45" },
   { %((_ h'0102', h'030405')), "5f 42 01 02 43 03 04 05 ff" },
   { %((_ "strea", "ming")), "7f 65 73 74 72 65 61 64 6d 69 6e 67 ff" },
   { %([_ ]), "9f ff" },
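
For reference, the hex column in these fixtures is the raw CBOR encoding. Per RFC 7049 Section 2, the newly enabled `{"a": 1, "b": [2, 3]}` case decodes byte by byte as:

    # a2       -- map header, 2 key/value pairs follow
    # 61 61    -- text string of length 1: "a"
    # 01       -- unsigned integer 1
    # 61 62    -- text string of length 1: "b"
    # 82       -- array header, 2 items follow
    # 02 03    -- unsigned integers 2 and 3
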
@@ -90,9 +90,9 @@ tests = [
   { %([1, [2, 3], [_ 4, 5]]), "83 01 82 02 03 9f 04 05 ff" },
   { %([1, [_ 2, 3], [4, 5]]), "83 01 9f 02 03 ff 82 04 05" },
   { %([_ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), "9f 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 18 18 19 ff" },
-  # { %({_ "a": 1, "b": [_ 2, 3]}), "bf 61 61 01 61 62 9f 02 03 ff ff" },
-  # { %(["a", {_ "b": "c"}]), "82 61 61 bf 61 62 61 63 ff" },
-  # { %({_ "Fun": true, "Amt": -2}), "bf 63 46 75 6e f5 63 41 6d 74 21 ff" },
+  { %({_ "a": 1, "b": [_ 2, 3]}), "bf 61 61 01 61 62 9f 02 03 ff ff" },
+  { %(["a", {_ "b": "c"}]), "82 61 61 bf 61 62 61 63 ff" },
+  { %({_ "Fun": true, "Amt": -2}), "bf 63 46 75 6e f5 63 41 6d 74 21 ff" },
 ]

 describe "Examples from RFC7049 Appendix A" do
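
The indefinite-length map fixtures use the 0xbf initial byte and a 0xff break instead of a pair count; `{_ "Fun": true, "Amt": -2}` breaks down as:

    # bf            -- map header, indefinite length
    # 63 46 75 6e   -- text string of length 3: "Fun"
    # f5            -- simple value: true
    # 63 41 6d 74   -- text string of length 3: "Amt"
    # 21            -- negative integer: -(1 + 1) = -2
    # ff            -- break, closes the map
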
@@ -46,6 +46,13 @@ class CBOR::Diagnostic
       arr = read_array(token.size)
       return "[#{arr.join(", ")}]" if token.size
       "[_ #{arr.join(", ")}]"
+    when Token::MapT
+      hash_body = read_hash(token.size).join(", ")
+      return "{#{hash_body}}" if token.size
+      "{_ #{hash_body}}"
+    when Token::BoolT
+      return "true" if token.value
+      "false"
     else
       token.inspect
     end
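
In RFC 7049's diagnostic notation a leading `_` marks an indefinite-length container, which is why each branch above renders two forms: `token.size` is an Int32 for definite-length containers and nil for indefinite ones, so the `if token.size` guard selects `{...}` over `{_ ...}`.
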
@@ -67,6 +74,24 @@ class CBOR::Diagnostic
     arr
   end

+  # Reads the hash, returning an array of key-pair strings already
+  # correctly formatted in the diagnostic notation
+  private def read_hash(size : Int32?) : Array(String)
+    key_pairs = Array(String).new
+
+    if size
+      size.times { key_pairs << key_value(*@lexer.next_pair) }
+    else
+      @lexer.pairs_until_break { |pairs| key_pairs << key_value(*pairs) }
+    end
+
+    key_pairs
+  end
+
+  private def key_value(key : Token::T, value : Token::T) : String
+    "#{to_diagnostic(key)}: #{to_diagnostic(value)}"
+  end
+
   private def chunks(value : Bytes, chunks : Array(Int32)) : Array(Bytes)
     res = Array(Bytes).new
     bytes = value.to_a
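
A sketch of the two helpers at work on the `"a2 01 02 03 04"` fixture (the token spellings are illustrative, not the library's actual output):

    # Input after the lexer: a2 01 02 03 04
    #   decode(0xa2)              # => Token::MapT(size: 2)
    #   read_hash(2) pulls two pairs via @lexer.next_pair:
    #     key_value(1, 2)         # => "1: 2"
    #     key_value(3, 4)         # => "3: 4"
    #   to_diagnostic joins them  # => "{1: 2, 3: 4}"
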
@@ -20,6 +20,17 @@ class CBOR::Lexer
     decode(byte)
   end

+  # Read the next pair of tokens, useful for maps.
+  # Raises an exception if fewer than two tokens are left.
+  def next_pair : Tuple(Token::T, Token::T)
+    pairs = Array(Token::T).new(2) do
+      token = next_token
+      raise ParseError.new("Unexpected EOF while reading next pair") unless token
+      token
+    end
+    Tuple.new(pairs[0], pairs[1])
+  end
+
   def until_break(&block : Token::T ->)
     loop do
       byte = next_byte
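
A minimal usage sketch for `next_pair`; the `Lexer` constructor shown here is an assumption, not the library's confirmed API:

    # Hypothetical driver code -- constructor and token fields are assumptions.
    lexer = CBOR::Lexer.new(Bytes[0xa1, 0x01, 0x02]) # encodes {1: 2}
    lexer.next_token             # consumes 0xa1 => Token::MapT(size: 1)
    key, value = lexer.next_pair # tokens for the integers 1 and 2
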
@@ -29,6 +40,23 @@ class CBOR::Lexer
     end
   end

+  # Read a pair of values until a break is reached
+  def pairs_until_break(&block : Tuple(Token::T, Token::T) ->)
+    loop do
+      key_byte = next_byte
+      raise ParseError.new("Unexpected EOF while searching for break") unless key_byte
+      break if key_byte == BREAK
+
+      key = decode(key_byte)
+      raise ParseError.new("Unexpected EOF while reading key in pairs") unless key
+
+      value = next_token
+      raise ParseError.new("Unexpected EOF while reading value in pairs") unless value
+
+      yield Tuple.new(key, value)
+    end
+  end
+
   private def decode(byte : UInt8) : Token::T
     case byte
     when 0x00..0x1b
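
And a matching sketch for `pairs_until_break` over an indefinite-length map, under the same assumed constructor:

    # Hypothetical driver code for the indefinite map {_ 1: 2}.
    lexer = CBOR::Lexer.new(Bytes[0xbf, 0x01, 0x02, 0xff])
    lexer.next_token # consumes 0xbf => Token::MapT with size nil
    lexer.pairs_until_break do |key, value|
      # yielded once with the tokens for 1 and 2; the 0xff break ends the loop
    end
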
@@ -47,6 +75,15 @@ class CBOR::Lexer
       array_start(read_size(byte - 0x80))
     when 0x9f
       Token::ArrayT.new
+    when 0xa0..0xbb
+      map_start(read_size(byte - 0xa0))
+    when 0xbf
+      Token::MapT.new
+    ##################
+    when 0xf4
+      Token::BoolT.new(value: false)
+    when 0xf5
+      Token::BoolT.new(value: true)
     else
       raise ParseError.new("Unexpected first byte 0x#{byte.to_s(16)}")
     end
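
The new 0xa0..0xbb range mirrors the array handling at 0x80..0x9b and follows CBOR's major type 5 layout:

    # a0..b7 -- pair count encoded directly in the initial byte (0..23)
    # b8     -- pair count in the following 1 byte
    # b9     -- pair count in the following 2 bytes
    # ba     -- pair count in the following 4 bytes
    # bb     -- pair count in the following 8 bytes
    # bf     -- indefinite length, terminated by a 0xff break
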
@@ -128,6 +165,11 @@ class CBOR::Lexer
     Token::ArrayT.new(size: size.to_i32)
   end

+  private def map_start(size)
+    raise ParseError.new("Maximum size for map exceeded") if size > Int32::MAX
+    Token::MapT.new(size: size.to_i32)
+  end
+
   # Creates a method overloaded for each UInt size to convert the UInt into
   # the respective Int capable of containing the value
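
The Int32::MAX guard matters because a 0xbb header carries its pair count in 8 bytes; assuming `read_size` returns that count as an unsigned integer (as the array path suggests), a sketch of the input it rejects:

    # bb 00 00 00 01 00 00 00 00 -- map header claiming 2**32 pairs;
    # read_size => 4294967296, which exceeds Int32::MAX, so map_start
    # raises ParseError instead of overflowing in to_i32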