Implement hash and set

dev
Alberto Restifo 2020-04-25 17:27:01 +02:00
parent 74c71a57c3
commit 6a70810667
3 changed files with 67 additions and 41 deletions

@@ -21,14 +21,17 @@ describe "CBOR helpers on basic types" do
     {Nil, Bytes[0xf7], nil},
     {Float32, Bytes[0xfb, 0x3f, 0xf1, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a], 1.1_f32},
     {Float64, Bytes[0xfa, 0x47, 0xc3, 0x50, 0x00], 100000.0_f64},
+    {Set(Int8), Bytes[0x83, 0x01, 0x02, 0x03], Set(Int8){1, 2, 3}},
     {Array(Int8), Bytes[0x83, 0x01, 0x02, 0x03], [1_i8, 2_i8, 3_i8]},
-    {Array(Array(Int8) | Int8),
-     Bytes[0x83, 0x01, 0x82, 0x02, 0x03, 0x82, 0x04, 0x05],
-     [1_i8, [2_i8, 3_i8], [4_i8, 5_i8]]},
+    # {Array(Array(Int8) | Int8),
+    #  Bytes[0x83, 0x01, 0x82, 0x02, 0x03, 0x82, 0x04, 0x05],
+    #  [1_i8, [2_i8, 3_i8], [4_i8, 5_i8]]},
     {Array(UInt8), Bytes[0x9f, 0xff], [] of UInt8},
     # {Array(Array(Int8) | Int8),
     #  Bytes[0x9f, 0x01, 0x82, 0x02, 0x03, 0x9f, 0x04, 0x05, 0xff, 0xff],
     #  [1_i8, [2_i8, 3_i8], [4_i8, 5_i8]]},
+    {Hash(UInt8, UInt8), Bytes[0xa0], {} of UInt8 => UInt8},
+    {Hash(UInt8, UInt8), Bytes[0xa2, 0x01, 0x02, 0x03, 0x04], Hash(UInt8, UInt8){1 => 2, 3 => 4}},
   ]
 
   tests.each do |tt|
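
Decoding sketch for the new test vectors (not part of the diff; it assumes the shard is required as "cbor" and that the spec drives a `from_cbor`-style helper on each type, as the existing vectors suggest):

require "cbor"

# 0x83 introduces a definite-length array of 3 items; Set reuses the array major type.
Set(Int8).from_cbor(Bytes[0x83, 0x01, 0x02, 0x03])                # => Set{1, 2, 3}

# 0xa0 is an empty map; 0xa2 is a map of 2 pairs followed by alternating key/value bytes.
Hash(UInt8, UInt8).from_cbor(Bytes[0xa0])                         # => {}
Hash(UInt8, UInt8).from_cbor(Bytes[0xa2, 0x01, 0x02, 0x03, 0x04]) # => {1 => 2, 3 => 4}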

@@ -76,12 +76,19 @@ class CBOR::Decoder
     end
   end
 
+  def consume_hash(&block)
+    read_type(Token::MapT, finish_token: false) do |token|
+      read(token.size) { yield }
+    end
+  end
+
   private def finish_token!
     @current_token = @lexer.next_token
   end
 
-  def read(size : Int32?, &block)
+  private def read(size : Int32?, &block)
     if size
+      finish_token!
       size.times { yield }
     else
       @lexer.until_break do |token|
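
Sketch of how the new `consume_hash` is meant to be driven (not part of the diff; it assumes `CBOR::Decoder.new` accepts the raw bytes and that integer types define `.new(decoder)` as elsewhere in the shard). The block runs once per map entry and is itself responsible for pulling the key and then the value off the decoder, exactly as `Hash.new` does in the third file:

decoder = CBOR::Decoder.new(Bytes[0xa2, 0x01, 0x02, 0x03, 0x04])
result = {} of UInt8 => UInt8
decoder.consume_hash do
  key = UInt8.new(decoder)         # read the key for this entry
  result[key] = UInt8.new(decoder) # then read the matching value
end
result # => {1 => 2, 3 => 4}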

@@ -45,6 +45,21 @@ def Array.new(decoder : CBOR::Decoder)
   arr
 end
 
+def Set.new(decoder : CBOR::Decoder)
+  set = new
+  decoder.consume_array { set << T.new(decoder) }
+  set
+end
+
+def Hash.new(decoder : CBOR::Decoder)
+  hash = new
+  decoder.consume_hash do
+    k = K.new(decoder)
+    hash[k] = V.new(decoder)
+  end
+  hash
+end
+
 # Reads the CBOR values as a time. The value must be surrounded by a time tag as
 # specified by [Section 2.4.1 of RFC 7049][1].
 #
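
For a concrete instantiation, the generic constructor added above behaves roughly like this (illustrative only; `String.new(decoder)` and `Int32.new(decoder)` are assumed to be provided elsewhere in the shard, like `Array.new` above):

def read_hash_of_string_to_int(decoder : CBOR::Decoder) : Hash(String, Int32)
  hash = Hash(String, Int32).new
  decoder.consume_hash do
    key = String.new(decoder)      # K.new(decoder) with K = String
    hash[key] = Int32.new(decoder) # V.new(decoder) with V = Int32
  end
  hash
end

Because `Set.new` and `Hash.new` delegate to `T.new`, `K.new`, and `V.new`, nested containers such as Hash(String, Set(Int32)) compose the same way.
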
@@ -66,50 +81,51 @@ def Time.new(decoder : CBOR::Decoder)
 end
 
 def Union.new(decoder : CBOR::Decoder)
-  token = decoder.current_token
+  # Optimization: use fast path for primitive types
   {% begin %}
-    # Here we store types that are not primitive types
-    {% non_primitives = [] of Nil %}
+    case decoder.current_token
+    {% if T.includes? Nil %}
+    when CBOR::Token::SimpleValueT
+      return decoder.read_nil
+    {% end %}
+    {% if T.includes? Bool %}
+    when CBOR::Token::BoolT
+      return decoder.read_bool
+    {% end %}
+    {% if T.includes? String %}
+    when CBOR::Token::StringT
+      return decoder.read_string
+    {% end %}
+    when CBOR::Token::IntT
+      {% type_order = [Int64, UInt64, Int32, UInt32, Int16, UInt16, Int8, UInt8, Float64, Float32] %}
+      {% for type in type_order.select { |t| T.includes? t } %}
+        return {{type}}.new(decoder)
+      {% end %}
+    when CBOR::Token::FloatT
+      {% type_order = [Float64, Float32] %}
+      {% for type in type_order.select { |t| T.includes? t } %}
+        return {{type}}.new(decoder)
+      {% end %}
+    end
+  {% end %}
 
-    {% for type, index in T %}
-      {% if type == Nil %}
-        return decoder.read_nil if token.is_a?(CBOR::Token::SimpleValueT)
-      {% elsif type == Bool %}
-        return decoder.read_bool if token.is_a?(CBOR::Token::SimpleValueT)
-      {% elsif type == String %}
-        return decoder.read_string if token.is_a?(CBOR::Token::StringT)
-      {% elsif type == Int8 || type == Int16 || type == Int32 || type == Int64 ||
-               type == UInt8 || type == UInt16 || type == UInt32 || type == UInt64 %}
-        return {{type}}.new(decoder) if token.is_a?(CBOR::Token::IntT)
-      {% elsif type == Float32 || type == Float64 %}
-        return {{type}}.new(decoder) if token.is_a?(CBOR::Token::FloatT)
-        {% unless T.any? { |t| t < Int } %}
-          return {{type}}.new(decoder) if token.is_a?(CBOR::Token::IntT)
-        {% end %}
-      {% else %}
-        {% non_primitives << type %}
-      {% end %}
-    {% end %}
+  {% begin %}
+    {% primitive_types = [Nil, Bool, String] + Number::Primitive.union_types %}
+    {% non_primitives = T.reject { |t| primitive_types.includes? t } %}
 
     # If after traversing all the types we are left with just one
     # non-primitive type, we can parse it directly (no need to use `read_raw`)
     {% if non_primitives.size == 1 %}
       return {{non_primitives[0]}}.new(decoder)
     {% else %}
-      raise "What is this?"
-      # node = decoder.read_node
-      # {% for type in non_primitives %}
-      #   unpacker = CBOR::NodeUnpacker.new(node)
-      #   begin
-      #     return {{type}}.new(unpacker)
-      #   rescue e : CBOR::TypeCastError
-      #     # ignore
-      #   end
-      # {% end %}
-      # {% end %}
+      string = pull.read_raw
+      {% for type in non_primitives %}
+        begin
+          return {{type}}.from_json(string)
+        rescue CBOR::ParseError
+          # Ignore
+        end
+      {% end %}
+      raise CBOR::ParseError.new("Couldn't parse #{self} from #{string}", *location)
     {% end %}
   {% end %}
+
+  raise CBOR::ParseError.new("Couldn't parse data as " + {{T.stringify}})
 end
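
Illustration of the fast path introduced above (not from the commit): for a union such as Nil | Int32 | String, the first macro block generates roughly the dispatch below, so primitive members are decoded straight from the current token without any buffering. The method name is hypothetical and the body is an approximation of the expansion, not the literal generated code:

def union_fast_path_example(decoder : CBOR::Decoder)
  case decoder.current_token
  when CBOR::Token::SimpleValueT
    decoder.read_nil    # Nil is in the union
  when CBOR::Token::StringT
    decoder.read_string # String is in the union
  when CBOR::Token::IntT
    Int32.new(decoder)  # first matching type from type_order
  end
end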