WIP: enable clearing the caches of storage and triggers.
parent 5602a6a7ef
commit cd1ff4de79
9 changed files with 123 additions and 6 deletions
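In short, the new `clear_cache!` entry point drops the storage cache and every trigger cache while leaving the data on disk intact; a later lookup simply re-reads from disk and re-fills the caches. A minimal usage sketch, assuming the `Car` record and the `SPECDB::Common` wrapper used by the spec below (the database name "cache-demo" is made up for the example):

  # Uses the spec helpers shown below: a `Car` record and `SPECDB::Common`,
  # an LRU-cached storage limited here to 2 entries.
  db          = SPECDB::Common(Car).new "cache-demo", 2
  index_name  = db.new_index     "name",  &.name
  index_color = db.new_partition "color", &.color

  db << Car.new "Corvet-0", "red", [] of String

  db.clear_cache!               # drops the storage cache and every trigger cache
  index_name.get? "Corvet-0"    # still found: the value is re-read from disk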
@@ -120,4 +120,69 @@ describe "SPECDB::Common" do
    index4_color.data.size.should eq 1 # trigger cache: 1, the color "red"
    index4_color.data["red"].size.should eq 3 # 3 values in "red"
  end

  it "testing caches: add some values, clear cache, check the values" do
    car0 = Car.new "Corvet-0", "red", [] of String
    car1 = Car.new "Corvet-1", "red", [] of String
    car2 = Car.new "Corvet-2", "blue", [] of String
    car3 = Car.new "Corvet-3", "violet", [] of String

    db = SPECDB::Common(Car).new "reindex", 2
    index_name  = db.new_index     "name",  &.name
    index_color = db.new_partition "color", &.color

    db << car0

    # First entry, each cache should have one item.
    index_name.data.size.should eq 1
    index_color.data.size.should eq 1

    db << car1
    db << car2
    db << car3

    # Storage cache should only have the maximum number of allowed entries (2).
    db.data.keys.size.should eq 2

    # Trigger caches don't have limits on the number of entries, therefore they should have:
    # 4 entries for the name index (each entry = 1 car).
    index_name.data.size.should eq 4
    # 3 entries for the color index (each entry = 1 color).
    index_color.data.size.should eq 3

    # All cached entries are dropped.
    db.clear_cache!

    # Caches should be empty.
    db.data.keys.sort.should eq([] of Int32)
    index_name.data.size.should eq 0
    index_color.data.size.should eq 0

    # Get a car from stored data based on its name.
    index_name.get?("Corvet-2").should eq car2
    index_name.data.size.should eq 1
    # This doesn't fill up the cache in the color index.
    index_color.data.size.should eq 0

    # Get a car from stored data based on its color.
    index_color.get?("violet").size.should eq 1

    # The "Storage" database should have both found items in its cache.
    db.data.keys.sort.should eq([2, 3] of Int32)

    # Trigger caches should have a single value given that we searched for a single item through each one.
    index_name.data.size.should eq 1
    index_color.data.size.should eq 1

    # Loop over entries, filling up the cache for Storage.
    total_values = 0
    db.each_with_key do |v, k|
      total_values += 1
    end
    total_values.should eq 4

    # Trigger caches should still have a single entry since we searched through them only once.
    index_name.data.size.should eq 1
    index_color.data.size.should eq 1
  end
end
@@ -350,10 +350,26 @@ abstract class DODB::Storage(V)
    end
  end

  private def clear_storage_cache!
    puts "DODB::Storage(V) clear_storage_cache! (no cache)"
    # There is no cache by default.
    # This method has to be overridden in storage implementations that do cache values.
  end

  # NOTE: clears all caches (`storage` and `triggers`).
  def clear_cache!
    clear_storage_cache!
    @triggers.each do |trigger|
      trigger.clear_cache!
    end
  end

  # Removes all indices and then rewrites them all.
  #
  # WARNING: slow operation.
  # NOTE: clears all caches (`storage` and `triggers`).
  def reindex_everything!
    clear_cache!
    nuke_triggers!

    each_with_key() do |item, key|
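The hunk above follows a small template-method shape: the public `clear_cache!` clears every trigger cache and delegates the storage part to the private `clear_storage_cache!` hook, a no-op here that the cached storage classes in the following hunks override. A stripped-down standalone sketch of that shape (not DODB code; the names `Store`, `CachedStore`, and `@cache` are made up):

  # Standalone illustration of the pattern, not DODB code.
  abstract class Store
    # Public entry point; in DODB it also asks each trigger to clear its cache.
    def clear_cache!
      clear_storage_cache!
    end

    # No cache by default; cached subclasses override this hook.
    private def clear_storage_cache!
    end
  end

  class CachedStore < Store
    @cache = Hash(Int32, String).new

    private def clear_storage_cache!
      @cache.clear
    end
  end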
@@ -52,6 +52,11 @@ class DODB::Storage::Cached(V) < DODB::Storage(V)
    end
  end

  private def clear_storage_cache!
    puts "DODB::Storage::Cached(V) clear_storage_cache!"
    data.clear
  end

  # Gets the data with the *key*.
  # In case the data is missing, raises a `DODB::MissingEntry` exception.
  #
@@ -46,6 +46,12 @@ class DODB::Storage::Common(V) < DODB::Storage::Cached(V)
    @cached_last_key = init_last_key
  end

  private def clear_storage_cache!
    puts "DODB::Storage::Common(V) clear_storage_cache!"
    data.clear
    @lru = EfficientLRU(Int32).new lru.max_entries
  end

  # Verifies that the value is in the cache, otherwise reads it from disk.
  # Pushes the key into the LRU.
  def [](key : Int32) : V
@@ -29,9 +29,16 @@ abstract class DODB::Trigger(V)
  # NOTE: used for internal operations.
  abstract def trigger_directory : String

  # Removes all cached values.
  def clear_cache!
    puts "DODB::Trigger(V) no cache"
    # By default, there is no cache.
  end

  # Removes all the index entries; by default, removes the `#trigger_directory`.
  #
  # NOTE: used for internal operations.
  # NOTE: automatically clears the cache (for triggers that do have one).
  def nuke_trigger
    FileUtils.rm_rf trigger_directory
  end
@@ -344,10 +344,15 @@ class DODB::Trigger::IndexCached(V) < DODB::Trigger::Index(V)
    end
  end

  def clear_cache!
    puts "DODB::Trigger::IndexCached(V) clear_cache!"
    data.clear
  end

  # Clears the cache and removes the `#trigger_directory`.
  def nuke_trigger
    super
    data.clear
    clear_cache!
  end

  # Indexes the value on the file-system as `DODB::Trigger::Index#index` does, but also puts the index in a cache.

@@ -436,6 +441,6 @@ class DODB::Trigger::IndexRAMOnly(V) < DODB::Trigger::IndexCached(V)

  # Clears the index.
  def nuke_trigger
    data.clear
    clear_cache!
  end
end
@@ -260,6 +260,11 @@ class DODB::Trigger::PartitionCached(V) < DODB::Trigger::Partition(V)
    @data[partition] = array
  end

  def clear_cache!
    puts "DODB::Trigger::PartitionCached(V) clear_cache!"
    data.clear
  end

  # Removes the index of a value on the file-system as `DODB::Trigger::Partition#deindex` does, but also
  # removes it from the cache; used for **internal operations**.
  #

@@ -309,7 +314,7 @@ class DODB::Trigger::PartitionCached(V) < DODB::Trigger::Partition(V)
  # Clears the cache and removes the `#trigger_directory`.
  def nuke_trigger
    super
    data.clear
    clear_cache!
  end
end

@@ -379,6 +384,6 @@ class DODB::Trigger::PartitionRAMOnly(V) < DODB::Trigger::PartitionCached(V)

  # Clears the cache.
  def nuke_trigger
    data.clear
    clear_cache!
  end
end
@@ -300,6 +300,11 @@ class DODB::Trigger::TagsCached(V) < DODB::Trigger::Tags(V)
    end
  end

  def clear_cache!
    puts "DODB::Trigger::TagsCached(V) clear_cache!"
    data.clear
  end

  # :inherit:
  # TODO: in case the tag is left empty, should it be removed from the cache?
  def deindex(key : String, value : V)

@@ -347,7 +352,7 @@ class DODB::Trigger::TagsCached(V) < DODB::Trigger::Tags(V)
  # Clears the cache and removes the `#trigger_directory`.
  def nuke_trigger
    super
    data.clear
    clear_cache!
  end
end

@@ -421,6 +426,6 @@ class DODB::Trigger::TagsRAMOnly(V) < DODB::Trigger::TagsCached(V)

  # Clears the cache.
  def nuke_trigger
    data.clear
    clear_cache!
  end
end
@@ -88,6 +88,9 @@ class EfficientLRU(V)
  property list : DoubleLinkedList(V)
  property hash : Hash(V, DoubleLinkedList::Node(V))

  # Maximum allowed entries in the structure.
  property max_entries : UInt32

  def initialize(@max_entries : UInt32)
    @list = DoubleLinkedList(V).new
    @hash = Hash(V, DoubleLinkedList::Node(V)).new
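`DODB::Storage::Common#clear_storage_cache!` above resets its LRU by building a fresh `EfficientLRU` with the same `max_entries`, since the structure shown here only exposes its list and hash. If an in-place reset were ever preferred, a hypothetical `clear!` (a sketch, not part of this commit) could reopen the class like this:

  class EfficientLRU(V)
    # Hypothetical helper, not part of this commit: empty the LRU in place
    # by resetting both underlying structures, keeping `max_entries` as-is.
    def clear!
      @list = DoubleLinkedList(V).new
      @hash.clear
    end
  end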