From a2d80ce2367c3fa5c8db74cfc6b7f9c841e0a07e Mon Sep 17 00:00:00 2001 From: Dominic Mayhew Date: Thu, 6 Jun 2024 12:48:08 -0700 Subject: [PATCH 1/9] Implement Postgres DB operations with SQL errors Write an initial implementation of PG DB operations that compile. The goal here is to port the existing SQLite operations into the correct types/DSL of `postgresql-simple` without worrying about whether or not there are SQL incompatibilities with Postgres. --- kupo.cabal | 208 +++-- src/Kupo/App/Database.hs | 2 +- src/Kupo/App/Database/Postgres.hs | 1172 ++++++++------------------- src/Kupo/App/Database/SQLite.hs | 27 +- src/Kupo/App/Database/Types.hs | 23 +- test/Test/Kupo/Data/DatabaseSpec.hs | 39 +- 6 files changed, 534 insertions(+), 937 deletions(-) diff --git a/kupo.cabal b/kupo.cabal index d9c4a19..28a3187 100644 --- a/kupo.cabal +++ b/kupo.cabal @@ -164,74 +164,144 @@ library TypeSynonymInstances ViewPatterns ghc-options: -Wall -Wcompat -Widentities -Wincomplete-record-updates -Wincomplete-uni-patterns -Wpartial-fields -Wredundant-constraints -fno-warn-unticked-promoted-constructors -fno-warn-partial-fields - build-depends: - aeson - , attoparsec-aeson - , base >=4.7 && <5 - , base16 - , base58-bytestring - , base64 - , bech32 - , bech32-th - , binary - , bytestring - , cardano-crypto-class - , cardano-crypto-wrapper - , cardano-ledger-allegra - , cardano-ledger-alonzo - , cardano-ledger-api - , cardano-ledger-babbage - , cardano-ledger-binary - , cardano-ledger-byron - , cardano-ledger-conway - , cardano-ledger-core - , cardano-ledger-mary - , cardano-ledger-shelley - , cardano-slotting - , cardano-strict-containers - , cborg - , containers - , contra-tracer - , contra-tracers - , directory - , fast-bech32 - , file-embed - , filepath - , generic-lens - , http-client - , http-types - , io-classes - , lens - , lens-aeson - , modern-uri - , network-mux - , optparse-applicative - , ouroboros-consensus - , ouroboros-consensus-cardano - , ouroboros-consensus-diffusion - , ouroboros-network - , ouroboros-network-api - , ouroboros-network-framework - , ouroboros-network-protocols - , prometheus - , relude - , resource-pool - , safe - , safe-exceptions - , scientific - , sqlite-simple - , template-haskell - , text - , text-ansi - , time - , transformers - , typed-protocols - , unix - , wai - , warp - , websockets - , websockets-json - , yaml + if flag(postgres) + build-depends: + aeson + , attoparsec-aeson + , base >=4.7 && <5 + , base16 + , base58-bytestring + , base64 + , bech32 + , bech32-th + , binary + , bytestring + , cardano-crypto-class + , cardano-crypto-wrapper + , cardano-ledger-allegra + , cardano-ledger-alonzo + , cardano-ledger-api + , cardano-ledger-babbage + , cardano-ledger-binary + , cardano-ledger-byron + , cardano-ledger-conway + , cardano-ledger-core + , cardano-ledger-mary + , cardano-ledger-shelley + , cardano-slotting + , cardano-strict-containers + , cborg + , containers + , contra-tracer + , contra-tracers + , directory + , fast-bech32 + , file-embed + , filepath + , generic-lens + , http-client + , http-types + , io-classes + , lens + , lens-aeson + , modern-uri + , network-mux + , optparse-applicative + , ouroboros-consensus + , ouroboros-consensus-cardano + , ouroboros-consensus-diffusion + , ouroboros-network + , ouroboros-network-api + , ouroboros-network-framework + , ouroboros-network-protocols + , postgresql-simple + , prometheus + , relude + , resource-pool + , safe + , safe-exceptions + , scientific + , template-haskell + , text + , text-ansi + , time + , 
transformers + , typed-protocols + , unix + , wai + , warp + , websockets + , websockets-json + , yaml + else + build-depends: + aeson + , attoparsec-aeson + , base >=4.7 && <5 + , base16 + , base58-bytestring + , base64 + , bech32 + , bech32-th + , binary + , bytestring + , cardano-crypto-class + , cardano-crypto-wrapper + , cardano-ledger-allegra + , cardano-ledger-alonzo + , cardano-ledger-api + , cardano-ledger-babbage + , cardano-ledger-binary + , cardano-ledger-byron + , cardano-ledger-conway + , cardano-ledger-core + , cardano-ledger-mary + , cardano-ledger-shelley + , cardano-slotting + , cardano-strict-containers + , cborg + , containers + , contra-tracer + , contra-tracers + , directory + , fast-bech32 + , file-embed + , filepath + , generic-lens + , http-client + , http-types + , io-classes + , lens + , lens-aeson + , modern-uri + , network-mux + , optparse-applicative + , ouroboros-consensus + , ouroboros-consensus-cardano + , ouroboros-consensus-diffusion + , ouroboros-network + , ouroboros-network-api + , ouroboros-network-framework + , ouroboros-network-protocols + , prometheus + , relude + , resource-pool + , safe + , safe-exceptions + , scientific + , sqlite-simple + , template-haskell + , text + , text-ansi + , time + , transformers + , typed-protocols + , unix + , wai + , warp + , websockets + , websockets-json + , yaml default-language: Haskell2010 if flag(postgres) other-modules: @@ -362,6 +432,8 @@ test-suite unit TypeSynonymInstances ViewPatterns ghc-options: -Wall -Wcompat -Widentities -Wincomplete-record-updates -Wincomplete-uni-patterns -Wpartial-fields -Wredundant-constraints -Wunused-packages -threaded -rtsopts -with-rtsopts=-N + if flag(postgres) + cpp-options: -Dpostgres build-tool-depends: hspec-discover:hspec-discover build-depends: diff --git a/src/Kupo/App/Database.hs b/src/Kupo/App/Database.hs index 6409ce6..fbcb627 100644 --- a/src/Kupo/App/Database.hs +++ b/src/Kupo/App/Database.hs @@ -7,7 +7,7 @@ {-# LANGUAGE DuplicateRecordFields #-} module Kupo.App.Database - ( + ( -- // TODO: Fix documentation headers -- ** Queries -- *** Inputs deleteInputsQry diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs index 6c6b554..a7d393e 100644 --- a/src/Kupo/App/Database/Postgres.hs +++ b/src/Kupo/App/Database/Postgres.hs @@ -4,16 +4,15 @@ {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE DuplicateRecordFields #-} -{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE TemplateHaskell #-} module Kupo.App.Database.Postgres ( - + -- // TODO: Fix documentation headers -- ** Queries -- *** Inputs - deleteInputsQry + deleteInputsQry , markInputsQry , pruneInputsQry , foldInputsQry @@ -50,66 +49,34 @@ import Kupo.Prelude import Control.Exception ( IOException , handle - , mask - , onException , throwIO ) +import Control.Monad + ( foldM + ) import Control.Tracer ( Tracer , traceWith ) -import Data.FileEmbed - ( embedFile - ) -import Data.Scientific - ( scientific - ) -import Data.Text.Lazy.Builder.Scientific - ( FPFormat (Fixed) - , formatScientificBuilder +import qualified Data.Char as Char +import qualified Data.Set as Set + ( map ) -import Database.SQLite.Simple +import Database.PostgreSQL.Simple ( Connection - , Error (..) , Only (..) - , Query (..) - , SQLData (..) - , SQLError (..) - , SQLOpenFlag (..) - , ToRow (..) 
- , changes + , Query , execute + , executeMany , execute_ - , fold_ - , nextRow + , query , query_ - , totalChanges - , withConnection' - , withStatement ) +import qualified Database.PostgreSQL.Simple as PG import GHC.TypeLits ( KnownSymbol , symbolVal ) -import Kupo.Control.MonadAsync - ( concurrently_ - ) -import Kupo.Control.MonadCatch - ( catch - ) -import Kupo.Control.MonadDelay - ( threadDelay - ) -import Kupo.Control.MonadSTM - ( MonadSTM (..) - ) -import Kupo.Control.MonadThrow - ( bracket - , bracket_ - ) -import Kupo.Control.MonadTime - ( DiffTime - ) import Kupo.Data.Cardano ( SlotNo (..) , slotNoToText @@ -137,21 +104,6 @@ import Kupo.Data.Pattern import Numeric ( Floating (..) ) -import System.Directory - ( Permissions (..) - , copyFile - , createDirectoryIfMissing - , doesFileExist - , getCurrentDirectory - , getPermissions - , removePathForcibly - ) -import System.FilePath - ( () - ) -import System.IO.Error - ( isAlreadyExistsError - ) import Control.Concurrent ( getNumCapabilities @@ -164,6 +116,8 @@ import Data.Pool , tryWithResource , withResource ) +import qualified Data.Text as T +import qualified Data.Text.Lazy.Builder as TL import Kupo.App.Database.Types ( ConnectionType (..) , DBPool (..) @@ -173,286 +127,71 @@ import Kupo.App.Database.Types ) import Kupo.Control.MonadLog ( TraceProgress (..) - , nullTracer ) -import Text.URI - ( URI - ) - -import qualified Data.Char as Char -import qualified Data.Text as T -import qualified Data.Text.Lazy.Builder as T -import qualified Data.Text.Lazy.Builder as TL -import qualified Database.SQLite.Simple as Sqlite import qualified Kupo.Data.Configuration as Configuration import qualified Kupo.Data.Database as DB -data DatabaseFile = OnDisk !FilePath | InMemory !(Maybe FilePath) - deriving (Generic, Eq, Show) - -data NewDatabaseFileException - = FailedToAccessOrCreateDatabaseFile { reason :: FailedToCreateDatabaseFileReason } - deriving (Show) - -instance Exception NewDatabaseFileException - -data FailedToCreateDatabaseFileReason - = SpecifiedPathIsAFile { path :: !FilePath } - | SpecifiedPathIsReadOnly { path :: !FilePath } - | RemoteURLSpecifiedForSQLite { url :: URI } - | SomeUnexpectedErrorOccured { error :: !IOException } - deriving (Show) - --- | Create a new 'DatabaseFile' in the expected workding directory. Create the target --- directory (recursively) if it doesn't exist. -newDatabaseFile - :: (MonadIO m) - => Tracer IO TraceDatabase - -> Configuration.DatabaseLocation - -> m DatabaseFile -newDatabaseFile tr = \case - Configuration.InMemory path -> do - return $ InMemory path - Configuration.Dir dir -> - OnDisk <$> newDatabaseOnDiskFile tr (traceWith tr . DatabaseCreateNew) dir - Configuration.Remote url -> liftIO $ do - traceWith tr $ DatabaseMustBeLocal - { errorMessage = - "This binary was compiled to use SQLite. \ - \You must specify either a working directory or in-memory configuration. \ - \Using a remote URL is only allowed on binaries compiled to use PostgreSQL." 
- } - throwIO (FailedToAccessOrCreateDatabaseFile $ RemoteURLSpecifiedForSQLite url) - -newDatabaseOnDiskFile - :: (MonadIO m) - => Tracer IO TraceDatabase - -> (FilePath -> IO ()) - -> FilePath - -> m FilePath -newDatabaseOnDiskFile tr onFileMissing dir = liftIO $ do - absoluteDir <- ( dir) <$> getCurrentDirectory - handle (onAlreadyExistsError absoluteDir) $ createDirectoryIfMissing True dir - permissions <- getPermissions absoluteDir - unless (writable permissions) $ bail (SpecifiedPathIsReadOnly absoluteDir) - let dbFile = absoluteDir "kupo.sqlite3" - unlessM (doesFileExist dbFile) $ onFileMissing dbFile - return dbFile - where - bail absoluteDir = do - traceWith tr $ DatabasePathMustBeDirectory - { hint = "The path you've specified as working directory is a file; you probably meant to \ - \point to the parent directory instead. Don't worry about the database file, \ - \I'll manage it myself." - } - throwIO (FailedToAccessOrCreateDatabaseFile absoluteDir) - onAlreadyExistsError absoluteDir e - | isAlreadyExistsError e = do - bail (SpecifiedPathIsAFile absoluteDir) - | otherwise = - bail (SomeUnexpectedErrorOccured e) - --- | Construct a connection string for the SQLite database. This utilizes (and assumes) the URI --- recognition from SQLite to choose between read-only or read-write database. By default also, when --- no filepath is provided, the database is created in-memory with a shared cache. --- --- For testing purpose however, it is also possible to create a in-memory database in isolation by --- simply passing `:memory:` as a filepath. -mkConnectionString - :: DatabaseFile - -> ConnectionType - -> (String, [SQLOpenFlag]) -mkConnectionString filePath mode = - case filePath of - OnDisk fp -> - ("file:" <> fp, SQLOpenNoMutex : openFlags) - InMemory Nothing -> - ("file::kupo:?mode=memory&cache=shared", openFlags) - InMemory (Just fp) -> - (fp, SQLOpenMemory : openFlags) - where - openFlags = case mode of - ReadOnly -> [SQLOpenReadOnly] - ReadWrite -> [SQLOpenReadWrite, SQLOpenCreate] - WriteOnly -> [SQLOpenReadWrite, SQLOpenCreate] - --- | A short-lived connection meant to be used in a resource-pool. These connections can be opened --- either as read-only connection or read-write; depending on the client needs. Read-only connections --- are non-blocking and can access data even when the database is being written concurrently. -createShortLivedConnection - :: Tracer IO TraceDatabase - -> ConnectionType - -> DBLock IO - -> LongestRollback - -> DatabaseFile - -> IO (Database IO) -createShortLivedConnection tr mode (DBLock shortLived longLived) k file = do - traceWith tr $ DatabaseConnection ConnectionCreateShortLived{mode} - - let (str, flags) = mkConnectionString file mode +data FailedToCreateConnection = FailedToCreateConnection { reason :: FailedToCreateConnectionReason } + deriving (Show) - !conn <- Sqlite.open' str flags +instance Exception FailedToCreateConnection - forM_ ["PRAGMA cache_size = 1024"] $ \pragma -> - handle - (\(_ :: SomeException) -> traceWith trConn ConnectionFailedPragma{pragma}) - (execute_ conn (Query pragma)) - - return $ mkDatabase trConn mode k (bracketConnection conn) - where - trConn :: Tracer IO TraceConnection - trConn = contramap DatabaseConnection tr - - bracketConnection :: Connection -> (forall a. ((Connection -> IO a) -> IO a)) - bracketConnection conn between = - case mode of - WriteOnly -> - between conn - ReadOnly -> - between conn - ReadWrite -> - bracket_ - -- read-write connections only run when the longLived isn't busy working. 
Multiple - -- short-lived read-write connections may still conflict with one another, but - -- since they mostly are one-off requests, we simply retry them when busy/locked. - (atomically $ do - readTVar longLived >>= check . not - modifyTVar' shortLived next - ) - (atomically (modifyTVar' shortLived prev)) - (between conn) - -withShortLivedConnection - :: Tracer IO TraceDatabase - -> ConnectionType - -> DBLock IO - -> LongestRollback - -> DatabaseFile - -> (Database IO -> IO a) - -> IO a -withShortLivedConnection tr mode lock k file action = do - bracket - (createShortLivedConnection tr mode lock k file) - (\Database{close} -> close) - action - --- | A resource acquisition bracket for a single long-lived connection. The system is assumed to use --- with only once, at the application start-up and provide this connection to a privileged one which --- takes priority over any other connections. --- --- It is therefore also the connection from which we check for and run database migrations when --- needed. Note that this bracket will also create the database if it doesn't exist. -withLongLivedConnection - :: Tracer IO TraceDatabase - -> DBLock IO - -> LongestRollback - -> DatabaseFile - -> DeferIndexesInstallation - -> (Database IO -> IO a) - -> IO a -withLongLivedConnection tr (DBLock shortLived longLived) k file deferIndexes action = do - let (str, flags) = mkConnectionString file ReadWrite - withConnection' str flags $ \conn -> do - execute_ conn "PRAGMA page_size = 32768" - execute_ conn "PRAGMA cache_size = 1024" - execute_ conn "PRAGMA synchronous = NORMAL" - execute_ conn "PRAGMA journal_mode = WAL" - execute_ conn "PRAGMA optimize" - databaseVersion conn >>= runMigrations tr conn - installIndexes tr conn deferIndexes - execute_ conn "PRAGMA foreign_keys = ON" - action (mkDatabase (contramap DatabaseConnection tr) ReadWrite k (bracketConnection conn)) - where - bracketConnection :: Connection -> (forall a. ((Connection -> IO a) -> IO a)) - bracketConnection conn between = - bracket_ - (do - atomically (writeTVar longLived True) -- acquire - atomically (readTVar shortLived >>= check . (== 0)) -- wait for read-write short-lived - ) - (atomically $ writeTVar longLived False) - (between conn) +data FailedToCreateConnectionReason + = SQLiteDirSpecified { path :: !FilePath } + | SQLiteInMemorySpecified { inMemoryPath :: !(Maybe FilePath) } + | SomeUnexpectedErrorOccured { error :: !IOException } + deriving (Show) -- | Create a Database pool that uses separate pools for `ReadOnly` and `ReadWrite` connections. -- This function creates a database file if it does not already exist. 
-newDBPool - :: (Tracer IO TraceDatabase) - -> Bool - -> Configuration.DatabaseLocation - -> LongestRollback - -> IO (DBPool IO) +newDBPool :: (Tracer IO TraceDatabase) -> Bool -> Configuration.DatabaseLocation -> LongestRollback -> IO (DBPool IO) newDBPool tr isReadOnly dbLocation longestRollback = do - dbFile <- newDatabaseFile tr dbLocation - lock <- liftIO newLock - - (maxConcurrentWriters, maxConcurrentReaders) <- - liftIO getNumCapabilities <&> \n -> (n, 5 * n) - - readOnlyPool <- liftIO $ newPool $ defaultPoolConfig - (createShortLivedConnection tr ReadOnly lock longestRollback dbFile) - (\Database{close} -> close) - 600 - maxConcurrentReaders + maxConnections <- + (5*) <$> liftIO getNumCapabilities - readWritePool <- liftIO $ newPool $ defaultPoolConfig - (createShortLivedConnection tr ReadWrite lock longestRollback dbFile) + connectionPool <- liftIO $ newPool $ defaultPoolConfig + mkConnection (\Database{close} -> close) - 30 - maxConcurrentWriters + 600 -- // TODO: Review concurrency requirements + maxConnections let withDB :: forall a b. (Pool (Database IO) -> (Database IO -> IO a) -> IO b) -> ConnectionType -> (Database IO -> IO a) -> IO b withDB withRes connType dbAction = case connType of - ReadOnly -> withRes readOnlyPool dbAction ReadWrite | isReadOnly -> fail "Cannot acquire a read/write connection on read-only replica" - ReadWrite -> withRes readWritePool dbAction WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" + _ -> withRes connectionPool dbAction - return DBPool - { tryWithDatabase = - withDB tryWithResource - , withDatabaseBlocking = - withDB withResource - , withDatabaseExclusiveWriter = - withLongLivedConnection tr lock longestRollback dbFile - , destroyResources = do - destroyAllResources readOnlyPool - destroyAllResources readWritePool - , maxConcurrentReaders - , maxConcurrentWriters = - if isReadOnly then 0 else maxConcurrentWriters - } - --- It is therefore also the connection from which we check for and run database migrations when --- needed. Note that this bracket will also create the database if it doesn't exist. -withWriteOnlyConnection - :: DatabaseFile - -> (Sqlite.Connection -> Database IO -> IO a) - -> IO a -withWriteOnlyConnection file action = do - let (str, flags) = mkConnectionString file WriteOnly - withConnection' str flags $ \conn -> do - databaseVersion conn >>= runMigrations nullTracer conn - installIndexes nullTracer conn SkipNonEssentialIndexes - execute_ conn "PRAGMA synchronous = OFF" - execute_ conn "PRAGMA journal_mode = OFF" - execute_ conn "PRAGMA locking_mode = EXCLUSIVE" - action conn (mkDatabase nullTracer ReadWrite k (bracketConnection conn)) - where - k = LongestRollback maxBound + -- // TODO: Acutally do something with defer indexes! And possibly actually provide a preferred connection? + withDatabaseExclusiveWriter :: DeferIndexesInstallation -> (Database IO -> IO a) -> (IO a) + + destroyResources = destroyAllResources connectionPool + withDatabaseExclusiveWriter _deferIndexes = withResource connectionPool + + return DBPool { tryWithDatabase = withDB tryWithResource, withDatabaseBlocking = withDB withResource, withDatabaseExclusiveWriter, maxConcurrentReaders = 0, maxConcurrentWriters = maxConnections, destroyResources } - bracketConnection :: Connection -> (forall a. 
((Connection -> IO a) -> IO a)) - bracketConnection conn between = - between conn + where + mkConnection = case dbLocation of + Configuration.Remote uri -> do + traceWith tr $ DatabaseConnection ConnectionCreateGeneric + conn <- PG.connectPostgreSQL (encodeUtf8 $ render uri) + return $ mkDatabase trConn longestRollback (\dbAction -> dbAction conn) + Configuration.Dir dir -> liftIO $ do + traceLocationError + throwIO (FailedToCreateConnection $ SQLiteDirSpecified dir) + Configuration.InMemory path -> liftIO $ do + traceLocationError + throwIO (FailedToCreateConnection $ SQLiteInMemorySpecified path) -data CopyException - = ErrCopyEmptyPatterns { hint :: Text } - | ErrTargetAlreadyExists { target :: FilePath } - | ErrMissingSourceDatabase { source :: FilePath } - deriving (Show) + where + trConn :: Tracer IO TraceConnection + trConn = contramap DatabaseConnection tr + + traceLocationError = traceWith tr $ DatabaseLocationInvalid + { errorMessage = "This binary was compiled to use PostgreSQL and requires a Postgres connection URI. \ + \Local file paths and in-memory configurations are only valid for binaries compiled for SQLite." } -instance Exception CopyException -- Copy from an existing database into another, using the provided patterns -- as filter. Note that this only makes sense when the source database's patterns @@ -467,108 +206,7 @@ copyDatabase -> FilePath -> Set Pattern -> IO () -copyDatabase (tr, progress) fromDir intoDir patterns = do - when (null patterns) $ do - throwIO ErrCopyEmptyPatterns - { hint = "No patterns provided for copy. At least one is required." } - - fromFile <- newDatabaseOnDiskFile tr (throwIO . ErrMissingSourceDatabase) fromDir - intoFile <- newDatabaseOnDiskFile tr (traceWith tr . DatabaseCreateNew) intoDir - - cleanupFile <- newCleanupAction intoFile - - handle cleanupFile $ do - traceWith tr DatabaseCloneSourceDatabase - copyFile fromFile intoFile - lock <- newLock - withShortLivedConnection tr ReadOnly lock longestRollback (OnDisk fromFile) $ \from -> do - withWriteOnlyConnection (OnDisk intoFile) $ \conn into -> do - execute_ conn "PRAGMA foreign_keys = OFF" - mapM_ (cleanup conn) ["inputs", "policies", "patterns"] - runTransaction into (insertPatterns into patterns) - forM_ patterns $ \pattern_ -> do - traceWith tr $ DatabaseImportTable { table = "inputs", pattern = patternToText pattern_ } - copyTable - (runTransaction from $ countInputs from pattern_) - (runTransaction from . foldInputs from pattern_ Whole NoStatusFlag Asc) - (runTransaction into . insertInputs into) - DB.resultToRow - traceWith tr $ DatabaseImportTable { table = "policies", pattern = patternToText pattern_ } - copyTable - (runTransaction from $ countPolicies from pattern_) - (runTransaction from . foldPolicies from pattern_) - (runTransaction into . insertPolicies into . 
fromList) - identity - traceWith tr DatabaseCopyFinalize - execute_ conn "VACUUM" - execute_ conn "PRAGMA optimize" - where - longestRollback :: LongestRollback - longestRollback = - LongestRollback maxBound - - cleanup :: Connection -> Text -> IO () - cleanup conn table = do - traceWith tr $ DatabaseCleanupOldData { table } - execute_ conn $ Query $ "DELETE FROM " <> table - - newCleanupAction :: FilePath -> IO (SomeException -> IO a) - newCleanupAction filePath = do - whenM (doesFileExist filePath) (throwIO $ ErrTargetAlreadyExists { target = filePath }) - return $ \(e :: SomeException) -> do - traceWith tr DatabaseRemoveIncompleteCopy { filePath } - removePathForcibly filePath - throwIO e - - copyTable - :: IO Integer - -> ((result -> IO ()) -> IO ()) - -> ([row] -> IO ()) - -> (result -> row) - -> IO () - copyTable countTable foldTable insertTable mkRow = do - queue <- newTBQueueIO 10_000 - done <- newTVarIO False - total <- countTable - concurrently_ - (do - foldTable $ \result -> - atomically $ writeTBQueue queue (mkRow result) - atomically $ writeTVar done True - ) - ( let loop n = do - results <- atomically $ do - isDone <- readTVar done - isEmpty <- isEmptyTBQueue queue - check (not isEmpty || isDone) - flushTBQueue queue - insertTable results - let len = toInteger (length results) - unless (len == 0) $ do - traceWith progress $ ProgressStep (mkProgress total (n + len)) - loop (n + len) - in loop 0 >> do - traceWith progress ProgressDone - traceWith tr $ DatabaseImported { rows = total } - ) - - mkProgress :: Integer -> Integer -> Text - mkProgress total n = - scientific (round (double (n * 10000) / double total)) (-2) - & formatScientificBuilder Fixed (Just 2) - & T.toLazyText - & toStrict - & (<> "%") - where - double :: Integer -> Double - double = fromIntegral - --- ** Lock - -data DBLock (m :: Type -> Type) = DBLock !(TVar m Word) !(TVar m Bool) - -newLock :: MonadSTM m => m (DBLock m) -newLock = DBLock <$> newTVarIO 0 <*> newTVarIO True +copyDatabase = undefined -- // TODO: Implement copyDatabase -- -- IO @@ -576,276 +214,226 @@ newLock = DBLock <$> newTVarIO 0 <*> newTVarIO True mkDatabase :: Tracer IO TraceConnection - -> ConnectionType -> LongestRollback -> (forall a. (Connection -> IO a) -> IO a) -> Database IO -mkDatabase tr mode longestRollback bracketConnection = Database - { longestRollback - - , optimize = ReaderT $ \conn -> do - -- NOTE: It is good to run the 'PRAGMA optimize' every now-and-then. The - -- SQLite's official documentation recommend to do so either upon - -- closing every connection, or, every few hours. - traceExecute_ tr conn "PRAGMA optimize" - - , close = do - traceWith tr ConnectionDestroyShortLived{mode} - bracketConnection Sqlite.close - - , insertInputs = \inputs -> ReaderT $ \conn -> do - mapM_ - (\DB.Input{..} -> do - insertRow @"inputs" conn - [ SQLBlob extendedOutputReference - , SQLText address - , SQLBlob value - , maybe SQLNull SQLBlob datumInfo - , maybe SQLNull SQLBlob refScriptHash - , SQLInteger (fromIntegral createdAtSlotNo) - , maybe SQLNull (SQLInteger . 
fromIntegral) spentAtSlotNo - ] - case datum of - Nothing -> - pure () - Just DB.BinaryData{..} -> - insertRow @"binary_data" conn - [ SQLBlob binaryDataHash - , SQLBlob binaryData - ] - case refScript of - Nothing -> - pure () - Just DB.ScriptReference{..} -> - insertRow @"scripts" conn - [ SQLBlob scriptHash - , SQLBlob script - ] +mkDatabase tr longestRollback bracketConnection = Database + { insertInputs = \inputs -> ReaderT $ \conn -> do + mapM_ (\DB.Input{..} -> do + insertRow @"inputs" conn 7 + ( extendedOutputReference + , address + , value + , datumInfo + , refScriptHash + , (fromIntegral createdAtSlotNo :: Int64) + , spentAtSlotNo + ) + case datum of + Nothing -> + pure () + Just DB.BinaryData{..} -> + insertRow @"binary_data" conn 2 + ( binaryDataHash + , binaryData + ) + case refScript of + Nothing -> + pure () + Just DB.ScriptReference{..} -> + insertRow @"scripts" conn 2 + ( scriptHash + , script + ) ) inputs , deleteInputs = \refs -> ReaderT $ \conn -> do - withTotalChanges conn $ - mapM_ (execute_ conn . deleteInputsQry) refs + withTotalChanges (\pattern -> execute_ conn (deleteInputsQry pattern)) refs , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs -> ReaderT $ \conn -> do - withTotalChanges conn $ - forM_ refs $ \ref -> do - execute conn (markInputsQry ref) - [ SQLInteger slotNo - ] + withTotalChanges (\ref -> + execute conn (markInputsQry ref) (Only (slotNo :: Int64))) + refs , pruneInputs = ReaderT $ \conn -> do withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ do - traceExecute tr conn pruneInputsQry [ SQLInteger (fromIntegral longestRollback) ] - changes conn + traceExecute tr conn pruneInputsQry [ fromIntegral longestRollback :: Int64 ] , foldInputs = \pattern_ slotRange statusFlag sortDirection yield -> ReaderT $ \conn -> do -- TODO: Allow resolving datums / scripts on demand through LEFT JOIN -- -- See [#21](https://github.com/CardanoSolutions/kupo/issues/21) let (datum, refScript) = (Nothing, Nothing) - Sqlite.fold_ conn (foldInputsQry pattern_ slotRange statusFlag sortDirection) () $ \() -> \case - [ SQLBlob extendedOutputReference - , SQLText address - , SQLBlob value - , matchMaybeBytes -> datumInfo - , matchMaybeBytes -> refScriptHash - , SQLInteger (fromIntegral -> createdAtSlotNo) - , SQLBlob createdAtHeaderHash - , matchMaybeWord64 -> spentAtSlotNo - , matchMaybeBytes -> spentAtHeaderHash - ] -> - yield (DB.resultFromRow DB.Input{..}) - (xs :: [SQLData]) -> - throwIO (UnexpectedRow (patternToText pattern_) [xs]) + (throwIO . UnexpectedRow "foldInputs") `handle` + PG.forEach_ conn (foldInputsQry pattern_ slotRange statusFlag sortDirection) + (\(extendedOutputReference + , address + , value + , datumInfo + , refScriptHash + , ((fromIntegral :: Int64 -> Word64) -> createdAtSlotNo) + , createdAtHeaderHash + , (fmap (fromIntegral :: Int64 -> Word64) -> spentAtSlotNo) + , spentAtHeaderHash + ) -> + yield (DB.resultFromRow DB.Input{..})) + , countInputs = \pattern_ -> ReaderT $ \conn -> do - query_ conn (countInputsQry pattern_) >>= \case - [[SQLInteger n]] -> - pure (toInteger n) - (xs :: [[SQLData]]) -> - throwIO $ UnexpectedRow (fromQuery $ countInputsQry pattern_) xs + handle (throwIO . 
UnexpectedRow "countInputs") $ + query_ conn (countInputsQry pattern_) >>= \case + [Only n] -> pure n + (length -> n) -> throwIO $ ExpectedSingletonResult "countInputs" n , countPolicies = \pattern_ -> ReaderT $ \conn -> do - query_ conn (countPoliciesQry pattern_) >>= \case - [[SQLInteger n]] -> - pure (toInteger n) - (xs :: [[SQLData]]) -> - throwIO $ UnexpectedRow (fromQuery $ countPoliciesQry pattern_) xs + handle (throwIO . UnexpectedRow "countPolicies") $ + query_ conn (countPoliciesQry pattern_) >>= \case + [Only n] -> pure n + (length -> n) -> throwIO $ ExpectedSingletonResult "countPolicies" n + , foldPolicies = \pattern_ yield -> ReaderT $ \conn -> do - Sqlite.fold_ conn (foldPoliciesQry pattern_) () $ \() -> \case - [SQLBlob outputReference, SQLBlob policyId] -> - yield DB.Policy{..} - (xs :: [SQLData]) -> - throwIO (UnexpectedRow "foldPolicies" [xs]) - - , insertPolicies = \policies -> ReaderT $ \conn -> - mapM_ - (\DB.Policy{..} -> do - insertRow @"policies" conn - [ SQLBlob outputReference - , SQLBlob policyId - ] - ) - policies + handle (throwIO . UnexpectedRow "foldPolicies") $ + PG.forEach_ conn (foldPoliciesQry pattern_) $ + \(outputReference, policyId) -> yield DB.Policy{..} + + , insertPolicies = \policies -> ReaderT $ \conn -> do + let + rows = flip Set.map policies $ \DB.Policy{..} -> + (outputReference, policyId) + insertRows @"policies" conn 2 rows , insertCheckpoints = \cps -> ReaderT $ \conn -> do - mapM_ - (\(DB.pointToRow -> DB.Checkpoint{..}) -> - insertRow @"checkpoints" conn - [ SQLBlob checkpointHeaderHash - , SQLInteger (fromIntegral checkpointSlotNo) - ] - ) - cps + let + rows = cps <&> \(DB.pointToRow -> DB.Checkpoint{..}) -> + (checkpointHeaderHash, ((fromIntegral :: Word64 -> Int64) checkpointSlotNo)) + insertRows @"checkpoints" conn 2 rows , listCheckpointsDesc = ReaderT $ \conn -> do - let k = fromIntegral longestRollback - let points = + let + k = fromIntegral longestRollback + points = [ 0, 10 .. k `div` 2 ^ n ] ++ [ k `div` (2 ^ e) | (e :: Integer) <- [ n-1, n-2 .. 0 ] ] where n = ceiling (log (fromIntegral @_ @Double k)) fmap (fmap DB.pointFromRow . nubOn DB.checkpointSlotNo . mconcat) $ forM points $ \pt -> - Sqlite.fold conn listCheckpointsQry [SQLInteger pt] [] $ - \xs (checkpointHeaderHash, checkpointSlotNo) -> + PG.fold conn listCheckpointsQry [pt :: Int64] [] $ + \xs (checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> pure (DB.Checkpoint{..} : xs) , listAncestorsDesc = \(SlotNo slotNo) n -> ReaderT $ \conn -> do fmap reverse $ - Sqlite.fold conn listAncestorQry (SQLInteger <$> [fromIntegral slotNo, n]) [] $ - \xs (checkpointHeaderHash, checkpointSlotNo) -> + PG.fold conn listAncestorQry ((fromIntegral :: Word64 -> Int32) slotNo, n) [] $ + \xs (checkpointHeaderHash, (fromIntegral :: Int32 -> Word64) -> checkpointSlotNo) -> pure ((DB.pointFromRow DB.Checkpoint{..}) : xs) + , insertPatterns = \patterns -> ReaderT $ \conn -> do + insertRows @"patterns" conn 1 $ Set.map (Only . patternToText) patterns + + , deletePattern = \pattern_-> ReaderT $ \conn -> do + fromIntegral <$> + execute conn "DELETE FROM patterns WHERE pattern = ?" 
+ (Only $ DB.patternToRow pattern_) + + , listPatterns = ReaderT $ \conn -> do + fmap fromList + $ PG.fold_ conn "SELECT * FROM patterns" [] + $ \xs (Only x) -> pure (DB.patternFromRow x:xs) + , insertBinaryData = \bin -> ReaderT $ \conn -> do - mapM_ - (\DB.BinaryData{..} -> - insertRow @"binary_data" conn - [ SQLBlob binaryDataHash - , SQLBlob binaryData - ] - ) - bin + let + rows = bin <&> \DB.BinaryData{..} -> + (binaryDataHash, binaryData) + insertRows @"binary_data" conn 2 rows , getBinaryData = \(DB.datumHashToRow -> binaryDataHash) -> ReaderT $ \conn -> do - Sqlite.query conn getBinaryDataQry (Only (SQLBlob binaryDataHash)) <&> \case - [[SQLBlob binaryData]] -> - Just (DB.binaryDataFromRow DB.BinaryData{..}) - _notSQLBlob -> - Nothing + handle (throwIO . UnexpectedRow "getBinaryData") $ + query conn getBinaryDataQry (Only binaryDataHash) >>= \case + [Only binaryData] -> + pure $ Just (DB.binaryDataFromRow DB.BinaryData{..}) + [] -> + pure $ Nothing + (length -> n) -> + throwIO $ ExpectedSingletonResult "getBinaryData" n , pruneBinaryData = ReaderT $ \conn -> do traceExecute_ tr conn pruneBinaryDataQry - changes conn , insertScripts = \scripts -> ReaderT $ \conn -> do - mapM_ - (\DB.ScriptReference{..} -> - insertRow @"scripts" conn - [ SQLBlob scriptHash - , SQLBlob script - ] - ) - scripts + let + rows = scripts <&> \DB.ScriptReference{..} -> + (scriptHash, script) + insertRows @"scripts" conn 2 rows , getScript = \(DB.scriptHashToRow -> scriptHash)-> ReaderT $ \conn -> do - Sqlite.query conn getScriptQry (Only (SQLBlob scriptHash)) <&> \case - [[SQLBlob script]] -> - Just (DB.scriptFromRow DB.ScriptReference{..}) - _notSQLBlob -> - Nothing - - , insertPatterns = \patterns -> ReaderT $ \conn -> do - mapM_ - (\pattern_ -> - insertRow @"patterns" conn - [ SQLText (patternToText pattern_) - ] - ) - patterns - - , deletePattern = \pattern_-> ReaderT $ \conn -> do - execute conn "DELETE FROM patterns WHERE pattern = ?" - [ SQLText (DB.patternToRow pattern_) - ] - changes conn - - , listPatterns = ReaderT $ \conn -> do - fmap fromList - $ fold_ conn "SELECT * FROM patterns" [] - $ \xs (Only x) -> pure (DB.patternFromRow x:xs) - - , rollbackTo = \(SQLInteger . fromIntegral . unSlotNo -> minSlotNo) -> ReaderT $ \conn -> do + handle (throwIO . UnexpectedRow "getScript") $ + query conn getScriptQry (Only scriptHash) >>= \case + [Only script] -> + pure $ Just (DB.scriptFromRow DB.ScriptReference{..}) + [] -> + pure $ Nothing + (length -> n) -> + throwIO $ ExpectedSingletonResult "getScript" n + + , rollbackTo = \(fromIntegral . unSlotNo -> minSlotNo) -> ReaderT $ \conn -> do query_ conn selectMaxCheckpointQry >>= \case -- NOTE: Rolling back takes quite a bit of time and, when restarting -- the application, we'll always be asked to rollback to the -- _current tip_. In this case, there's nothing to delete or update, -- so we can safely skip it. 
- [[currentSlotNo, _]] | currentSlotNo == minSlotNo -> do + [(currentSlotNo, _ :: ByteString)] | currentSlotNo == minSlotNo -> do pure () _otherwise -> do withTemporaryIndex tr conn "inputsByCreatedAt" "inputs(created_at)" $ do - withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ do + withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ void $ do deleteInputsIncrementally tr conn minSlotNo - traceExecute tr conn rollbackQryUpdateInputs [ minSlotNo ] + _ <- traceExecute tr conn rollbackQryUpdateInputs [ minSlotNo ] traceExecute tr conn rollbackQryDeleteCheckpoints [ minSlotNo ] - query_ conn selectMaxCheckpointQry >>= \case - [[SQLInteger (fromIntegral -> checkpointSlotNo), SQLBlob checkpointHeaderHash]] -> - return $ Just (DB.pointFromRow DB.Checkpoint{..}) - [[SQLNull, SQLNull]] -> - return Nothing - xs -> - throwIO $ UnexpectedRow (fromQuery selectMaxCheckpointQry) xs + handle (throwIO . UnexpectedRow (show selectMaxCheckpointQry)) $ + query_ conn selectMaxCheckpointQry >>= \case + [(fmap (fromIntegral :: Int64 -> Word64) -> Just checkpointSlotNo, Just checkpointHeaderHash)] -> + return $ Just (DB.pointFromRow DB.Checkpoint{..}) + [(Nothing, Nothing)] -> + return Nothing + res -> throwIO $ ExpectedSingletonResult (show selectMaxCheckpointQry) (length res) + + , optimize = return () -- // TODO: Review if optimize needs to happen with Postgres. Also determine if this can be hidden within the `Database` implementation. , runTransaction = \r -> bracketConnection $ \conn -> - retryWhenBusy tr (constantStrategy 0.1) 1 $ withTransaction conn mode (runReaderT r conn) - } + withTransaction conn (runReaderT r conn) + -- ^ // TODO: Check this. Do we need a retry in PG? -deleteInputsIncrementally :: Tracer IO TraceConnection -> Connection -> SQLData -> IO () -deleteInputsIncrementally tr conn minSlotNo = do - traceExecute tr conn rollbackQryDeleteInputs [ minSlotNo ] - deleted <- changes conn - unless (deleted < pruneInputsMaxIncrement) $ deleteInputsIncrementally tr conn minSlotNo + , longestRollback -insertRow - :: forall tableName. - ( KnownSymbol tableName - ) - => Connection - -> [SQLData] - -> IO () -insertRow conn r = - let - tableName = fromString (symbolVal (Proxy @tableName)) - values = mkPreparedStatement (length (toRow r)) - qry = "INSERT OR IGNORE INTO " <> tableName <> " VALUES " <> values - in - execute conn qry r + , close = do + traceWith tr ConnectionDestroyGeneric + bracketConnection PG.close + } + +-- +-- Queries +-- deleteInputsQry :: Pattern -> Query deleteInputsQry pattern_ = - Query $ unwords - [ "DELETE FROM inputs" - , additionalJoin - , "WHERE" - , whereClause - ] + "DELETE FROM inputs" + <> additionalJoin + <> "WHERE" + <> whereClause where - (whereClause, fromMaybe "" -> additionalJoin) = patternToSql pattern_ + (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = patternToSql pattern_ markInputsQry :: Pattern -> Query markInputsQry pattern_ = - Query $ unwords - [ "UPDATE inputs SET spent_at = ?" - , additionalJoin - , "WHERE" - , whereClause - ] + "UPDATE inputs SET spent_at = ?" + <> additionalJoin + <> "WHERE" + <> whereClause where - (whereClause, fromMaybe "" -> additionalJoin) = patternToSql pattern_ + (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = patternToSql pattern_ -- NOTE: This query only prune down a certain number of inputs at a time to keep his time bounded. The -- query in itself is quite expensive, and on large indexes, may takes several minutes. 
@@ -864,48 +452,14 @@ pruneInputsQry = \ LIMIT " <> show pruneInputsMaxIncrement <> "\ \)" -countPoliciesQry :: Pattern -> Query -countPoliciesQry pattern_ = Query $ - "SELECT COUNT(*) \ - \FROM policies \ - \JOIN inputs \ - \ON inputs.output_reference = policies.output_reference" - <> " WHERE " - <> patternWhereClause - where - (patternWhereClause, _) = - patternToSql pattern_ - -foldPoliciesQry :: Pattern -> Query -foldPoliciesQry pattern_ = Query $ - "SELECT policies.output_reference, policy_id \ - \FROM policies \ - \JOIN inputs \ - \ON inputs.output_reference = policies.output_reference" - <> " WHERE " - <> patternWhereClause - where - (patternWhereClause, _) = - patternToSql pattern_ - -countInputsQry :: Pattern -> Query -countInputsQry pattern_ = Query $ - "SELECT COUNT(*) FROM inputs " - <> additionalJoin - <> " WHERE " - <> patternWhereClause - where - (patternWhereClause, fromMaybe "" -> additionalJoin) = - patternToSql pattern_ - foldInputsQry :: Pattern -> Range SlotNo -> StatusFlag -> SortDirection -> Query -foldInputsQry pattern_ slotRange statusFlag sortDirection = - Query $ "SELECT \ +foldInputsQry pattern_ slotRange statusFlag sortDirection = fromText $ + "SELECT \ \inputs.ext_output_reference, inputs.address, inputs.value, \ \inputs.datum_info, inputs.script_hash, \ \inputs.created_at, createdAt.header_hash, \ @@ -970,6 +524,40 @@ foldInputsQry pattern_ slotRange statusFlag sortDirection = CreatedAt -> "created_at" SpentAt -> "spent_at" +countInputsQry :: Pattern -> Query +countInputsQry pattern_ = fromText $ + "SELECT COUNT(*) FROM inputs " + <> additionalJoin + <> " WHERE " + <> patternWhereClause + where + (patternWhereClause, fromMaybe "" -> additionalJoin) = + patternToSql pattern_ + +countPoliciesQry :: Pattern -> Query +countPoliciesQry pattern_ = + "SELECT COUNT(*) \ + \FROM policies \ + \JOIN inputs \ + \ON inputs.output_reference = policies.output_reference \ + \WHERE " + <> patternWhereClause + where + (fromText -> patternWhereClause, _) = + patternToSql pattern_ + +foldPoliciesQry :: Pattern -> Query +foldPoliciesQry pattern_ = + "SELECT policies.output_reference, policy_id \ + \FROM policies \ + \JOIN inputs \ + \ON inputs.output_reference = policies.output_reference \ + \WHERE " + <> patternWhereClause + where + (fromText -> patternWhereClause, _) = + patternToSql pattern_ + listCheckpointsQry :: Query listCheckpointsQry = "SELECT * FROM checkpoints \ @@ -990,6 +578,7 @@ getBinaryDataQry = \WHERE binary_data_hash = ? \ \LIMIT 1" +-- // TODO: Investigate if the 'ORDER BY' clause is necessary in PostgreSQL -- NOTE: This removes all binary_data that aren't associted with any -- known input. The 'ORDER BY' at the end may seem pointless but is -- actually CRUCIAL for the query performance as it forces SQLite to use @@ -1018,9 +607,16 @@ selectMaxCheckpointQry :: Query selectMaxCheckpointQry = "SELECT MAX(slot_no),header_hash FROM checkpoints" +deleteInputsIncrementally :: Tracer IO TraceConnection -> Connection -> Int64 -> IO () +deleteInputsIncrementally tr conn minSlotNo = do + deleted <- traceExecute tr conn rollbackQryDeleteInputs $ Only minSlotNo + unless (deleted < pruneInputsMaxIncrement) $ deleteInputsIncrementally tr conn minSlotNo + rollbackQryDeleteInputs :: Query rollbackQryDeleteInputs = - "DELETE FROM inputs WHERE rowid IN (SELECT rowid FROM inputs WHERE created_at > ? LIMIT " <> show pruneInputsMaxIncrement <> ")" + "DELETE FROM inputs WHERE rowid IN \ + \(SELECT rowid FROM inputs WHERE created_at > ? 
LIMIT " + <> show pruneInputsMaxIncrement <> ")" rollbackQryUpdateInputs :: Query rollbackQryUpdateInputs = @@ -1030,91 +626,11 @@ rollbackQryDeleteCheckpoints :: Query rollbackQryDeleteCheckpoints = "DELETE FROM checkpoints WHERE slot_no > ?" --- --- Helpers --- - -mkPreparedStatement :: Int -> Query -mkPreparedStatement n = - Query ("(" <> T.intercalate "," (replicate n "?") <> ")") -{-# INLINABLE mkPreparedStatement #-} - -type RetryPolicy = Word -> DiffTime - -constantStrategy :: DiffTime -> RetryPolicy -constantStrategy = const - -retryWhenBusy :: Tracer IO TraceConnection -> RetryPolicy -> Word -> IO a -> IO a -retryWhenBusy tr retryPolicy attempts action = - action `catch` (\e@SQLError{sqlError} -> case sqlError of - ErrorLocked -> do - traceWith tr $ ConnectionLocked { attempts, retryingIn } - threadDelay retryingIn - retryWhenBusy tr retryPolicy (next attempts) action - ErrorBusy -> do - traceWith tr $ ConnectionBusy { attempts, retryingIn } - threadDelay retryingIn - retryWhenBusy tr retryPolicy (next attempts) action - ErrorCan'tOpen -> do - let hint = "Failed to open the database file; this is usually due to \ - \the operating system limiting the number of file descriptors \ - \opened at the same time. Depending on your OS, you may want \ - \to increase it (see 'ulimit' and 'limit')." - traceWith tr $ ConnectionFailedToOpenDatabase { hint } - throwIO e - _otherError -> do - throwIO e - ) - where - retryingIn = retryPolicy attempts - --- NOTE: Not using sqlite-simple's version because it lacks the crucial --- 'onException' on commits; The commit operation may throw an 'SQLiteBusy' --- exception when concurrent transactions are begin executed. --- Yet, because it doesn't rollback in this case, it leaves the transaction in --- an odd shrodinger state and makes it hard for the caller to properly handle --- the exception (was the transaction rolled back or not? Is it safe to retry --- it?). So, this slightly modified version makes sure to also rollback on a --- failed commit; allowing caller to simply retry the whole transaction on --- failure. -withTransaction :: Connection -> ConnectionType -> IO a -> IO a -withTransaction conn mode action = - mask $ \restore -> do - begin mode - r <- restore action `onException` rollback - commit `onException` rollback - return r - where - begin = \case - ReadOnly -> - execute_ conn "BEGIN DEFERRED TRANSACTION" - ReadWrite -> - execute_ conn "BEGIN IMMEDIATE TRANSACTION" - WriteOnly -> - execute_ conn "BEGIN EXCLUSIVE TRANSACTION" - commit = execute_ conn "COMMIT TRANSACTION" - rollback = execute_ conn "ROLLBACK TRANSACTION" - --- Run one or more effectful queries (DELETE, UPDATE, ...) and return the total number --- of changes. -withTotalChanges :: Connection -> IO () -> IO Int -withTotalChanges conn between = do - n1 <- totalChanges conn - between - n2 <- totalChanges conn - return (n2 - n1) - -matchMaybeBytes :: SQLData -> Maybe ByteString -matchMaybeBytes = \case - SQLBlob bytes -> Just bytes - _notSQLBlob -> Nothing -{-# INLINABLE matchMaybeBytes #-} - -matchMaybeWord64 :: SQLData -> Maybe Word64 -matchMaybeWord64 = \case - SQLInteger (fromIntegral -> wrd) -> Just wrd - _notSQLInteger -> Nothing -{-# INLINABLE matchMaybeWord64 #-} +-- // TODO: Header comment for this section +withTotalChanges :: forall t a. 
(Foldable t) => (a -> IO Int64) -> t a -> IO Int +withTotalChanges io t = + fromIntegral <$> + foldM (\accum a -> (accum +) <$> io a) 0 t -- -- Indexes @@ -1163,28 +679,15 @@ installIndex tr conn name definition = do indexDoesExist conn name >>= \case False -> do traceWith tr (DatabaseCreateIndex name) - execute_ conn $ Query $ unwords - [ "CREATE INDEX IF NOT EXISTS" - , name - , "ON" - , definition - ] + void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (T.unpack name, T.unpack definition) True -> traceWith tr (DatabaseIndexAlreadyExists name) --- This creates an index on-the-fly if it is missing to make the subsequent queries fast-enough on --- large databases. If the index was not there, it is removed afterwards. Otherwise, it is simply used --- as such. withTemporaryIndex :: Tracer IO TraceConnection -> Connection -> Text -> Text -> IO a -> IO a withTemporaryIndex tr conn name definition action = do exists <- indexDoesExist conn name unless exists $ traceWith tr (ConnectionCreateTemporaryIndex name) - execute_ conn $ Query $ unwords - [ "CREATE INDEX IF NOT EXISTS" - , name - , "ON" - , definition - ] + _ <- execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (name, definition) unless exists $ traceWith tr (ConnectionCreatedTemporaryIndex name) a <- action unless exists (dropIndexIfExists tr conn name True) @@ -1193,117 +696,99 @@ withTemporaryIndex tr conn name definition action = do -- | Check whether an index exists in the database. Handy to customize the behavior (e.g. logging) -- depending on whether or not indexes are already there since 'CREATE INDEX IF NOT EXISTS' will not -- tell whether or not it has indeed created something. +-- // TODO: Validate that this works indexDoesExist :: Connection -> Text -> IO Bool -indexDoesExist conn name = - query_ @[SQLData] conn (Query $ "PRAGMA index_info('" <> name <> "')") <&> \case - [] -> False - _doesExist -> True +indexDoesExist conn name = do + query conn qry [name] <&> \case + [Only n] | n > (0 :: Int64) -> True + _doesNotExist -> False + where + qry = "SELECT COUNT(*) FROM pg_indexes WHERE indexname = ?;" dropIndexIfExists :: Tracer IO TraceConnection -> Connection -> Text -> Bool -> IO () dropIndexIfExists tr conn indexName wasTemporary = do whenM (indexDoesExist conn indexName) $ traceWith tr $ if wasTemporary then ConnectionRemoveTemporaryIndex{indexName} else ConnectionRemoveIndex{indexName} - execute_ conn $ Query $ unwords - [ "DROP INDEX IF EXISTS" - , indexName - ] + void $ execute conn "DROP INDEX IF EXISTS ?" 
[indexName] -- --- Migrations +-- Helpers -- -type MigrationRevision = Int - -type Migration = [Query] - -databaseVersion :: Connection -> IO MigrationRevision -databaseVersion conn = - withStatement conn "PRAGMA user_version" $ \stmt -> do - nextRow stmt >>= \case - Just (Only version) -> - pure version - _unexpectedVersion -> - throwIO UnexpectedUserVersion - -runMigrations :: Tracer IO TraceDatabase -> Connection -> MigrationRevision -> IO () -runMigrations tr conn currentVersion = do - let missingMigrations = drop currentVersion migrations - traceWith tr (DatabaseCurrentVersion currentVersion) - if null missingMigrations then - traceWith tr DatabaseNoMigrationNeeded - else do - let targetVersion = currentVersion + length missingMigrations - traceWith tr $ DatabaseRunningMigration currentVersion targetVersion - executeMigrations missingMigrations - where - executeMigrations = \case - [] -> do - pure () - (instructions):rest -> do - void $ withTransaction conn ReadWrite $ traverse (execute_ conn) instructions - executeMigrations rest - -migrations :: [Migration] -migrations = - [ mkMigration ix (decodeUtf8 migration) - | (ix, migration) <- zip - [1..] - [ $(embedFile "db/v1.0.0-beta/001.sql") - , $(embedFile "db/v1.0.0/001.sql") - , $(embedFile "db/v1.0.0/002.sql") - , $(embedFile "db/v1.0.1/001.sql") - , $(embedFile "db/v2.0.0-beta/001.sql") - , $(embedFile "db/v2.1.0/001.sql") - , $(embedFile "db/v2.1.0/002.sql") - , $(embedFile "db/v2.1.0/003.sql") - , $(embedFile "db/v2.2.0/001.sql") - ] - ] +insertRow + :: forall tableName r. + (KnownSymbol tableName, PG.ToRow r) + => Connection + -> Int + -> r + -> IO () +insertRow conn len row = insertRows @tableName conn len [row] + +insertRows + :: forall tableName r t. + (KnownSymbol tableName, PG.ToRow r, Foldable t) + => Connection + -> Int + -> t r + -> IO () +insertRows conn len rows = + void $ executeMany conn (fromString qry) (toList rows) + where + qry = + "INSERT OR IGNORE INTO " + <> symbolVal (Proxy @tableName) + <> " VALUES " + <> mkValuePlaceholders len + +mkValuePlaceholders :: Int -> String +mkValuePlaceholders n = + fromString $ "(" <> intercalate "," (replicate n "?") <> ")" +{-# INLINABLE mkValuePlaceholders #-} + +-- See comments in `Kupo.App.Database.SQLite` to see why the +-- postgresql-simple/sqlite-simple `withTransaction` is insufficient +withTransaction :: Connection -> IO a -> IO a +withTransaction conn action = + mask $ \restore -> do + _ <- execute_ conn "BEGIN TRANSACTION" + r <- restore action `onException` rollback + _ <- commit `onException` rollback + return r where - mkMigration :: Int -> Text -> Migration - mkMigration i sql = - ("PRAGMA user_version = " <> show i <> ";") - : (fmap Query . filter (not . T.null . T.strip) . T.splitOn ";") sql + commit = execute_ conn "COMMIT TRANSACTION" + rollback = execute_ conn "ROLLBACK TRANSACTION" + +fromText :: Text -> Query +fromText = fromString . T.unpack -- --- Exceptions +-- Exceptions & Tracing -- --- | Somehow, a 'PRAGMA user_version' didn't yield a number but, either nothing --- or something else? -data UnexpectedUserVersionException - = UnexpectedUserVersion - deriving Show -instance Exception UnexpectedUserVersionException - --- | Something went wrong when unmarshalling data from the database. 
-data UnexpectedRowException - = UnexpectedRow !Text ![[SQLData]] - deriving Show -instance Exception UnexpectedRowException - traceExecute - :: ToRow q + :: PG.ToRow q => Tracer IO TraceConnection -> Connection -> Query -> q - -> IO () + -> IO Int traceExecute tr conn template qs = do traceWith tr $ ConnectionBeginQuery (trim template) - execute conn template qs + n <- execute conn template qs traceWith tr $ ConnectionExitQuery (trim template) + return $ fromIntegral n traceExecute_ :: Tracer IO TraceConnection -> Connection -> Query - -> IO () + -> IO Int traceExecute_ tr conn template = do traceWith tr $ ConnectionBeginQuery (trim template) - execute_ conn template + n <- execute_ conn template traceWith tr $ ConnectionExitQuery (trim template) + return $ fromIntegral n trim :: Query -> LText trim = @@ -1322,4 +807,19 @@ trim = ) (False, mempty) . - fromQuery + fromString + . + show + +-- | Something went wrong when unmarshalling data from the database. +data UnexpectedRowException + = UnexpectedRow !Text !PG.ResultError + deriving Show +instance Exception UnexpectedRowException + +data ExpectedSingletonResultException + = ExpectedSingletonResult + { context :: !Text + , received :: !Int + } deriving Show +instance Exception ExpectedSingletonResultException diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs index 5059bfb..fec90dc 100644 --- a/src/Kupo/App/Database/SQLite.hs +++ b/src/Kupo/App/Database/SQLite.hs @@ -9,8 +9,7 @@ {-# LANGUAGE TemplateHaskell #-} module Kupo.App.Database.SQLite - ( - + ( -- // TODO: Fix documentation headers -- ** Queries -- *** Inputs deleteInputsQry @@ -38,6 +37,7 @@ module Kupo.App.Database.SQLite , copyDatabase -- * Internal + , Connection , installIndexes , installIndex @@ -216,13 +216,12 @@ newDatabaseFile tr = \case Configuration.Dir dir -> OnDisk <$> newDatabaseOnDiskFile tr (traceWith tr . DatabaseCreateNew) dir Configuration.Remote url -> liftIO $ do - traceWith tr $ DatabaseMustBeLocal - { errorMessage = - "This binary was compiled to use SQLite. \ - \You must specify either a working directory or in-memory configuration. \ - \Using a remote URL is only allowed on binaries compiled to use PostgreSQL." - } - throwIO (FailedToAccessOrCreateDatabaseFile $ RemoteURLSpecifiedForSQLite url) + traceWith tr $ DatabaseLocationInvalid + { errorMessage = "This binary was compiled to use SQLite. \ + \You must specify either a working directory or in-memory configuration. \ + \Using a remote URL is only allowed on binaries compiled to use PostgreSQL." + } + throwIO (FailedToAccessOrCreateDatabaseFile $ RemoteURLSpecifiedForSQLite url) newDatabaseOnDiskFile :: (MonadIO m) @@ -629,11 +628,11 @@ mkDatabase tr mode longestRollback bracketConnection = Database mapM_ (execute_ conn . deleteInputsQry) refs , markInputs = \(fromIntegral . 
unSlotNo -> slotNo) refs -> ReaderT $ \conn -> do - withTotalChanges conn $ - forM_ refs $ \ref -> do - execute conn (markInputsQry ref) - [ SQLInteger slotNo - ] + withTotalChanges conn $ + forM_ refs $ \ref -> do + execute conn (markInputsQry ref) + [ SQLInteger slotNo + ] , pruneInputs = ReaderT $ \conn -> do withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ do diff --git a/src/Kupo/App/Database/Types.hs b/src/Kupo/App/Database/Types.hs index aa7cd19..536bd4e 100644 --- a/src/Kupo/App/Database/Types.hs +++ b/src/Kupo/App/Database/Types.hs @@ -20,13 +20,21 @@ module Kupo.App.Database.Types import Kupo.Prelude +import Control.Monad.Fail + () import Data.Severity ( HasSeverityAnnotation , Severity (..) ) +#if postgres +import Database.PostgreSQL.Simple + ( Connection + ) +#else import Database.SQLite.Simple ( Connection ) +#endif import Kupo.Control.MonadLog ( HasSeverityAnnotation (..) ) @@ -59,9 +67,6 @@ import Kupo.Data.Pattern , Result ) -import Control.Monad.Fail - () - import qualified Kupo.Data.Database as DB data ConnectionType = ReadOnly | ReadWrite | WriteOnly @@ -162,7 +167,7 @@ data Database (m :: Type -> Type) = Database -> DBTransaction m (Maybe Point) , optimize - :: DBTransaction m () + :: DBTransaction m () , runTransaction :: forall a. () @@ -243,7 +248,7 @@ data TraceDatabase where DatabasePathMustBeDirectory :: { hint :: Text } -> TraceDatabase - DatabaseMustBeLocal + DatabaseLocationInvalid :: { errorMessage :: Text } -> TraceDatabase DatabaseCloneSourceDatabase @@ -282,7 +287,7 @@ instance HasSeverityAnnotation TraceDatabase where DatabaseCreateIndex{} -> Notice DatabaseIndexAlreadyExists{} -> Debug DatabasePathMustBeDirectory{} -> Error - DatabaseMustBeLocal{} -> Error + DatabaseLocationInvalid{} -> Error DatabaseDeferIndexes{} -> Warning DatabaseCloneSourceDatabase{} -> Notice DatabaseCleanupOldData{} -> Info @@ -299,6 +304,10 @@ data TraceConnection where ConnectionDestroyShortLived :: { mode :: ConnectionType } -> TraceConnection + ConnectionCreateGeneric + :: TraceConnection + ConnectionDestroyGeneric + :: TraceConnection ConnectionLocked :: { attempts :: Word, retryingIn :: DiffTime } -> TraceConnection @@ -339,6 +348,8 @@ instance HasSeverityAnnotation TraceConnection where getSeverityAnnotation = \case ConnectionCreateShortLived{} -> Debug ConnectionDestroyShortLived{} -> Debug + ConnectionCreateGeneric{} -> Debug + ConnectionDestroyGeneric{} -> Debug ConnectionLocked{attempts, retryingIn} | retryingIn * fromIntegral attempts > 60 -> Warning ConnectionLocked{} -> Debug diff --git a/test/Test/Kupo/Data/DatabaseSpec.hs b/test/Test/Kupo/Data/DatabaseSpec.hs index 246d95c..5c1bc62 100644 --- a/test/Test/Kupo/Data/DatabaseSpec.hs +++ b/test/Test/Kupo/Data/DatabaseSpec.hs @@ -2,6 +2,7 @@ -- License, v. 2.0. If a copy of the MPL was not distributed with this -- file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+{-# LANGUAGE CPP #-} {-# LANGUAGE RecordWildCards #-} module Test.Kupo.Data.DatabaseSpec @@ -126,7 +127,11 @@ import Test.Hspec , Spec , around , context + , describe + , hspec + , it , parallel + , pendingWith , shouldBe , specify ) @@ -191,6 +196,14 @@ import qualified Data.Set as Set import qualified Data.Text as T import qualified Prelude +#if postgres +spec :: Spec +spec = describe "DatabaseSpec" $ + it "Not yet implemented for Postgres" $ + pendingWith $ "DB tests need to be split into DB-agnostic " + <> "and DB-specific tests and conditionally compiled" +#else + spec :: Spec spec = parallel $ do context "fromRow ↔ toRow" $ do @@ -1313,18 +1326,18 @@ explainQuery conn query = do withFixtureDatabase :: (Connection -> IO ()) -> IO () withFixtureDatabase action = withConnection ":memory:" $ \conn -> do withTransaction conn $ do - execute_ conn - "CREATE TABLE IF NOT EXISTS inputs (\ - \ address TEXT NOT NULL,\ - \ payment_credential TEXT NOT NULL GENERATED ALWAYS AS (substr(address, -56)) VIRTUAL,\ - \ ext_output_reference BLOB NOT NULL,\ - \ output_reference BLOB NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, 1, 34)) VIRTUAL\ - \)" - execute_ conn - "CREATE TABLE IF NOT EXISTS policies (\ - \ output_reference BLOB NOT NULL,\ - \ policy_id BLOB NOT NULL\ - \)" + execute_ conn $ + "CREATE TABLE IF NOT EXISTS inputs (" + <> " address TEXT NOT NULL," + <> " payment_credential TEXT NOT NULL GENERATED ALWAYS AS (substr(address, -56)) VIRTUAL," + <> " ext_output_reference BLOB NOT NULL," + <> " output_reference BLOB NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, 1, 34)) VIRTUAL" + <> ")" + execute_ conn $ + "CREATE TABLE IF NOT EXISTS policies (" + <> " output_reference BLOB NOT NULL," + <> " policy_id BLOB NOT NULL" + <> ")" executeMany conn "INSERT INTO inputs VALUES (?, ?)" $ flip map matches $ \(outRef, out) -> ( SQLText (addressToRow (getAddress out)) @@ -1383,3 +1396,5 @@ forAllCheckpoints k = forAllShow (genPointsBetween (0, SlotNo (10 * k))) (show . 
fmap getPointSlotNo) + +#endif From c62c037f6396f1c7c4b3aa8f34262b10661f745c Mon Sep 17 00:00:00 2001 From: Dominic Mayhew Date: Mon, 10 Jun 2024 14:24:22 -0700 Subject: [PATCH 2/9] Port migrations to PostgreSQL --- db/postgres/v1.0.0-beta/001.sql | 14 +++ db/postgres/v1.0.0/001.sql | 13 ++ db/{ => postgres}/v1.0.0/002.sql | 0 db/postgres/v2.0.0-beta/001.sql | 26 ++++ db/{ => postgres}/v2.1.0/001.sql | 0 db/postgres/v2.1.0/002.sql | 1 + db/postgres/v2.1.0/003.sql | 17 +++ db/postgres/v2.2.0/001.sql | 12 ++ db/{ => sqlite}/v1.0.0-beta/001.sql | 0 db/{ => sqlite}/v1.0.0/001.sql | 0 db/sqlite/v1.0.0/002.sql | 4 + db/{ => sqlite}/v1.0.1/001.sql | 0 db/{ => sqlite}/v2.0.0-beta/001.sql | 2 +- db/sqlite/v2.1.0/001.sql | 4 + db/{ => sqlite}/v2.1.0/002.sql | 0 db/{ => sqlite}/v2.1.0/003.sql | 0 db/{ => sqlite}/v2.2.0/001.sql | 0 kupo.cabal | 4 +- src/Kupo/App/Database/Postgres.hs | 183 ++++++++++++++++++++-------- src/Kupo/App/Database/SQLite.hs | 18 +-- 20 files changed, 237 insertions(+), 61 deletions(-) create mode 100644 db/postgres/v1.0.0-beta/001.sql create mode 100644 db/postgres/v1.0.0/001.sql rename db/{ => postgres}/v1.0.0/002.sql (100%) create mode 100644 db/postgres/v2.0.0-beta/001.sql rename db/{ => postgres}/v2.1.0/001.sql (100%) create mode 100644 db/postgres/v2.1.0/002.sql create mode 100644 db/postgres/v2.1.0/003.sql create mode 100644 db/postgres/v2.2.0/001.sql rename db/{ => sqlite}/v1.0.0-beta/001.sql (100%) rename db/{ => sqlite}/v1.0.0/001.sql (100%) create mode 100644 db/sqlite/v1.0.0/002.sql rename db/{ => sqlite}/v1.0.1/001.sql (100%) rename db/{ => sqlite}/v2.0.0-beta/001.sql (92%) create mode 100644 db/sqlite/v2.1.0/001.sql rename db/{ => sqlite}/v2.1.0/002.sql (100%) rename db/{ => sqlite}/v2.1.0/003.sql (100%) rename db/{ => sqlite}/v2.2.0/001.sql (100%) diff --git a/db/postgres/v1.0.0-beta/001.sql b/db/postgres/v1.0.0-beta/001.sql new file mode 100644 index 0000000..3c47d52 --- /dev/null +++ b/db/postgres/v1.0.0-beta/001.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS inputs ( + output_reference BYTEA NOT NULL, + address TEXT NOT NULL, + value BYTEA NOT NULL, + datum_hash BYTEA, + slot_no INTEGER NOT NULL, + PRIMARY KEY (output_reference) +); + +CREATE TABLE IF NOT EXISTS checkpoints ( + header_hash BYTEA NOT NULL, + slot_no INTEGER NOT NULL, + PRIMARY KEY (slot_no) +); diff --git a/db/postgres/v1.0.0/001.sql b/db/postgres/v1.0.0/001.sql new file mode 100644 index 0000000..4fbfa0d --- /dev/null +++ b/db/postgres/v1.0.0/001.sql @@ -0,0 +1,13 @@ +DROP TABLE inputs; + +CREATE TABLE IF NOT EXISTS inputs ( + output_reference BYTEA NOT NULL, + address TEXT NOT NULL, + value BYTEA NOT NULL, + datum_hash BYTEA, + header_hash BYTEA NOT NULL, + slot_no INTEGER NOT NULL, + PRIMARY KEY (output_reference) +); + +DELETE FROM checkpoints; diff --git a/db/v1.0.0/002.sql b/db/postgres/v1.0.0/002.sql similarity index 100% rename from db/v1.0.0/002.sql rename to db/postgres/v1.0.0/002.sql diff --git a/db/postgres/v2.0.0-beta/001.sql b/db/postgres/v2.0.0-beta/001.sql new file mode 100644 index 0000000..442ab9b --- /dev/null +++ b/db/postgres/v2.0.0-beta/001.sql @@ -0,0 +1,26 @@ +DROP TABLE inputs; + +CREATE TABLE IF NOT EXISTS inputs ( + output_reference BYTEA NOT NULL, + address TEXT NOT NULL, + value BYTEA NOT NULL, + datum_hash BYTEA, + script_hash BYTEA, + created_at INTEGER NOT NULL, + spent_at INTEGER, + PRIMARY KEY (output_reference) +); + +DELETE FROM checkpoints; + +CREATE TABLE IF NOT EXISTS binary_data ( + binary_data_hash BYTEA NOT NULL, + binary_data BYTEA NOT NULL, + 
PRIMARY KEY (binary_data_hash) +); + +CREATE TABLE IF NOT EXISTS scripts ( + script_hash BYTEA NOT NULL, + script BYTEA NOT NULL, + PRIMARY KEY (script_hash) +); diff --git a/db/v2.1.0/001.sql b/db/postgres/v2.1.0/001.sql similarity index 100% rename from db/v2.1.0/001.sql rename to db/postgres/v2.1.0/001.sql diff --git a/db/postgres/v2.1.0/002.sql b/db/postgres/v2.1.0/002.sql new file mode 100644 index 0000000..5edd502 --- /dev/null +++ b/db/postgres/v2.1.0/002.sql @@ -0,0 +1 @@ +ALTER TABLE inputs ADD COLUMN payment_credential TEXT GENERATED ALWAYS AS (substr(address, -56)) STORED; diff --git a/db/postgres/v2.1.0/003.sql b/db/postgres/v2.1.0/003.sql new file mode 100644 index 0000000..065e3e5 --- /dev/null +++ b/db/postgres/v2.1.0/003.sql @@ -0,0 +1,17 @@ +ALTER TABLE inputs RENAME COLUMN output_reference TO ext_output_reference; + +ALTER TABLE inputs ADD COLUMN output_reference BYTEA NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, 1, 34)) STORED; +ALTER TABLE inputs ADD COLUMN output_index BYTEA NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, -4, 2)) STORED; +ALTER TABLE inputs ADD COLUMN transaction_index BYTEA NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, -2)) STORED; + +CREATE UNIQUE INDEX IF NOT EXISTS inputsByOutputReference ON inputs(output_reference); + +CREATE TABLE IF NOT EXISTS policies ( + output_reference BYTEA NOT NULL, + policy_id BYTEA NOT NULL, + PRIMARY KEY (output_reference, policy_id), + CONSTRAINT fk_policies_inputs + FOREIGN KEY (output_reference) + REFERENCES inputs(output_reference) + ON DELETE CASCADE +); diff --git a/db/postgres/v2.2.0/001.sql b/db/postgres/v2.2.0/001.sql new file mode 100644 index 0000000..92d2478 --- /dev/null +++ b/db/postgres/v2.2.0/001.sql @@ -0,0 +1,12 @@ +DELETE FROM checkpoints; +DELETE FROM policies; +DELETE FROM inputs; + +DROP INDEX IF EXISTS inputsByCreatedAt; +DROP INDEX IF EXISTS inputsBySpentAt; +DROP INDEX IF EXISTS inputsByDatumHash; +DROP INDEX IF EXISTS inputsByAddress; +DROP INDEX IF EXISTS inputsByPaymentCredential; + +ALTER TABLE inputs RENAME COLUMN datum_hash TO datum_info; +ALTER TABLE inputs ADD COLUMN datum_hash BYTEA GENERATED ALWAYS AS (substr(datum_info, 2)) STORED; diff --git a/db/v1.0.0-beta/001.sql b/db/sqlite/v1.0.0-beta/001.sql similarity index 100% rename from db/v1.0.0-beta/001.sql rename to db/sqlite/v1.0.0-beta/001.sql diff --git a/db/v1.0.0/001.sql b/db/sqlite/v1.0.0/001.sql similarity index 100% rename from db/v1.0.0/001.sql rename to db/sqlite/v1.0.0/001.sql diff --git a/db/sqlite/v1.0.0/002.sql b/db/sqlite/v1.0.0/002.sql new file mode 100644 index 0000000..3fe492e --- /dev/null +++ b/db/sqlite/v1.0.0/002.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS patterns ( + pattern TEXT NOT NULL, + PRIMARY KEY (pattern) +); diff --git a/db/v1.0.1/001.sql b/db/sqlite/v1.0.1/001.sql similarity index 100% rename from db/v1.0.1/001.sql rename to db/sqlite/v1.0.1/001.sql diff --git a/db/v2.0.0-beta/001.sql b/db/sqlite/v2.0.0-beta/001.sql similarity index 92% rename from db/v2.0.0-beta/001.sql rename to db/sqlite/v2.0.0-beta/001.sql index 23763b3..4a2cb44 100644 --- a/db/v2.0.0-beta/001.sql +++ b/db/sqlite/v2.0.0-beta/001.sql @@ -2,7 +2,7 @@ DROP TABLE inputs; CREATE TABLE IF NOT EXISTS inputs ( output_reference BLOB NOT NULL, - address TEXT COLLATE NOCASE NOT NULL, + address TEXT NOT NULL, value BLOB NOT NULL, datum_hash BLOB, script_hash BLOB, diff --git a/db/sqlite/v2.1.0/001.sql b/db/sqlite/v2.1.0/001.sql new file mode 100644 index 0000000..602bbb5 --- /dev/null +++ 
b/db/sqlite/v2.1.0/001.sql @@ -0,0 +1,4 @@ +DELETE FROM inputs; +DELETE FROM checkpoints; +DELETE FROM binary_data; +DELETE FROM scripts; diff --git a/db/v2.1.0/002.sql b/db/sqlite/v2.1.0/002.sql similarity index 100% rename from db/v2.1.0/002.sql rename to db/sqlite/v2.1.0/002.sql diff --git a/db/v2.1.0/003.sql b/db/sqlite/v2.1.0/003.sql similarity index 100% rename from db/v2.1.0/003.sql rename to db/sqlite/v2.1.0/003.sql diff --git a/db/v2.2.0/001.sql b/db/sqlite/v2.2.0/001.sql similarity index 100% rename from db/v2.2.0/001.sql rename to db/sqlite/v2.2.0/001.sql diff --git a/kupo.cabal b/kupo.cabal index 28a3187..5562cc6 100644 --- a/kupo.cabal +++ b/kupo.cabal @@ -233,8 +233,8 @@ library , websockets , websockets-json , yaml - else - build-depends: + else + build-depends: aeson , attoparsec-aeson , base >=4.7 && <5 diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs index a7d393e..79823a2 100644 --- a/src/Kupo/App/Database/Postgres.hs +++ b/src/Kupo/App/Database/Postgres.hs @@ -6,6 +6,7 @@ {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE TemplateHaskell #-} module Kupo.App.Database.Postgres ( @@ -46,9 +47,14 @@ module Kupo.App.Database.Postgres import Kupo.Prelude +import Control.Concurrent + ( getNumCapabilities + ) import Control.Exception ( IOException , handle + , mask + , onException , throwIO ) import Control.Monad @@ -59,9 +65,22 @@ import Control.Tracer , traceWith ) import qualified Data.Char as Char +import Data.FileEmbed + ( embedFile + ) +import Data.Pool + ( Pool + , defaultPoolConfig + , destroyAllResources + , newPool + , tryWithResource + , withResource + ) import qualified Data.Set as Set ( map ) +import qualified Data.Text as T +import qualified Data.Text.Lazy.Builder as TL import Database.PostgreSQL.Simple ( Connection , Only (..) @@ -77,6 +96,19 @@ import GHC.TypeLits ( KnownSymbol , symbolVal ) +import Kupo.App.Database.Types + ( ConnectionType (..) + , DBPool (..) + , Database (..) + , TraceConnection (..) + , TraceDatabase (..) + ) +import Kupo.Control.MonadLog + ( TraceProgress (..) + ) +import Kupo.Control.MonadThrow + ( bracket + ) import Kupo.Data.Cardano ( SlotNo (..) , slotNoToText @@ -86,10 +118,12 @@ import Kupo.Data.Configuration , LongestRollback (..) , pruneInputsMaxIncrement ) +import qualified Kupo.Data.Configuration as Configuration import Kupo.Data.Database ( SortDirection (..) , patternToSql ) +import qualified Kupo.Data.Database as DB import Kupo.Data.Http.SlotRange ( Range (..) , RangeField (..) @@ -104,32 +138,7 @@ import Kupo.Data.Pattern import Numeric ( Floating (..) ) - -import Control.Concurrent - ( getNumCapabilities - ) -import Data.Pool - ( Pool - , defaultPoolConfig - , destroyAllResources - , newPool - , tryWithResource - , withResource - ) -import qualified Data.Text as T -import qualified Data.Text.Lazy.Builder as TL -import Kupo.App.Database.Types - ( ConnectionType (..) - , DBPool (..) - , Database (..) - , TraceConnection (..) - , TraceDatabase (..) - ) -import Kupo.Control.MonadLog - ( TraceProgress (..) 
- ) -import qualified Kupo.Data.Configuration as Configuration -import qualified Kupo.Data.Database as DB +import qualified Text.URI as URI data FailedToCreateConnection = FailedToCreateConnection { reason :: FailedToCreateConnectionReason } deriving (Show) @@ -150,10 +159,13 @@ newDBPool tr isReadOnly dbLocation longestRollback = do (5*) <$> liftIO getNumCapabilities connectionPool <- liftIO $ newPool $ defaultPoolConfig - mkConnection + connectDb (\Database{close} -> close) 600 -- // TODO: Review concurrency requirements maxConnections + + -- // TODO: Will running migrations here break things in a multi-client scenario? + bracket mkConnection PG.close $ runMigrations tr let withDB :: forall a b. (Pool (Database IO) -> (Database IO -> IO a) -> IO b) -> ConnectionType -> (Database IO -> IO a) -> IO b @@ -172,25 +184,25 @@ newDBPool tr isReadOnly dbLocation longestRollback = do return DBPool { tryWithDatabase = withDB tryWithResource, withDatabaseBlocking = withDB withResource, withDatabaseExclusiveWriter, maxConcurrentReaders = 0, maxConcurrentWriters = maxConnections, destroyResources } where - mkConnection = case dbLocation of - Configuration.Remote uri -> do - traceWith tr $ DatabaseConnection ConnectionCreateGeneric - conn <- PG.connectPostgreSQL (encodeUtf8 $ render uri) - return $ mkDatabase trConn longestRollback (\dbAction -> dbAction conn) - Configuration.Dir dir -> liftIO $ do - traceLocationError - throwIO (FailedToCreateConnection $ SQLiteDirSpecified dir) - Configuration.InMemory path -> liftIO $ do - traceLocationError - throwIO (FailedToCreateConnection $ SQLiteInMemorySpecified path) - - where - trConn :: Tracer IO TraceConnection - trConn = contramap DatabaseConnection tr - - traceLocationError = traceWith tr $ DatabaseLocationInvalid - { errorMessage = "This binary was compiled to use PostgreSQL and requires a Postgres connection URI. \ - \Local file paths and in-memory configurations are only valid for binaries compiled for SQLite." } + connectDb = mkConnection <&> \conn -> mkDatabase trConn longestRollback (\dbAction -> dbAction conn) + + mkConnection = case dbLocation of + Configuration.Remote uri -> do + traceWith tr $ DatabaseConnection ConnectionCreateGeneric + PG.connectPostgreSQL . encodeUtf8 $ URI.render uri + Configuration.Dir dir -> liftIO $ do + traceLocationError + throwIO (FailedToCreateConnection $ SQLiteDirSpecified dir) + Configuration.InMemory path -> liftIO $ do + traceLocationError + throwIO (FailedToCreateConnection $ SQLiteInMemorySpecified path) + + trConn :: Tracer IO TraceConnection + trConn = contramap DatabaseConnection tr + + traceLocationError = traceWith tr $ DatabaseLocationInvalid + { errorMessage = "This binary was compiled to use PostgreSQL and requires a Postgres connection URI. \ + \Local file paths and in-memory configurations are only valid for binaries compiled for SQLite." 
} -- Copy from an existing database into another, using the provided patterns @@ -656,13 +668,13 @@ installIndexes tr conn = \case InstallIndexesIfNotExist -> do installIndex tr conn "inputsByAddress" - "inputs(address COLLATE NOCASE)" + "inputs(address)" -- // TODO: I deleted the collate nocase clause installIndex tr conn "inputsByDatumHash" "inputs(datum_hash)" installIndex tr conn "inputsByPaymentCredential" - "inputs(payment_credential COLLATE NOCASE)" + "inputs(payment_credential)" -- // TODO: I deleted the nocase clause installIndex tr conn "inputsByCreatedAt" "inputs(created_at)" @@ -712,6 +724,77 @@ dropIndexIfExists tr conn indexName wasTemporary = do else ConnectionRemoveIndex{indexName} void $ execute conn "DROP INDEX IF EXISTS ?" [indexName] +-- +-- Migrations +-- + +type MigrationRevision = Int + +type Migration = [Query] + +databaseVersion :: Connection -> IO MigrationRevision +databaseVersion conn = do + _ <- execute_ conn createStatement + (throwIO . UnexpectedRow "databaseVersion") `handle` + query_ conn countStatement >>= \case + [(revision, _version :: String)] -> return revision + [] -> return 0 + (length -> n) -> throwIO $ ExpectedSingletonResult "databaseVersion" n + where + createStatement = + "CREATE TABLE IF NOT EXISTS migrations \ + \(\ + \id INTEGER PRIMARY KEY, \ + \version VARCHAR (50) \ + \);" + + countStatement = + "SELECT id, version FROM migrations ORDER BY id DESC LIMIT 1;" + +-- // TODO: Should there be a command line argument to determine whether or not to run migrations? +-- How will running migrations affect a DB that supports multiple Kupo instances? +runMigrations :: Tracer IO TraceDatabase -> Connection -> IO () +runMigrations tr conn = do + currentVersion <- databaseVersion conn + let missingMigrations = drop currentVersion migrations + traceWith tr (DatabaseCurrentVersion currentVersion) + if null missingMigrations then + traceWith tr DatabaseNoMigrationNeeded + else do + let targetVersion = currentVersion + length missingMigrations + traceWith tr $ DatabaseRunningMigration currentVersion targetVersion + executeMigrations missingMigrations + where + executeMigrations = \case + [] -> do + pure () + (instructions):rest -> do + withTransaction conn $ mapM_ (execute_ conn) instructions + executeMigrations rest + +migrations :: [Migration] +migrations = + map + (\(idx, (sql, fromString -> version)) -> mkMigration idx version $ decodeUtf8 sql) + $ zip [1..] files + where + mkMigration :: Int -> Query -> Text -> Migration + mkMigration idx version sql = + ("INSERT INTO migrations (id, version) VALUES (" <> show idx <> ",'" <> version <> "');") + : (map (fromString . T.unpack) . filter (not . T.null . T.strip) . 
T.splitOn ";") sql + + files = + [ ($(embedFile "db/postgres/v1.0.0-beta/001.sql"), "v1.0.0.-beta/001.sql") + , ($(embedFile "db/postgres/v1.0.0/001.sql"), "v1.0.0/001.sql") + , ($(embedFile "db/postgres/v1.0.0/002.sql"), "v1.0.0/002.sql") + -- // TODO: Confirm this is ok to delete!, ($(embedFile "db/postgres/v1.0.1/001.sql"), "v1.0.1/001.sql") + , ($(embedFile "db/postgres/v2.0.0-beta/001.sql"), "v2.0.0-beta/001.sql") + , ($(embedFile "db/postgres/v2.1.0/001.sql"), "v2.1.0/001.sql") + , ($(embedFile "db/postgres/v2.1.0/002.sql"), "v2.1.0/002.sql") + , ($(embedFile "db/postgres/v2.1.0/003.sql"), "v2.1.0/003.sql") + , ($(embedFile "db/postgres/v2.2.0/001.sql"), "v2.2.0/001.sql") + ] + -- -- Helpers -- @@ -736,16 +819,18 @@ insertRows conn len rows = void $ executeMany conn (fromString qry) (toList rows) where qry = - "INSERT OR IGNORE INTO " + "INSERT INTO " <> symbolVal (Proxy @tableName) <> " VALUES " <> mkValuePlaceholders len + <> "ON CONFLICT DO NOTHING" mkValuePlaceholders :: Int -> String mkValuePlaceholders n = fromString $ "(" <> intercalate "," (replicate n "?") <> ")" {-# INLINABLE mkValuePlaceholders #-} + -- See comments in `Kupo.App.Database.SQLite` to see why the -- postgresql-simple/sqlite-simple `withTransaction` is insufficient withTransaction :: Connection -> IO a -> IO a diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs index fec90dc..eb630ee 100644 --- a/src/Kupo/App/Database/SQLite.hs +++ b/src/Kupo/App/Database/SQLite.hs @@ -1248,15 +1248,15 @@ migrations = [ mkMigration ix (decodeUtf8 migration) | (ix, migration) <- zip [1..] - [ $(embedFile "db/v1.0.0-beta/001.sql") - , $(embedFile "db/v1.0.0/001.sql") - , $(embedFile "db/v1.0.0/002.sql") - , $(embedFile "db/v1.0.1/001.sql") - , $(embedFile "db/v2.0.0-beta/001.sql") - , $(embedFile "db/v2.1.0/001.sql") - , $(embedFile "db/v2.1.0/002.sql") - , $(embedFile "db/v2.1.0/003.sql") - , $(embedFile "db/v2.2.0/001.sql") + [ $(embedFile "db/sqlite/v1.0.0-beta/001.sql") + , $(embedFile "db/sqlite/v1.0.0/001.sql") + , $(embedFile "db/sqlite/v1.0.0/002.sql") + , $(embedFile "db/sqlite/v1.0.1/001.sql") + , $(embedFile "db/sqlite/v2.0.0-beta/001.sql") + , $(embedFile "db/sqlite/v2.1.0/001.sql") + , $(embedFile "db/sqlite/v2.1.0/002.sql") + , $(embedFile "db/sqlite/v2.1.0/003.sql") + , $(embedFile "db/sqlite/v2.2.0/001.sql") ] ] where From b32f1c7624df57606b822c5febff44378e488198 Mon Sep 17 00:00:00 2001 From: Dominic Mayhew Date: Tue, 11 Jun 2024 15:20:42 -0700 Subject: [PATCH 3/9] Translate SQL commands in PG module to PG dialect Additionally, provide a function to `Data/Database` to convert a ByteString into a bytea/BLOB literal depending on the calling context. 
--- src/Kupo/App/Database/Postgres.hs | 344 ++++++++++++++++------------ src/Kupo/App/Database/SQLite.hs | 43 +++- src/Kupo/Data/Database.hs | 17 +- test/Test/Kupo/Data/DatabaseSpec.hs | 3 +- 4 files changed, 240 insertions(+), 167 deletions(-) diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs index 79823a2..fb433dc 100644 --- a/src/Kupo/App/Database/Postgres.hs +++ b/src/Kupo/App/Database/Postgres.hs @@ -4,6 +4,7 @@ {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE DuplicateRecordFields #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TemplateHaskell #-} @@ -82,9 +83,11 @@ import qualified Data.Set as Set import qualified Data.Text as T import qualified Data.Text.Lazy.Builder as TL import Database.PostgreSQL.Simple - ( Connection + ( Binary (..) + , Connection , Only (..) , Query + , SqlError (..) , execute , executeMany , execute_ @@ -92,6 +95,9 @@ import Database.PostgreSQL.Simple , query_ ) import qualified Database.PostgreSQL.Simple as PG +import Database.PostgreSQL.Simple.Types + ( Identifier (..) + ) import GHC.TypeLits ( KnownSymbol , symbolVal @@ -99,6 +105,7 @@ import GHC.TypeLits import Kupo.App.Database.Types ( ConnectionType (..) , DBPool (..) + , DBTransaction , Database (..) , TraceConnection (..) , TraceDatabase (..) @@ -163,7 +170,7 @@ newDBPool tr isReadOnly dbLocation longestRollback = do (\Database{close} -> close) 600 -- // TODO: Review concurrency requirements maxConnections - + -- // TODO: Will running migrations here break things in a multi-client scenario? bracket mkConnection PG.close $ runMigrations tr @@ -176,7 +183,7 @@ newDBPool tr isReadOnly dbLocation longestRollback = do _ -> withRes connectionPool dbAction -- // TODO: Acutally do something with defer indexes! And possibly actually provide a preferred connection? - withDatabaseExclusiveWriter :: DeferIndexesInstallation -> (Database IO -> IO a) -> (IO a) + withDatabaseExclusiveWriter :: DeferIndexesInstallation -> (Database IO -> IO a) -> IO a destroyResources = destroyAllResources connectionPool withDatabaseExclusiveWriter _deferIndexes = withResource connectionPool @@ -230,47 +237,55 @@ mkDatabase -> (forall a. (Connection -> IO a) -> IO a) -> Database IO mkDatabase tr longestRollback bracketConnection = Database - { insertInputs = \inputs -> ReaderT $ \conn -> do - mapM_ (\DB.Input{..} -> do - insertRow @"inputs" conn 7 - ( extendedOutputReference - , address - , value - , datumInfo - , refScriptHash - , (fromIntegral createdAtSlotNo :: Int64) - , spentAtSlotNo + { insertInputs = \inputs -> ReaderT $ \conn -> + handle (throwIO . 
DatabaseException "insertInptus" ) $ do + mapM_ (\DB.Input{..} -> do + insertRow @"inputs" conn 7 + ( extendedOutputReference + , address + , value + , datumInfo + , refScriptHash + , (fromIntegral createdAtSlotNo :: Int64) + , spentAtSlotNo + ) + case datum of + Nothing -> + pure () + Just DB.BinaryData{..} -> + insertRow @"binary_data" conn 2 + ( binaryDataHash + , binaryData + ) + case refScript of + Nothing -> + pure () + Just DB.ScriptReference{..} -> + insertRow @"scripts" conn 2 + ( scriptHash + , script + ) ) - case datum of - Nothing -> - pure () - Just DB.BinaryData{..} -> - insertRow @"binary_data" conn 2 - ( binaryDataHash - , binaryData - ) - case refScript of - Nothing -> - pure () - Just DB.ScriptReference{..} -> - insertRow @"scripts" conn 2 - ( scriptHash - , script - ) - ) - inputs - - , deleteInputs = \refs -> ReaderT $ \conn -> do - withTotalChanges (\pattern -> execute_ conn (deleteInputsQry pattern)) refs - - , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs -> ReaderT $ \conn -> do - withTotalChanges (\ref -> - execute conn (markInputsQry ref) (Only (slotNo :: Int64))) - refs - - , pruneInputs = ReaderT $ \conn -> do - withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ do - traceExecute tr conn pruneInputsQry [ fromIntegral longestRollback :: Int64 ] + inputs + + , deleteInputs = \refs -> ReaderT $ \conn -> + (throwIO . DatabaseException "deleteInputs") `handle` do + withTotalChanges + (\pattern -> do + execute_ conn (markInputsQry pattern)) + refs + -- ^// TODO: Try to convert this to an `executeMany` call + + , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs -> ReaderT $ \conn -> + (throwIO . DatabaseException "markInputs") `handle` do + withTotalChanges (\pattern -> do + execute conn (markInputsQry pattern) $ Only (slotNo :: Int64)) + refs + + , pruneInputs = ReaderT $ \conn -> + (throwIO . DatabaseException "pruneInputs") `handle` do + withTemporaryIndex tr conn "inputsBySpentAt" "inputs" "spent_at" $ do + traceExecute tr conn pruneInputsQry [ fromIntegral longestRollback :: Int64 ] , foldInputs = \pattern_ slotRange statusFlag sortDirection yield -> ReaderT $ \conn -> do -- TODO: Allow resolving datums / scripts on demand through LEFT JOIN @@ -279,27 +294,27 @@ mkDatabase tr longestRollback bracketConnection = Database let (datum, refScript) = (Nothing, Nothing) (throwIO . UnexpectedRow "foldInputs") `handle` PG.forEach_ conn (foldInputsQry pattern_ slotRange statusFlag sortDirection) - (\(extendedOutputReference + (\(Binary extendedOutputReference , address - , value - , datumInfo - , refScriptHash + , Binary value + , fmap fromBinary -> datumInfo + , fmap fromBinary -> refScriptHash , ((fromIntegral :: Int64 -> Word64) -> createdAtSlotNo) - , createdAtHeaderHash + , Binary createdAtHeaderHash , (fmap (fromIntegral :: Int64 -> Word64) -> spentAtSlotNo) - , spentAtHeaderHash + , fmap fromBinary -> spentAtHeaderHash ) -> yield (DB.resultFromRow DB.Input{..})) , countInputs = \pattern_ -> ReaderT $ \conn -> do - handle (throwIO . UnexpectedRow "countInputs") $ + (throwIO . UnexpectedRow "countInputs") `handle` query_ conn (countInputsQry pattern_) >>= \case [Only n] -> pure n (length -> n) -> throwIO $ ExpectedSingletonResult "countInputs" n , countPolicies = \pattern_ -> ReaderT $ \conn -> do - handle (throwIO . UnexpectedRow "countPolicies") $ + (throwIO . 
UnexpectedRow "countPolicies") `handle` query_ conn (countPoliciesQry pattern_) >>= \case [Only n] -> pure n (length -> n) -> throwIO $ ExpectedSingletonResult "countPolicies" n @@ -310,17 +325,19 @@ mkDatabase tr longestRollback bracketConnection = Database PG.forEach_ conn (foldPoliciesQry pattern_) $ \(outputReference, policyId) -> yield DB.Policy{..} - , insertPolicies = \policies -> ReaderT $ \conn -> do - let - rows = flip Set.map policies $ \DB.Policy{..} -> - (outputReference, policyId) - insertRows @"policies" conn 2 rows + , insertPolicies = \policies -> ReaderT $ \conn -> + (throwIO . DatabaseException "insertPolicies") `handle` do + let + rows = flip Set.map policies $ \DB.Policy{..} -> + (outputReference, policyId) + insertRows @"policies" conn 2 rows - , insertCheckpoints = \cps -> ReaderT $ \conn -> do - let - rows = cps <&> \(DB.pointToRow -> DB.Checkpoint{..}) -> - (checkpointHeaderHash, ((fromIntegral :: Word64 -> Int64) checkpointSlotNo)) - insertRows @"checkpoints" conn 2 rows + , insertCheckpoints = \cps -> ReaderT $ \conn -> + (throwIO . DatabaseException "insertCheckpoints") `handle` do + let + rows = cps <&> \(DB.pointToRow -> DB.Checkpoint{..}) -> + (Binary checkpointHeaderHash, ((fromIntegral :: Word64 -> Int64) checkpointSlotNo)) + insertRows @"checkpoints" conn 2 rows , listCheckpointsDesc = ReaderT $ \conn -> do let @@ -332,34 +349,39 @@ mkDatabase tr longestRollback bracketConnection = Database where n = ceiling (log (fromIntegral @_ @Double k)) fmap (fmap DB.pointFromRow . nubOn DB.checkpointSlotNo . mconcat) $ forM points $ \pt -> - PG.fold conn listCheckpointsQry [pt :: Int64] [] $ - \xs (checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> - pure (DB.Checkpoint{..} : xs) + handle (throwIO . UnexpectedRow "listCheckpointsDesc") $ + PG.fold conn listCheckpointsQry [pt :: Int64] [] $ + \xs (checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> + pure (DB.Checkpoint{..} : xs) , listAncestorsDesc = \(SlotNo slotNo) n -> ReaderT $ \conn -> do - fmap reverse $ - PG.fold conn listAncestorQry ((fromIntegral :: Word64 -> Int32) slotNo, n) [] $ - \xs (checkpointHeaderHash, (fromIntegral :: Int32 -> Word64) -> checkpointSlotNo) -> - pure ((DB.pointFromRow DB.Checkpoint{..}) : xs) + handle (throwIO . DatabaseException "listAncestorsDesc") $ + fmap reverse $ + PG.fold conn listAncestorQry ((fromIntegral :: Word64 -> Int32) slotNo, n) [] $ + \xs (checkpointHeaderHash, (fromIntegral :: Int32 -> Word64) -> checkpointSlotNo) -> + pure ((DB.pointFromRow DB.Checkpoint{..}) : xs) - , insertPatterns = \patterns -> ReaderT $ \conn -> do - insertRows @"patterns" conn 1 $ Set.map (Only . patternToText) patterns + , insertPatterns = \patterns -> ReaderT $ \conn -> + handle (throwIO . DatabaseException "insertPatterns") $ + insertRows @"patterns" conn 1 $ Set.map (Only . patternToText) patterns , deletePattern = \pattern_-> ReaderT $ \conn -> do fromIntegral <$> execute conn "DELETE FROM patterns WHERE pattern = ?" (Only $ DB.patternToRow pattern_) - , listPatterns = ReaderT $ \conn -> do - fmap fromList - $ PG.fold_ conn "SELECT * FROM patterns" [] - $ \xs (Only x) -> pure (DB.patternFromRow x:xs) + , listPatterns = ReaderT $ \conn -> + handle (throwIO . 
DatabaseException "listPatterns") $ do + fmap fromList + $ PG.fold_ conn "SELECT * FROM patterns" [] + $ \xs (Only x) -> pure (DB.patternFromRow x:xs) - , insertBinaryData = \bin -> ReaderT $ \conn -> do - let - rows = bin <&> \DB.BinaryData{..} -> - (binaryDataHash, binaryData) - insertRows @"binary_data" conn 2 rows + , insertBinaryData = \bin -> ReaderT $ \conn -> + handle (throwIO . DatabaseException "insertBinaryData") $ do + let + rows = bin <&> \DB.BinaryData{..} -> + (binaryDataHash, binaryData) + insertRows @"binary_data" conn 2 rows , getBinaryData = \(DB.datumHashToRow -> binaryDataHash) -> ReaderT $ \conn -> do handle (throwIO . UnexpectedRow "getBinaryData") $ @@ -367,50 +389,55 @@ mkDatabase tr longestRollback bracketConnection = Database [Only binaryData] -> pure $ Just (DB.binaryDataFromRow DB.BinaryData{..}) [] -> - pure $ Nothing + pure Nothing (length -> n) -> throwIO $ ExpectedSingletonResult "getBinaryData" n - , pruneBinaryData = ReaderT $ \conn -> do - traceExecute_ tr conn pruneBinaryDataQry + , pruneBinaryData = ReaderT $ \conn -> + handle (throwIO . DatabaseException "pruneBinaryData") $ do + traceExecute_ tr conn pruneBinaryDataQry - , insertScripts = \scripts -> ReaderT $ \conn -> do - let - rows = scripts <&> \DB.ScriptReference{..} -> - (scriptHash, script) - insertRows @"scripts" conn 2 rows + , insertScripts = \scripts -> ReaderT $ \conn -> + handle (throwIO . DatabaseException "insertScripts") $ do + let + rows = scripts <&> \DB.ScriptReference{..} -> + (scriptHash, script) + insertRows @"scripts" conn 2 rows - , getScript = \(DB.scriptHashToRow -> scriptHash)-> ReaderT $ \conn -> do + , getScript = \(DB.scriptHashToRow -> scriptHash)-> ReaderT $ \conn -> handle (throwIO . UnexpectedRow "getScript") $ query conn getScriptQry (Only scriptHash) >>= \case [Only script] -> pure $ Just (DB.scriptFromRow DB.ScriptReference{..}) [] -> - pure $ Nothing + pure Nothing (length -> n) -> throwIO $ ExpectedSingletonResult "getScript" n , rollbackTo = \(fromIntegral . unSlotNo -> minSlotNo) -> ReaderT $ \conn -> do - query_ conn selectMaxCheckpointQry >>= \case - -- NOTE: Rolling back takes quite a bit of time and, when restarting - -- the application, we'll always be asked to rollback to the - -- _current tip_. In this case, there's nothing to delete or update, - -- so we can safely skip it. - [(currentSlotNo, _ :: ByteString)] | currentSlotNo == minSlotNo -> do - pure () - _otherwise -> do - withTemporaryIndex tr conn "inputsByCreatedAt" "inputs(created_at)" $ do - withTemporaryIndex tr conn "inputsBySpentAt" "inputs(spent_at)" $ void $ do - deleteInputsIncrementally tr conn minSlotNo - _ <- traceExecute tr conn rollbackQryUpdateInputs [ minSlotNo ] - traceExecute tr conn rollbackQryDeleteCheckpoints [ minSlotNo ] + handle (throwIO . DatabaseException "rollbackTo") $ + query_ conn selectMaxCheckpointQry >>= \case + -- NOTE: Rolling back takes quite a bit of time and, when restarting + -- the application, we'll always be asked to rollback to the + -- _current tip_. In this case, there's nothing to delete or update, + -- so we can safely skip it. 
+ [(currentSlotNo, _ :: ByteString)] | currentSlotNo == minSlotNo -> do + pure () + _otherwise -> do + withTemporaryIndex tr conn "inputsByCreatedAt" "inputs" "created_at" $ do + withTemporaryIndex tr conn "inputsBySpentAt" "inputs" "spent_at" $ void $ do + deleteInputsIncrementally tr conn minSlotNo + _ <- traceExecute tr conn rollbackQryUpdateInputs [ minSlotNo ] + traceExecute tr conn rollbackQryDeleteCheckpoints [ minSlotNo ] handle (throwIO . UnexpectedRow (show selectMaxCheckpointQry)) $ query_ conn selectMaxCheckpointQry >>= \case - [(fmap (fromIntegral :: Int64 -> Word64) -> Just checkpointSlotNo, Just checkpointHeaderHash)] -> + [((fromIntegral :: Int64 -> Word64) -> checkpointSlotNo, checkpointHeaderHash)] -> return $ Just (DB.pointFromRow DB.Checkpoint{..}) - [(Nothing, Nothing)] -> + [] -> return Nothing res -> throwIO $ ExpectedSingletonResult (show selectMaxCheckpointQry) (length res) + -- ^ // TODO: In SQLite, the pattern matches check for null values. I've changed the query, and + -- I think it should work without checking for null values, but let's check this. , optimize = return () -- // TODO: Review if optimize needs to happen with Postgres. Also determine if this can be hidden within the `Database` implementation. @@ -432,20 +459,22 @@ mkDatabase tr longestRollback bracketConnection = Database deleteInputsQry :: Pattern -> Query deleteInputsQry pattern_ = "DELETE FROM inputs" - <> additionalJoin - <> "WHERE" - <> whereClause - where - (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = patternToSql pattern_ + <> additionalJoin + <> "WHERE" + <> whereClause + where + (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = + patternToSql mkByteaLiteral pattern_ markInputsQry :: Pattern -> Query markInputsQry pattern_ = - "UPDATE inputs SET spent_at = ?" - <> additionalJoin - <> "WHERE" - <> whereClause - where - (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = patternToSql pattern_ + "UPDATE inputs SET spent_at = ? " + <> additionalJoin + <> " WHERE " + <> whereClause + where + (fromText -> whereClause, fromText . fromMaybe "" -> additionalJoin) = + patternToSql mkByteaLiteral pattern_ -- NOTE: This query only prune down a certain number of inputs at a time to keep his time bounded. The -- query in itself is quite expensive, and on large indexes, may takes several minutes. 
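-- For illustration (a rough sketch, hex elided): with the bytea builder, a
-- `MatchOutputReference` pattern produces no additional join and a where-clause
-- that inlines the base16-encoded reference, so `markInputsQry` above renders
-- roughly as
--
--     UPDATE inputs SET spent_at = ?  WHERE inputs.output_reference = '\x…'
--
-- whereas the SQLite variant of the same builder inlines x'…' for that pattern.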
@@ -500,7 +529,7 @@ foldInputsQry pattern_ slotRange statusFlag sortDirection = fromText $ \inputs.created_at " <> ordering (patternWhereClause, fromMaybe "" -> additionalJoin) = - patternToSql pattern_ + patternToSql mkByteaLiteral pattern_ ordering = case sortDirection of Asc -> "ASC" @@ -543,8 +572,8 @@ countInputsQry pattern_ = fromText $ <> " WHERE " <> patternWhereClause where - (patternWhereClause, fromMaybe "" -> additionalJoin) = - patternToSql pattern_ + (patternWhereClause, fromMaybe "" -> additionalJoin) = + patternToSql mkByteaLiteral pattern_ countPoliciesQry :: Pattern -> Query countPoliciesQry pattern_ = @@ -556,7 +585,7 @@ countPoliciesQry pattern_ = <> patternWhereClause where (fromText -> patternWhereClause, _) = - patternToSql pattern_ + patternToSql mkByteaLiteral pattern_ foldPoliciesQry :: Pattern -> Query foldPoliciesQry pattern_ = @@ -566,9 +595,9 @@ foldPoliciesQry pattern_ = \ON inputs.output_reference = policies.output_reference \ \WHERE " <> patternWhereClause - where - (fromText -> patternWhereClause, _) = - patternToSql pattern_ + where + (fromText -> patternWhereClause, _) = + patternToSql mkByteaLiteral pattern_ listCheckpointsQry :: Query listCheckpointsQry = @@ -617,7 +646,7 @@ getScriptQry = selectMaxCheckpointQry :: Query selectMaxCheckpointQry = - "SELECT MAX(slot_no),header_hash FROM checkpoints" + "SELECT slot_no,header_hash FROM checkpoints ORDER BY slot_no DESC LIMIT 1" deleteInputsIncrementally :: Tracer IO TraceConnection -> Connection -> Int64 -> IO () deleteInputsIncrementally tr conn minSlotNo = do @@ -626,8 +655,8 @@ deleteInputsIncrementally tr conn minSlotNo = do rollbackQryDeleteInputs :: Query rollbackQryDeleteInputs = - "DELETE FROM inputs WHERE rowid IN \ - \(SELECT rowid FROM inputs WHERE created_at > ? LIMIT " + "DELETE FROM inputs WHERE output_reference IN \ + \(SELECT output_reference FROM inputs WHERE created_at > ? LIMIT " <> show pruneInputsMaxIncrement <> ")" rollbackQryUpdateInputs :: Query @@ -695,11 +724,11 @@ installIndex tr conn name definition = do True -> traceWith tr (DatabaseIndexAlreadyExists name) -withTemporaryIndex :: Tracer IO TraceConnection -> Connection -> Text -> Text -> IO a -> IO a -withTemporaryIndex tr conn name definition action = do +withTemporaryIndex :: Tracer IO TraceConnection -> Connection -> Text -> Text -> Text -> IO a -> IO a +withTemporaryIndex tr conn name table column action = do exists <- indexDoesExist conn name unless exists $ traceWith tr (ConnectionCreateTemporaryIndex name) - _ <- execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (name, definition) + _ <- execute conn "CREATE INDEX IF NOT EXISTS ? ON ? ( ? )" (Identifier name, Identifier table, Identifier column) unless exists $ traceWith tr (ConnectionCreatedTemporaryIndex name) a <- action unless exists (dropIndexIfExists tr conn name True) @@ -711,18 +740,18 @@ withTemporaryIndex tr conn name definition action = do -- // TODO: Validate that this works indexDoesExist :: Connection -> Text -> IO Bool indexDoesExist conn name = do - query conn qry [name] <&> \case + query conn qry (Only name) <&> \case [Only n] | n > (0 :: Int64) -> True _doesNotExist -> False where - qry = "SELECT COUNT(*) FROM pg_indexes WHERE indexname = ?;" + qry = "SELECT COUNT(*) FROM pg_indexes WHERE indexname = ?" 
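-- A short sketch of why names go through `Identifier` here (assuming only
-- postgresql-simple; this mirrors the calls above and below rather than adding
-- anything new): a plain `Text` parameter is escaped as a string literal
-- ('...'), which is what `indexDoesExist` needs when comparing against
-- pg_indexes.indexname, while `Identifier` is rendered as a double-quoted
-- identifier, which is what DDL statements need for index/table/column names.
--
--     import Data.Int (Int64)
--     import Data.Text (Text)
--     import Database.PostgreSQL.Simple (Connection, execute)
--     import Database.PostgreSQL.Simple.Types (Identifier (..))
--
--     createIndexOn :: Connection -> Text -> Text -> Text -> IO Int64
--     createIndexOn conn name table column =
--       execute conn "CREATE INDEX IF NOT EXISTS ? ON ? ( ? )"
--         (Identifier name, Identifier table, Identifier column)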
dropIndexIfExists :: Tracer IO TraceConnection -> Connection -> Text -> Bool -> IO () dropIndexIfExists tr conn indexName wasTemporary = do whenM (indexDoesExist conn indexName) $ traceWith tr $ if wasTemporary then ConnectionRemoveTemporaryIndex{indexName} else ConnectionRemoveIndex{indexName} - void $ execute conn "DROP INDEX IF EXISTS ?" [indexName] + void $ execute conn "DROP INDEX IF EXISTS ?" $ Only $ Identifier indexName -- -- Migrations @@ -774,26 +803,27 @@ runMigrations tr conn = do migrations :: [Migration] migrations = - map - (\(idx, (sql, fromString -> version)) -> mkMigration idx version $ decodeUtf8 sql) - $ zip [1..] files - where - mkMigration :: Int -> Query -> Text -> Migration - mkMigration idx version sql = - ("INSERT INTO migrations (id, version) VALUES (" <> show idx <> ",'" <> version <> "');") - : (map (fromString . T.unpack) . filter (not . T.null . T.strip) . T.splitOn ";") sql - - files = - [ ($(embedFile "db/postgres/v1.0.0-beta/001.sql"), "v1.0.0.-beta/001.sql") - , ($(embedFile "db/postgres/v1.0.0/001.sql"), "v1.0.0/001.sql") - , ($(embedFile "db/postgres/v1.0.0/002.sql"), "v1.0.0/002.sql") - -- // TODO: Confirm this is ok to delete!, ($(embedFile "db/postgres/v1.0.1/001.sql"), "v1.0.1/001.sql") - , ($(embedFile "db/postgres/v2.0.0-beta/001.sql"), "v2.0.0-beta/001.sql") - , ($(embedFile "db/postgres/v2.1.0/001.sql"), "v2.1.0/001.sql") - , ($(embedFile "db/postgres/v2.1.0/002.sql"), "v2.1.0/002.sql") - , ($(embedFile "db/postgres/v2.1.0/003.sql"), "v2.1.0/003.sql") - , ($(embedFile "db/postgres/v2.2.0/001.sql"), "v2.2.0/001.sql") - ] + zipWith + ((\idx (sql, version) -> mkMigration idx (fromString version) $ decodeUtf8 sql)) + [1..] + files + where + mkMigration :: Int -> Query -> Text -> Migration + mkMigration idx version sql = + ("INSERT INTO migrations (id, version) VALUES (" <> show idx <> ",'" <> version <> "');") + : (map (fromString . T.unpack) . filter (not . T.null . T.strip) . T.splitOn ";") sql + + files = + [ ($(embedFile "db/postgres/v1.0.0-beta/001.sql"), "v1.0.0.-beta/001.sql") + , ($(embedFile "db/postgres/v1.0.0/001.sql"), "v1.0.0/001.sql") + , ($(embedFile "db/postgres/v1.0.0/002.sql"), "v1.0.0/002.sql") + -- // TODO: Confirm this is ok to delete!, ($(embedFile "db/postgres/v1.0.1/001.sql"), "v1.0.1/001.sql") + , ($(embedFile "db/postgres/v2.0.0-beta/001.sql"), "v2.0.0-beta/001.sql") + , ($(embedFile "db/postgres/v2.1.0/001.sql"), "v2.1.0/001.sql") + , ($(embedFile "db/postgres/v2.1.0/002.sql"), "v2.1.0/002.sql") + , ($(embedFile "db/postgres/v2.1.0/003.sql"), "v2.1.0/003.sql") + , ($(embedFile "db/postgres/v2.2.0/001.sql"), "v2.2.0/001.sql") + ] -- -- Helpers @@ -823,7 +853,7 @@ insertRows conn len rows = <> symbolVal (Proxy @tableName) <> " VALUES " <> mkValuePlaceholders len - <> "ON CONFLICT DO NOTHING" + <> " ON CONFLICT DO NOTHING" mkValuePlaceholders :: Int -> String mkValuePlaceholders n = @@ -847,6 +877,9 @@ withTransaction conn action = fromText :: Text -> Query fromText = fromString . 
T.unpack +mkByteaLiteral :: ByteString -> Text +mkByteaLiteral bytes = "'\\x" <> encodeBase16 bytes <> "'" + -- -- Exceptions & Tracing -- @@ -908,3 +941,10 @@ data ExpectedSingletonResultException , received :: !Int } deriving Show instance Exception ExpectedSingletonResultException + +data DatabaseException + = DatabaseException + { context :: !Text + , causedBy :: !SqlError + } deriving Show +instance Exception DatabaseException diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs index eb630ee..1ea32e0 100644 --- a/src/Kupo/App/Database/SQLite.hs +++ b/src/Kupo/App/Database/SQLite.hs @@ -47,6 +47,9 @@ module Kupo.App.Database.SQLite import Kupo.Prelude +import Control.Concurrent + ( getNumCapabilities + ) import Control.Exception ( IOException , handle @@ -58,9 +61,18 @@ import Control.Tracer ( Tracer , traceWith ) +import qualified Data.Char as Char import Data.FileEmbed ( embedFile ) +import Data.Pool + ( Pool + , defaultPoolConfig + , destroyAllResources + , newPool + , tryWithResource + , withResource + ) import Data.Scientific ( scientific ) @@ -87,10 +99,18 @@ import Database.SQLite.Simple , withConnection' , withStatement ) +import qualified Database.SQLite.Simple as Sqlite import GHC.TypeLits ( KnownSymbol , symbolVal ) +import Kupo.App.Database.Types + ( ConnectionType (..) + , DBPool (..) + , Database (..) + , TraceConnection (..) + , TraceDatabase (..) + ) import Kupo.Control.MonadAsync ( concurrently_ ) @@ -100,6 +120,10 @@ import Kupo.Control.MonadCatch import Kupo.Control.MonadDelay ( threadDelay ) +import Kupo.Control.MonadLog + ( TraceProgress (..) + , nullTracer + ) import Kupo.Control.MonadSTM ( MonadSTM (..) ) @@ -119,10 +143,12 @@ import Kupo.Data.Configuration , LongestRollback (..) , pruneInputsMaxIncrement ) +import qualified Kupo.Data.Configuration as Configuration import Kupo.Data.Database ( SortDirection (..) , patternToSql ) +import qualified Kupo.Data.Database as DB import Kupo.Data.Http.SlotRange ( Range (..) , RangeField (..) @@ -833,7 +859,8 @@ deleteInputsQry pattern_ = , whereClause ] where - (whereClause, fromMaybe "" -> additionalJoin) = patternToSql pattern_ + (whereClause, fromMaybe "" -> additionalJoin) = + patternToSql mkBlobLiteral pattern_ markInputsQry :: Pattern -> Query markInputsQry pattern_ = @@ -844,7 +871,8 @@ markInputsQry pattern_ = , whereClause ] where - (whereClause, fromMaybe "" -> additionalJoin) = patternToSql pattern_ + (whereClause, fromMaybe "" -> additionalJoin) = + patternToSql mkBlobLiteral pattern_ -- NOTE: This query only prune down a certain number of inputs at a time to keep his time bounded. The -- query in itself is quite expensive, and on large indexes, may takes several minutes. 
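-- For comparison with the Postgres module: with `mkBlobLiteral`, the same
-- `MatchOutputReference` pattern renders its where-clause against a blob
-- literal, roughly
--
--     WHERE inputs.output_reference = x'…'
--
-- only the literal syntax differs between the two backends; the surrounding
-- DELETE/UPDATE statements built above are otherwise unchanged.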
@@ -873,7 +901,7 @@ countPoliciesQry pattern_ = Query $ <> patternWhereClause where (patternWhereClause, _) = - patternToSql pattern_ + patternToSql mkBlobLiteral pattern_ foldPoliciesQry :: Pattern -> Query foldPoliciesQry pattern_ = Query $ @@ -885,7 +913,7 @@ foldPoliciesQry pattern_ = Query $ <> patternWhereClause where (patternWhereClause, _) = - patternToSql pattern_ + patternToSql mkBlobLiteral pattern_ countInputsQry :: Pattern -> Query countInputsQry pattern_ = Query $ @@ -895,7 +923,7 @@ countInputsQry pattern_ = Query $ <> patternWhereClause where (patternWhereClause, fromMaybe "" -> additionalJoin) = - patternToSql pattern_ + patternToSql mkBlobLiteral pattern_ foldInputsQry :: Pattern @@ -933,7 +961,7 @@ foldInputsQry pattern_ slotRange statusFlag sortDirection = \inputs.created_at " <> ordering (patternWhereClause, fromMaybe "" -> additionalJoin) = - patternToSql pattern_ + patternToSql mkBlobLiteral pattern_ ordering = case sortDirection of Asc -> "ASC" @@ -1115,6 +1143,9 @@ matchMaybeWord64 = \case _notSQLInteger -> Nothing {-# INLINABLE matchMaybeWord64 #-} +mkBlobLiteral :: ByteString -> Text +mkBlobLiteral bytes = "x'" <> encodeBase16 bytes <> "'" + -- -- Indexes -- diff --git a/src/Kupo/Data/Database.hs b/src/Kupo/Data/Database.hs index 77bdee4..3d1d2a8 100644 --- a/src/Kupo/Data/Database.hs +++ b/src/Kupo/Data/Database.hs @@ -668,9 +668,10 @@ getWord7s = do -- Invariant: cannot be called with 'MatchMetadataTag'; this pattern is only for indexing. patternToSql :: HasCallStack - => App.Pattern + => (ByteString -> Text) + -> App.Pattern -> (Text, Maybe Text) -patternToSql = \case +patternToSql fromBinary = \case App.MatchAny App.IncludingBootstrap -> ( "address LIKE '%'" , Nothing @@ -696,7 +697,7 @@ patternToSql = \case , Nothing ) App.MatchOutputReference ref -> - ( "inputs.output_reference = x'" <> x (outputReferenceToRow ref) <> "'" + ( "inputs.output_reference = " <> fromBinary (outputReferenceToRow ref) , Nothing ) App.MatchTransactionId txId -> @@ -705,17 +706,17 @@ patternToSql = \case upperBound = outputReferenceToRow (mkOutputReference txId maxBound) in ( "inputs.output_reference BETWEEN " - <> "x'" <> x lowerBound <> "'" - <> " AND " - <> "x'" <> x upperBound <> "'" + <> fromBinary lowerBound + <> " AND " + <> fromBinary upperBound , Nothing ) App.MatchPolicyId pid -> - ( "policies.output_reference >= 0 AND policy_id = x'" <> x (App.policyIdToBytes pid) <> "'" + ( "policies.output_reference >= 0 AND policy_id = " <> fromBinary (App.policyIdToBytes pid) , Just "JOIN policies ON inputs.output_reference = policies.output_reference" ) App.MatchAssetId (pid, _) -> - patternToSql (App.MatchPolicyId pid) + patternToSql fromBinary (App.MatchPolicyId pid) App.MatchMetadataTag{} -> error "patternToSql: called for 'MatchMetadataTag'" where diff --git a/test/Test/Kupo/Data/DatabaseSpec.hs b/test/Test/Kupo/Data/DatabaseSpec.hs index 5c1bc62..7e03054 100644 --- a/test/Test/Kupo/Data/DatabaseSpec.hs +++ b/test/Test/Kupo/Data/DatabaseSpec.hs @@ -224,7 +224,8 @@ spec = parallel $ do context "patternToSql" $ around withFixtureDatabase $ do forM_ patterns $ \(_, p, ms) -> do - let (whereClause, fromMaybe "" -> additionalJoin) = patternToSql p + let blobLiteral bytes = "x'" <> encodeBase16 bytes <> "'" + let (whereClause, fromMaybe "" -> additionalJoin) = patternToSql blobLiteral p let results = sort $ (\(_, out) -> getAddress out) <$> ms specify (toString whereClause) $ \conn -> do rows <- query_ conn $ Query $ unwords From c5e0d108b0d834caca42381c04c39748b518c3ed Mon Sep 17 
00:00:00 2001 From: Dominic Mayhew Date: Tue, 18 Jun 2024 15:24:20 -0700 Subject: [PATCH 4/9] Switch to `withDBPool` and fix data types for escaping in PG module - Switch to `withDBPool` to ensure proper cleanup of resoureces. - Use `Identifier` wrapper for table/column/index names to cause them to be wrapped in double quotes, but use `String`/`Text` for querying index names because the identifier is retrieved as a string (and therefore needs to be compared using single quotes). --- kupo.cabal | 9 +- src/Kupo.hs | 165 +++++++++-------- src/Kupo/App.hs | 2 +- src/Kupo/App/Configuration.hs | 2 +- src/Kupo/App/Database.hs | 21 ++- src/Kupo/App/Database/Postgres.hs | 265 +++++++++++++++++----------- src/Kupo/App/Database/SQLite.hs | 97 +++++----- src/Kupo/App/Database/Types.hs | 4 +- src/Kupo/App/Http.hs | 3 +- test/Test/Kupo/App/Http/Client.hs | 2 +- test/Test/Kupo/App/HttpSpec.hs | 2 +- test/Test/Kupo/AppSpec.hs | 137 +++++++------- test/Test/Kupo/Data/DatabaseSpec.hs | 59 ++++--- 13 files changed, 439 insertions(+), 329 deletions(-) diff --git a/kupo.cabal b/kupo.cabal index 5562cc6..b5a9970 100644 --- a/kupo.cabal +++ b/kupo.cabal @@ -57,7 +57,6 @@ library Kupo.App.ChainSync.Ogmios Kupo.App.Configuration Kupo.App.Database - Kupo.App.Database.Types Kupo.App.FetchBlock.Node Kupo.App.FetchBlock.Ogmios Kupo.App.Health @@ -124,6 +123,14 @@ library Kupo.Version.TH other-modules: Paths_kupo + Kupo.App.Database.Postgres + Kupo.App.Database.Types + else + other-modules: + Paths_kupo + Kupo.App.Database.SQLite + Kupo.App.Database.Types + hs-source-dirs: src default-extensions: diff --git a/src/Kupo.hs b/src/Kupo.hs index 78def2c..b759ece 100644 --- a/src/Kupo.hs +++ b/src/Kupo.hs @@ -62,12 +62,10 @@ import Kupo.App.Configuration , startOrResume ) import Kupo.App.Database - ( copyDatabase - , newDBPool - ) -import Kupo.App.Database.Types ( ConnectionType (..) , DBPool (..) + , copyDatabase + , withDBPool ) import Kupo.App.Health ( connectionStatusToggle @@ -99,8 +97,7 @@ import Kupo.Control.MonadSTM ( MonadSTM (..) ) import Kupo.Control.MonadThrow - ( finally - , throwIO + ( throwIO ) import Kupo.Data.Cardano ( IsBlock @@ -198,95 +195,91 @@ kupoWith tr withProducer withFetchBlock = } } <- ask - dbPool@DBPool { maxConcurrentReaders, maxConcurrentWriters } <- liftIO $ newDBPool + liftIO $ withDBPool (tracerDatabase tr) (isReadOnlyReplica config) databaseLocation - longestRollback - - liftIO $ logWith (tracerConfiguration tr) $ - ConfigurationMaxConcurrency - { maxConcurrentReaders - , maxConcurrentWriters - } + longestRollback $ \dbPool@DBPool { maxConcurrentReaders, maxConcurrentWriters } -> do - let run action - | isReadOnlyReplica config = - -- NOTE: 'ShortLived' is a bad name here. What it really means is 'occasional - -- writers but mostly readers'. However, in the 'ReadOnlyReplica' mode we only - -- ever allow read-only connections and never perform a single write. 
- (withDatabaseBlocking dbPool) ReadOnly (action InstallIndexesIfNotExist) - | otherwise = - handle - (\NodeTipHasBeenReached{distance} -> do - traceWith (tracerKupo tr) KupoRestartingWithIndexes { distance } - io InstallIndexesIfNotExist - ) - (io deferIndexes) - where - io indexMode = - (withDatabaseExclusiveWriter dbPool) indexMode (action indexMode) - `finally` destroyResources dbPool + liftIO $ logWith (tracerConfiguration tr) $ + ConfigurationMaxConcurrency + { maxConcurrentReaders + , maxConcurrentWriters + } - liftIO $ handle (onUnknownException crashWith) $ run $ \indexMode db -> do - patterns <- newPatternsCache (tracerConfiguration tr) config db - let statusToggle = connectionStatusToggle health - let tracerChainSync = contramap ConsumerChainSync . tracerConsumer - unless (isReadOnlyReplica config) $ do - atomically $ modifyTVar' health $ \h -> h { Health.configuration = Just indexMode } - withProducer $ \forceRollback mailbox producer -> do - withFetchBlock $ \fetchBlock -> do - concurrently4 - -- HTTP Server - ( httpServer - (tracerHttp tr) - (tryWithDatabase dbPool) - forceRollback - fetchBlock - patterns - (readHealth health) - serverHost - serverPort - ) + let run action + | isReadOnlyReplica config = + (withDatabaseBlocking dbPool) ReadOnly (action InstallIndexesIfNotExist) + | otherwise = + handle + (\NodeTipHasBeenReached{distance} -> do + traceWith (tracerKupo tr) KupoRestartingWithIndexes { distance } + io InstallIndexesIfNotExist + ) + (io deferIndexes) + where + io indexMode = + (withDatabaseExclusiveWriter dbPool) indexMode (action indexMode) - -- Block consumer fueling the database - ( if isReadOnlyReplica config then - readOnlyConsumer - health - db - else - consumer - (tracerConsumer tr) - inputManagement - (mkNotifyTip indexMode health) - mailbox + liftIO $ handle (onUnknownException crashWith) $ run $ \indexMode db -> do + patterns <- newPatternsCache (tracerConfiguration tr) config db + let statusToggle = connectionStatusToggle health + let tracerChainSync = contramap ConsumerChainSync . 
tracerConsumer + unless (isReadOnlyReplica config) $ do + atomically $ modifyTVar' health $ \h -> h { Health.configuration = Just indexMode } + withProducer $ \forceRollback mailbox producer -> do + withFetchBlock $ \fetchBlock -> do + concurrently4 + -- HTTP Server + ( httpServer + (tracerHttp tr) + (tryWithDatabase dbPool) + forceRollback + fetchBlock patterns - db - ) + (readHealth health) + serverHost + serverPort + ) - -- Database garbage-collector - ( if isReadOnlyReplica config then - idle - else - gardener - (tracerGardener tr) - config - patterns - (withDatabaseBlocking dbPool ReadWrite) - ) + -- Block consumer fueling the database + ( if isReadOnlyReplica config then + readOnlyConsumer + health + db + else + consumer + (tracerConsumer tr) + inputManagement + (mkNotifyTip indexMode health) + mailbox + patterns + db + ) + + -- Database garbage-collector + ( if isReadOnlyReplica config then + idle + else + gardener + (tracerGardener tr) + config + patterns + (withDatabaseBlocking dbPool ReadWrite) + ) - -- Block producer, fetching blocks from the network - ( if isReadOnlyReplica config then - toggleConnected statusToggle *> idle - else - withChainSyncExceptionHandler (tracerChainSync tr) statusToggle $ do - (mostRecentCheckpoint, checkpoints) <- startOrResume (tracerConfiguration tr) config db - initializeHealth health mostRecentCheckpoint - producer - (tracerChainSync tr) - checkpoints - statusToggle - ) + -- Block producer, fetching blocks from the network + ( if isReadOnlyReplica config then + toggleConnected statusToggle *> idle + else + withChainSyncExceptionHandler (tracerChainSync tr) statusToggle $ do + (mostRecentCheckpoint, checkpoints) <- startOrResume (tracerConfiguration tr) config db + initializeHealth health mostRecentCheckpoint + producer + (tracerChainSync tr) + checkpoints + statusToggle + ) where onUnknownException :: (SomeException -> IO ()) -> SomeException -> IO () diff --git a/src/Kupo/App.hs b/src/Kupo/App.hs index 7075cdc..e7e319b 100644 --- a/src/Kupo/App.hs +++ b/src/Kupo/App.hs @@ -38,7 +38,7 @@ import Kupo.App.Configuration ( TraceConfiguration (..) , parseNetworkParameters ) -import Kupo.App.Database.Types +import Kupo.App.Database ( DBTransaction , Database (..) ) diff --git a/src/Kupo/App/Configuration.hs b/src/Kupo/App/Configuration.hs index e8d1074..1609bd7 100644 --- a/src/Kupo/App/Configuration.hs +++ b/src/Kupo/App/Configuration.hs @@ -28,7 +28,7 @@ import Data.Aeson.Lens ( _String , key ) -import Kupo.App.Database.Types +import Kupo.App.Database ( Database (..) ) import Kupo.Control.MonadCatch diff --git a/src/Kupo/App/Database.hs b/src/Kupo/App/Database.hs index fbcb627..50789b3 100644 --- a/src/Kupo/App/Database.hs +++ b/src/Kupo/App/Database.hs @@ -8,9 +8,22 @@ module Kupo.App.Database ( -- // TODO: Fix documentation headers + Database (..) + , DBPool + ( DBPool + , tryWithDatabase + , withDatabaseBlocking + , withDatabaseExclusiveWriter + , maxConcurrentReaders + , maxConcurrentWriters + ) + , withDBPool + , ConnectionType (..) + , DBTransaction + -- ** Queries -- *** Inputs - deleteInputsQry + , deleteInputsQry , markInputsQry , pruneInputsQry , foldInputsQry @@ -32,7 +45,6 @@ module Kupo.App.Database -- * Setup , copyDatabase - , newDBPool -- * Internal , installIndexes @@ -40,8 +52,13 @@ module Kupo.App.Database -- * Tracer , TraceDatabase (..) 
+ + -- * Test helpers + , withTestDatabase ) where +import Kupo.App.Database.Types + #if postgres import Kupo.App.Database.Postgres #else diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs index fb433dc..3a51f1a 100644 --- a/src/Kupo/App/Database/Postgres.hs +++ b/src/Kupo/App/Database/Postgres.hs @@ -35,7 +35,7 @@ module Kupo.App.Database.Postgres , rollbackQryDeleteCheckpoints -- * Setup - , newDBPool + , withDBPool , copyDatabase -- * Internal @@ -44,6 +44,9 @@ module Kupo.App.Database.Postgres -- * Tracer , TraceDatabase (..) + + -- * Test Helpers + , withTestDatabase ) where import Kupo.Prelude @@ -69,6 +72,9 @@ import qualified Data.Char as Char import Data.FileEmbed ( embedFile ) +import Data.Maybe + ( fromJust + ) import Data.Pool ( Pool , defaultPoolConfig @@ -105,7 +111,6 @@ import GHC.TypeLits import Kupo.App.Database.Types ( ConnectionType (..) , DBPool (..) - , DBTransaction , Database (..) , TraceConnection (..) , TraceDatabase (..) @@ -140,7 +145,6 @@ import Kupo.Data.Http.StatusFlag ) import Kupo.Data.Pattern ( Pattern (..) - , patternToText ) import Numeric ( Floating (..) @@ -160,37 +164,57 @@ data FailedToCreateConnectionReason -- | Create a Database pool that uses separate pools for `ReadOnly` and `ReadWrite` connections. -- This function creates a database file if it does not already exist. -newDBPool :: (Tracer IO TraceDatabase) -> Bool -> Configuration.DatabaseLocation -> LongestRollback -> IO (DBPool IO) -newDBPool tr isReadOnly dbLocation longestRollback = do - maxConnections <- - (5*) <$> liftIO getNumCapabilities - - connectionPool <- liftIO $ newPool $ defaultPoolConfig - connectDb - (\Database{close} -> close) - 600 -- // TODO: Review concurrency requirements - maxConnections - - -- // TODO: Will running migrations here break things in a multi-client scenario? - bracket mkConnection PG.close $ runMigrations tr - - let - withDB :: forall a b. (Pool (Database IO) -> (Database IO -> IO a) -> IO b) -> ConnectionType -> (Database IO -> IO a) -> IO b - withDB withRes connType dbAction = +withDBPool + :: (Tracer IO TraceDatabase) + -> Bool + -> Configuration.DatabaseLocation + -> LongestRollback + -> (DBPool IO -> IO a) + -> IO a +withDBPool tr isReadOnly dbLocation longestRollback action = do + bracket mkPool destroy action + where + mkPool = do + maxConnections <- + (5*) <$> liftIO getNumCapabilities + + -- TODO: Will running migrations here break things in a multi-client scenario? + bracket mkConnection PG.close $ runMigrations tr + pool <- liftIO . newPool $ defaultPoolConfig + connectDb + (\Database{close} -> close) + 600 -- // TODO: Review concurrency requirements + maxConnections + + return DBPool + { tryWithDatabase = withDB pool tryWithResource + , withDatabaseBlocking = withDB pool withResource + , withDatabaseExclusiveWriter = withDatabaseExclusiveWriter pool + , maxConcurrentReaders = 0 + , maxConcurrentWriters = maxConnections + , destroy = destroyAllResources pool + } + + withDB + :: Pool (Database IO) + -> (Pool (Database IO) -> (Database IO -> IO a) -> IO b) + -> ConnectionType + -> (Database IO -> IO a) + -> IO b + withDB pool withRes connType dbAction = case connType of ReadWrite | isReadOnly -> fail "Cannot acquire a read/write connection on read-only replica" WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" - _ -> withRes connectionPool dbAction + _ -> withRes pool dbAction - -- // TODO: Acutally do something with defer indexes! And possibly actually provide a preferred connection? 
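-- A side note on the pool parameters used in `mkPool` above (a sketch assuming
-- only resource-pool and postgresql-simple; the patch itself pools wrapped
-- `Database IO` handles built by `connectDb`, not raw connections):
--
--     import Data.ByteString (ByteString)
--     import Data.Pool (Pool, defaultPoolConfig, newPool)
--     import Database.PostgreSQL.Simple (Connection, close, connectPostgreSQL)
--
--     -- create with connectPostgreSQL, destroy with close, keep idle
--     -- resources around for 600 seconds, cap the pool at maxConnections.
--     mkConnectionPool :: ByteString -> Int -> IO (Pool Connection)
--     mkConnectionPool uri maxConnections =
--       newPool $ defaultPoolConfig (connectPostgreSQL uri) close 600 maxConnections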
- withDatabaseExclusiveWriter :: DeferIndexesInstallation -> (Database IO -> IO a) -> IO a + -- // TODO: Acutally do something with defer indexes! And possibly actually provide a preferred connection? + withDatabaseExclusiveWriter + :: Pool (Database IO) + -> DeferIndexesInstallation + -> (Database IO -> IO a) + -> IO a + withDatabaseExclusiveWriter pool _deferIndexes = withResource pool - destroyResources = destroyAllResources connectionPool - withDatabaseExclusiveWriter _deferIndexes = withResource connectionPool - - return DBPool { tryWithDatabase = withDB tryWithResource, withDatabaseBlocking = withDB withResource, withDatabaseExclusiveWriter, maxConcurrentReaders = 0, maxConcurrentWriters = maxConnections, destroyResources } - - where connectDb = mkConnection <&> \conn -> mkDatabase trConn longestRollback (\dbAction -> dbAction conn) mkConnection = case dbLocation of @@ -241,11 +265,11 @@ mkDatabase tr longestRollback bracketConnection = Database handle (throwIO . DatabaseException "insertInptus" ) $ do mapM_ (\DB.Input{..} -> do insertRow @"inputs" conn 7 - ( extendedOutputReference + ( Binary extendedOutputReference , address - , value - , datumInfo - , refScriptHash + , Binary value + , Binary <$> datumInfo + , Binary <$> refScriptHash , (fromIntegral createdAtSlotNo :: Int64) , spentAtSlotNo ) @@ -254,45 +278,48 @@ mkDatabase tr longestRollback bracketConnection = Database pure () Just DB.BinaryData{..} -> insertRow @"binary_data" conn 2 - ( binaryDataHash - , binaryData + ( Binary binaryDataHash + , Binary binaryData ) case refScript of Nothing -> pure () Just DB.ScriptReference{..} -> insertRow @"scripts" conn 2 - ( scriptHash - , script + ( Binary scriptHash + , Binary script ) ) inputs , deleteInputs = \refs -> ReaderT $ \conn -> - (throwIO . DatabaseException "deleteInputs") `handle` do + handle (throwIO . DatabaseException "deleteInputs") $ do withTotalChanges (\pattern -> do execute_ conn (markInputsQry pattern)) refs - -- ^// TODO: Try to convert this to an `executeMany` call - - , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs -> ReaderT $ \conn -> - (throwIO . DatabaseException "markInputs") `handle` do + -- ^ TODO: Try to convert this to an `executeMany` call + + , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs -> ReaderT $ \conn -> + handle (throwIO . DatabaseException "markInputs") $ do withTotalChanges (\pattern -> do execute conn (markInputsQry pattern) $ Only (slotNo :: Int64)) refs + -- ^ TODO: Try to convert this to an `executeMany` call - , pruneInputs = ReaderT $ \conn -> - (throwIO . DatabaseException "pruneInputs") `handle` do + + , pruneInputs = ReaderT $ \conn -> + handle (throwIO . DatabaseException "pruneInputs") $ do withTemporaryIndex tr conn "inputsBySpentAt" "inputs" "spent_at" $ do traceExecute tr conn pruneInputsQry [ fromIntegral longestRollback :: Int64 ] - , foldInputs = \pattern_ slotRange statusFlag sortDirection yield -> ReaderT $ \conn -> do + , foldInputs = \pattern_ slotRange statusFlag sortDirection yield -> + ReaderT $ \conn -> do -- TODO: Allow resolving datums / scripts on demand through LEFT JOIN -- -- See [#21](https://github.com/CardanoSolutions/kupo/issues/21) let (datum, refScript) = (Nothing, Nothing) - (throwIO . UnexpectedRow "foldInputs") `handle` + handle (throwIO . 
DatabaseException "foldInputs") $ PG.forEach_ conn (foldInputsQry pattern_ slotRange statusFlag sortDirection) (\(Binary extendedOutputReference , address @@ -308,32 +335,32 @@ mkDatabase tr longestRollback bracketConnection = Database , countInputs = \pattern_ -> ReaderT $ \conn -> do - (throwIO . UnexpectedRow "countInputs") `handle` - query_ conn (countInputsQry pattern_) >>= \case - [Only n] -> pure n - (length -> n) -> throwIO $ ExpectedSingletonResult "countInputs" n + handle (throwIO . DatabaseException "countInputs") $ do + query_ conn (countInputsQry pattern_) >>= \case + [Only n] -> pure n + (length -> n) -> throwIO $ ExpectedSingletonResult "countInputs" n , countPolicies = \pattern_ -> ReaderT $ \conn -> do - (throwIO . UnexpectedRow "countPolicies") `handle` + handle (throwIO . DatabaseException "countPolicies") $ query_ conn (countPoliciesQry pattern_) >>= \case [Only n] -> pure n (length -> n) -> throwIO $ ExpectedSingletonResult "countPolicies" n , foldPolicies = \pattern_ yield -> ReaderT $ \conn -> do - handle (throwIO . UnexpectedRow "foldPolicies") $ + handle (throwIO . DatabaseException "foldPolicies") $ PG.forEach_ conn (foldPoliciesQry pattern_) $ - \(outputReference, policyId) -> yield DB.Policy{..} + \(Binary outputReference, Binary policyId) -> yield DB.Policy{..} , insertPolicies = \policies -> ReaderT $ \conn -> - (throwIO . DatabaseException "insertPolicies") `handle` do + handle (throwIO . DatabaseException "insertPolicies") $ do let rows = flip Set.map policies $ \DB.Policy{..} -> - (outputReference, policyId) + (Binary outputReference, Binary policyId) insertRows @"policies" conn 2 rows , insertCheckpoints = \cps -> ReaderT $ \conn -> - (throwIO . DatabaseException "insertCheckpoints") `handle` do + handle (throwIO . DatabaseException "insertCheckpoints") $ do let rows = cps <&> \(DB.pointToRow -> DB.Checkpoint{..}) -> (Binary checkpointHeaderHash, ((fromIntegral :: Word64 -> Int64) checkpointSlotNo)) @@ -348,27 +375,28 @@ mkDatabase tr longestRollback bracketConnection = Database [ k `div` (2 ^ e) | (e :: Integer) <- [ n-1, n-2 .. 0 ] ] where n = ceiling (log (fromIntegral @_ @Double k)) - fmap (fmap DB.pointFromRow . nubOn DB.checkpointSlotNo . mconcat) $ forM points $ \pt -> - handle (throwIO . UnexpectedRow "listCheckpointsDesc") $ + handle (throwIO . DatabaseException "listCheckpointsDesc") $ + fmap (fmap DB.pointFromRow . nubOn DB.checkpointSlotNo . mconcat) $ forM points $ \pt -> PG.fold conn listCheckpointsQry [pt :: Int64] [] $ - \xs (checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> + \xs (Binary checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> pure (DB.Checkpoint{..} : xs) , listAncestorsDesc = \(SlotNo slotNo) n -> ReaderT $ \conn -> do handle (throwIO . DatabaseException "listAncestorsDesc") $ fmap reverse $ - PG.fold conn listAncestorQry ((fromIntegral :: Word64 -> Int32) slotNo, n) [] $ - \xs (checkpointHeaderHash, (fromIntegral :: Int32 -> Word64) -> checkpointSlotNo) -> + PG.fold conn listAncestorQry (fromIntegral slotNo :: Int64, n) [] $ + \xs (Binary checkpointHeaderHash, (fromIntegral :: Int64 -> Word64) -> checkpointSlotNo) -> pure ((DB.pointFromRow DB.Checkpoint{..}) : xs) , insertPatterns = \patterns -> ReaderT $ \conn -> handle (throwIO . DatabaseException "insertPatterns") $ - insertRows @"patterns" conn 1 $ Set.map (Only . patternToText) patterns + insertRows @"patterns" conn 1 $ Set.map (Only . 
DB.patternToRow) patterns , deletePattern = \pattern_-> ReaderT $ \conn -> do - fromIntegral <$> - execute conn "DELETE FROM patterns WHERE pattern = ?" - (Only $ DB.patternToRow pattern_) + handle (throwIO . DatabaseException "deletePattern") $ do + fromIntegral <$> + execute conn "DELETE FROM patterns WHERE pattern = ?" + (Only $ DB.patternToRow pattern_) , listPatterns = ReaderT $ \conn -> handle (throwIO . DatabaseException "listPatterns") $ do @@ -380,13 +408,13 @@ mkDatabase tr longestRollback bracketConnection = Database handle (throwIO . DatabaseException "insertBinaryData") $ do let rows = bin <&> \DB.BinaryData{..} -> - (binaryDataHash, binaryData) + (Binary binaryDataHash, Binary binaryData) insertRows @"binary_data" conn 2 rows , getBinaryData = \(DB.datumHashToRow -> binaryDataHash) -> ReaderT $ \conn -> do - handle (throwIO . UnexpectedRow "getBinaryData") $ - query conn getBinaryDataQry (Only binaryDataHash) >>= \case - [Only binaryData] -> + handle (throwIO . DatabaseException "getBinaryData") $ + query conn getBinaryDataQry (Only $ Binary binaryDataHash) >>= \case + [Only (Binary binaryData)] -> pure $ Just (DB.binaryDataFromRow DB.BinaryData{..}) [] -> pure Nothing @@ -401,13 +429,13 @@ mkDatabase tr longestRollback bracketConnection = Database handle (throwIO . DatabaseException "insertScripts") $ do let rows = scripts <&> \DB.ScriptReference{..} -> - (scriptHash, script) + (Binary scriptHash, Binary script) insertRows @"scripts" conn 2 rows , getScript = \(DB.scriptHashToRow -> scriptHash)-> ReaderT $ \conn -> - handle (throwIO . UnexpectedRow "getScript") $ - query conn getScriptQry (Only scriptHash) >>= \case - [Only script] -> + handle (throwIO . DatabaseException "getScript") $ + query conn getScriptQry (Only $ Binary scriptHash) >>= \case + [Only (Binary script)] -> pure $ Just (DB.scriptFromRow DB.ScriptReference{..}) [] -> pure Nothing @@ -421,7 +449,7 @@ mkDatabase tr longestRollback bracketConnection = Database -- the application, we'll always be asked to rollback to the -- _current tip_. In this case, there's nothing to delete or update, -- so we can safely skip it. - [(currentSlotNo, _ :: ByteString)] | currentSlotNo == minSlotNo -> do + [(currentSlotNo, _ :: Binary ByteString)] | currentSlotNo == minSlotNo -> do pure () _otherwise -> do withTemporaryIndex tr conn "inputsByCreatedAt" "inputs" "created_at" $ do @@ -429,21 +457,23 @@ mkDatabase tr longestRollback bracketConnection = Database deleteInputsIncrementally tr conn minSlotNo _ <- traceExecute tr conn rollbackQryUpdateInputs [ minSlotNo ] traceExecute tr conn rollbackQryDeleteCheckpoints [ minSlotNo ] - handle (throwIO . UnexpectedRow (show selectMaxCheckpointQry)) $ + handle (throwIO . DatabaseException (show selectMaxCheckpointQry)) $ query_ conn selectMaxCheckpointQry >>= \case - [((fromIntegral :: Int64 -> Word64) -> checkpointSlotNo, checkpointHeaderHash)] -> + [((fromIntegral :: Int64 -> Word64) -> checkpointSlotNo, Binary checkpointHeaderHash)] -> return $ Just (DB.pointFromRow DB.Checkpoint{..}) [] -> return Nothing res -> throwIO $ ExpectedSingletonResult (show selectMaxCheckpointQry) (length res) - -- ^ // TODO: In SQLite, the pattern matches check for null values. I've changed the query, and + -- ^ TODO: In SQLite, the pattern matches check for null values. I've changed the query, and -- I think it should work without checking for null values, but let's check this. - , optimize = return () -- // TODO: Review if optimize needs to happen with Postgres. 
Also determine if this can be hidden within the `Database` implementation. + , optimize = return () + -- ^ TODO: Review if optimize needs to happen with Postgres. + -- ^ Also determine if this can be hidden within the `Database` implementation. , runTransaction = \r -> bracketConnection $ \conn -> withTransaction conn (runReaderT r conn) - -- ^ // TODO: Check this. Do we need a retry in PG? + -- ^ TODO: Consider implementing a retry behavior , longestRollback @@ -667,12 +697,6 @@ rollbackQryDeleteCheckpoints :: Query rollbackQryDeleteCheckpoints = "DELETE FROM checkpoints WHERE slot_no > ?" --- // TODO: Header comment for this section -withTotalChanges :: forall t a. (Foldable t) => (a -> IO Int64) -> t a -> IO Int -withTotalChanges io t = - fromIntegral <$> - foldM (\accum a -> (accum +) <$> io a) 0 t - -- -- Indexes -- @@ -697,13 +721,13 @@ installIndexes tr conn = \case InstallIndexesIfNotExist -> do installIndex tr conn "inputsByAddress" - "inputs(address)" -- // TODO: I deleted the collate nocase clause + "inputs(address)" -- TODO: Find a substitute for SQLite's nocase clause. installIndex tr conn "inputsByDatumHash" "inputs(datum_hash)" installIndex tr conn "inputsByPaymentCredential" - "inputs(payment_credential)" -- // TODO: I deleted the nocase clause + "inputs(payment_credential)" -- TODO: Find a substitute for SQLite's nocase clause. installIndex tr conn "inputsByCreatedAt" "inputs(created_at)" @@ -720,27 +744,31 @@ installIndex tr conn name definition = do indexDoesExist conn name >>= \case False -> do traceWith tr (DatabaseCreateIndex name) - void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (T.unpack name, T.unpack definition) + void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (Identifier name, Identifier definition) True -> traceWith tr (DatabaseIndexAlreadyExists name) withTemporaryIndex :: Tracer IO TraceConnection -> Connection -> Text -> Text -> Text -> IO a -> IO a withTemporaryIndex tr conn name table column action = do exists <- indexDoesExist conn name - unless exists $ traceWith tr (ConnectionCreateTemporaryIndex name) - _ <- execute conn "CREATE INDEX IF NOT EXISTS ? ON ? ( ? )" (Identifier name, Identifier table, Identifier column) - unless exists $ traceWith tr (ConnectionCreatedTemporaryIndex name) + unless exists $ do + traceWith tr $ ConnectionCreateTemporaryIndex name + void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ? ( ? )" + (Identifier name, Identifier table, Identifier column) + traceWith tr (ConnectionCreatedTemporaryIndex name) a <- action - unless exists (dropIndexIfExists tr conn name True) + unless exists $ do + traceWith tr $ ConnectionRemoveTemporaryIndex name + dropIndexIfExists tr conn name True return a -- | Check whether an index exists in the database. Handy to customize the behavior (e.g. logging) -- depending on whether or not indexes are already there since 'CREATE INDEX IF NOT EXISTS' will not -- tell whether or not it has indeed created something. 
--- // TODO: Validate that this works +-- // TODO: I don't think this works indexDoesExist :: Connection -> Text -> IO Bool -indexDoesExist conn name = do - query conn qry (Only name) <&> \case +indexDoesExist conn indexName = do + query conn qry (Only indexName) <&> \case [Only n] | n > (0 :: Int64) -> True _doesNotExist -> False where @@ -748,10 +776,11 @@ indexDoesExist conn name = do dropIndexIfExists :: Tracer IO TraceConnection -> Connection -> Text -> Bool -> IO () dropIndexIfExists tr conn indexName wasTemporary = do - whenM (indexDoesExist conn indexName) $ traceWith tr $ if wasTemporary - then ConnectionRemoveTemporaryIndex{indexName} - else ConnectionRemoveIndex{indexName} - void $ execute conn "DROP INDEX IF EXISTS ?" $ Only $ Identifier indexName + whenM (indexDoesExist conn indexName) $ do + traceWith tr $ if wasTemporary + then ConnectionRemoveTemporaryIndex{indexName} + else ConnectionRemoveIndex{indexName} + void . execute conn "DROP INDEX IF EXISTS ?" . Only $ Identifier indexName -- -- Migrations @@ -764,7 +793,7 @@ type Migration = [Query] databaseVersion :: Connection -> IO MigrationRevision databaseVersion conn = do _ <- execute_ conn createStatement - (throwIO . UnexpectedRow "databaseVersion") `handle` + handle (throwIO . DatabaseException "databaseVersion") $ query_ conn countStatement >>= \case [(revision, _version :: String)] -> return revision [] -> return 0 @@ -817,7 +846,7 @@ migrations = [ ($(embedFile "db/postgres/v1.0.0-beta/001.sql"), "v1.0.0.-beta/001.sql") , ($(embedFile "db/postgres/v1.0.0/001.sql"), "v1.0.0/001.sql") , ($(embedFile "db/postgres/v1.0.0/002.sql"), "v1.0.0/002.sql") - -- // TODO: Confirm this is ok to delete!, ($(embedFile "db/postgres/v1.0.1/001.sql"), "v1.0.1/001.sql") + , ($(embedFile "db/postgres/v1.0.1/001.sql"), "v1.0.1/001.sql") , ($(embedFile "db/postgres/v2.0.0-beta/001.sql"), "v2.0.0-beta/001.sql") , ($(embedFile "db/postgres/v2.1.0/001.sql"), "v2.1.0/001.sql") , ($(embedFile "db/postgres/v2.1.0/002.sql"), "v2.1.0/002.sql") @@ -880,6 +909,34 @@ fromText = fromString . T.unpack mkByteaLiteral :: ByteString -> Text mkByteaLiteral bytes = "'\\x" <> encodeBase16 bytes <> "'" +withTotalChanges :: forall t a. (Foldable t) => (a -> IO Int64) -> t a -> IO Int +withTotalChanges io t = + fromIntegral <$> + foldM (\accum a -> (accum +) <$> io a) 0 t + +-- +-- Test helper +-- + +withTestDatabase :: String -> (Configuration.DatabaseLocation -> IO a) -> IO a +withTestDatabase dbName action = do + bracket createDb dropDb (const $ action dbLocation) + where + connectInfo name = PG.defaultConnectInfo + { PG.connectUser = "kupotest" + , PG.connectPassword = "kupo" + , PG.connectDatabase = name + } + + createDb = void $ PG.withConnect (connectInfo "kupo") $ \conn -> + execute conn "CREATE DATABASE ?" (Only . Identifier $ fromString dbName) + + dropDb = const . void $ PG.withConnect (connectInfo "kupo") $ \conn -> + execute conn "DROP DATABASE ?" (Only . Identifier $ fromString dbName) + + dbLocation = Configuration.Remote $ fromJust $ URI.mkURI $ + "postgresql://kupotest:kupo@localhost/" <> T.pack dbName + -- -- Exceptions & Tracing -- @@ -929,12 +986,6 @@ trim = . show --- | Something went wrong when unmarshalling data from the database. 
-data UnexpectedRowException - = UnexpectedRow !Text !PG.ResultError - deriving Show -instance Exception UnexpectedRowException - data ExpectedSingletonResultException = ExpectedSingletonResult { context :: !Text diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs index 1ea32e0..c365da3 100644 --- a/src/Kupo/App/Database/SQLite.hs +++ b/src/Kupo/App/Database/SQLite.hs @@ -33,7 +33,7 @@ module Kupo.App.Database.SQLite , rollbackQryDeleteCheckpoints -- * Setup - , newDBPool + , withDBPool , copyDatabase -- * Internal @@ -43,6 +43,9 @@ module Kupo.App.Database.SQLite -- * Tracer , TraceDatabase (..) + + -- * Test Helpers + , withTestDatabase ) where import Kupo.Prelude @@ -400,55 +403,63 @@ withLongLivedConnection tr (DBLock shortLived longLived) k file deferIndexes act -- | Create a Database pool that uses separate pools for `ReadOnly` and `ReadWrite` connections. -- This function creates a database file if it does not already exist. -newDBPool +withDBPool :: (Tracer IO TraceDatabase) -> Bool -> Configuration.DatabaseLocation -> LongestRollback - -> IO (DBPool IO) -newDBPool tr isReadOnly dbLocation longestRollback = do - dbFile <- newDatabaseFile tr dbLocation - lock <- liftIO newLock - - (maxConcurrentWriters, maxConcurrentReaders) <- - liftIO getNumCapabilities <&> \n -> (n, 5 * n) - - readOnlyPool <- liftIO $ newPool $ defaultPoolConfig - (createShortLivedConnection tr ReadOnly lock longestRollback dbFile) - (\Database{close} -> close) - 600 - maxConcurrentReaders - - readWritePool <- liftIO $ newPool $ defaultPoolConfig - (createShortLivedConnection tr ReadWrite lock longestRollback dbFile) - (\Database{close} -> close) - 30 - maxConcurrentWriters - - let - withDB :: forall a b. (Pool (Database IO) -> (Database IO -> IO a) -> IO b) -> ConnectionType -> (Database IO -> IO a) -> IO b - withDB withRes connType dbAction = + -> (DBPool IO -> IO a) + -> IO a +withDBPool tr isReadOnly dbLocation longestRollback action = do + bracket mkPool destroy action + where + mkPool = do + dbFile <- newDatabaseFile tr dbLocation + lock <- liftIO newLock + + (maxConcurrentWriters, maxConcurrentReaders) <- + liftIO getNumCapabilities <&> \n -> (n, 5 * n) + + readOnlyPool <- liftIO $ newPool $ defaultPoolConfig + (createShortLivedConnection tr ReadOnly lock longestRollback dbFile) + (\Database{close} -> close) + 600 + maxConcurrentReaders + + readWritePool <- liftIO $ newPool $ defaultPoolConfig + (createShortLivedConnection tr ReadWrite lock longestRollback dbFile) + (\Database{close} -> close) + 30 + maxConcurrentWriters + + return DBPool + { tryWithDatabase = + withDB tryWithResource + , withDatabaseBlocking = + withDB withResource + , withDatabaseExclusiveWriter = + withLongLivedConnection tr lock longestRollback dbFile + , maxConcurrentReaders + , maxConcurrentWriters = + if isReadOnly then 0 else maxConcurrentWriters + , destroy = do + destroyAllResources readOnlyPool + destroyAllResources readWritePool + } + + withDB + :: Pool (Database IO) + -> (Pool (Database IO) -> (Database IO -> IO a) -> IO b) + -> ConnectionType + -> (Database IO -> IO a) + -> IO b + withDB pool withRes connType dbAction = case connType of ReadOnly -> withRes readOnlyPool dbAction ReadWrite | isReadOnly -> fail "Cannot acquire a read/write connection on read-only replica" ReadWrite -> withRes readWritePool dbAction WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" 
- return DBPool - { tryWithDatabase = - withDB tryWithResource - , withDatabaseBlocking = - withDB withResource - , withDatabaseExclusiveWriter = - withLongLivedConnection tr lock longestRollback dbFile - , destroyResources = do - destroyAllResources readOnlyPool - destroyAllResources readWritePool - , maxConcurrentReaders - , maxConcurrentWriters = - if isReadOnly then 0 else maxConcurrentWriters - } - -- It is therefore also the connection from which we check for and run database migrations when -- needed. Note that this bracket will also create the database if it doesn't exist. withWriteOnlyConnection @@ -1146,6 +1157,12 @@ matchMaybeWord64 = \case mkBlobLiteral :: ByteString -> Text mkBlobLiteral bytes = "x'" <> encodeBase16 bytes <> "'" +withTestDatabase :: String -> (Configuration.DatabaseLocation -> IO a) -> IO a +withTestDatabase _dbName action = do + action $ Configuration.InMemory Nothing +-- ^// TODO: Create the proper connection string + + -- -- Indexes -- diff --git a/src/Kupo/App/Database/Types.hs b/src/Kupo/App/Database/Types.hs index 536bd4e..8dbec26 100644 --- a/src/Kupo/App/Database/Types.hs +++ b/src/Kupo/App/Database/Types.hs @@ -209,8 +209,8 @@ data DBPool m = DBPool , maxConcurrentWriters :: Int - - , destroyResources + + , destroy :: m () } diff --git a/src/Kupo/App/Http.hs b/src/Kupo/App/Http.hs index cbcb127..ec70a49 100644 --- a/src/Kupo/App/Http.hs +++ b/src/Kupo/App/Http.hs @@ -23,7 +23,7 @@ import Kupo.Prelude import Data.Aeson ( (.:) ) -import Kupo.App.Database.Types +import Kupo.App.Database ( ConnectionType (..) , DBTransaction , Database (..) @@ -223,6 +223,7 @@ httpServer tr withDatabase forceRollback fetchBlock patternsVar readHealth host onServerError (hint :: SomeException) = do logWith tr $ HttpUnexpectedError (toText $ displayException hint) + putStrLn $ displayException hint Just <$> send Errors.serverError retrying attempts retryingIn io = io >>= \case diff --git a/test/Test/Kupo/App/Http/Client.hs b/test/Test/Kupo/App/Http/Client.hs index 213caa1..209a679 100644 --- a/test/Test/Kupo/App/Http/Client.hs +++ b/test/Test/Kupo/App/Http/Client.hs @@ -259,7 +259,7 @@ newHttpClientWith manager (serverHost, serverPort) log = fail (show e) Right Json.Null -> do pure Nothing - Right val -> maybe (fail "failed to decode Datum.") (pure . Just) $ do + Right val -> maybe (fail $ "failed to decode Datum: " <> show body) (pure . Just) $ do bytes <- val ^? key "datum" . _String binaryDataFromBytes (unsafeDecodeBase16 bytes) diff --git a/test/Test/Kupo/App/HttpSpec.hs b/test/Test/Kupo/App/HttpSpec.hs index 4f920dd..f1cbd5d 100644 --- a/test/Test/Kupo/App/HttpSpec.hs +++ b/test/Test/Kupo/App/HttpSpec.hs @@ -38,7 +38,7 @@ import Data.OpenApi , schemas , validateJSON ) -import Kupo.App.Database.Types +import Kupo.App.Database ( Database (..) ) import Kupo.App.Http diff --git a/test/Test/Kupo/AppSpec.hs b/test/Test/Kupo/AppSpec.hs index c655ce2..726325a 100644 --- a/test/Test/Kupo/AppSpec.hs +++ b/test/Test/Kupo/AppSpec.hs @@ -29,9 +29,6 @@ import Control.Concurrent.STM.TVar , readTVar , writeTVar ) -import Control.Monad.Class.MonadAsync - ( link - ) import GHC.Generics ( Generic1 ) @@ -43,6 +40,9 @@ import Kupo import Kupo.App ( ChainSyncClient ) +import Kupo.App.Database + ( withTestDatabase + ) import Kupo.App.Mailbox ( Mailbox , newMailbox @@ -111,12 +111,12 @@ import Kupo.Data.ChainSync import Kupo.Data.Configuration ( ChainProducer (..) , Configuration (..) - , DatabaseLocation (..) , DeferIndexesInstallation (..) , InputManagement (..) , LongestRollback (..) 
, mailboxCapacity ) +import qualified Kupo.Data.Configuration as Configuration import Kupo.Data.FetchBlock ( FetchBlockClient ) @@ -145,6 +145,7 @@ import Network.HTTP.Client import Network.WebSockets ( ConnectionException (..) ) +import qualified Prelude import Test.Hspec ( Spec , runIO @@ -212,12 +213,13 @@ import Test.StateMachine.Types , runGenSym ) +import Control.Monad.Class.MonadAsync + ( link + ) import qualified Data.Aeson.Encoding as Json import qualified Data.Map as Map import qualified Data.Set as Set import qualified Data.Text as T -import qualified GHC.Show as Show -import qualified Prelude import qualified Test.StateMachine.Types.Rank2 as Rank2 varStateMachineIterations :: String @@ -232,64 +234,75 @@ spec = do maxSuccess <- maybe 30 Prelude.read <$> runIO (lookupEnv varStateMachineIterations) + dbNum <- runIO $ newIORef (0 :: Int) + let mkDbName = do + n <- readIORef dbNum + modifyIORef' dbNum (+1) + return $ "kupotest" <> show n + prop "State-Machine" $ withMaxSuccess maxSuccess $ forAll genInputManagement $ \inputManagement -> do forAll genServerPort $ \serverPort -> do - let httpClient = newHttpClientWith manager (serverHost, serverPort) (\_ -> pure ()) - let stateMachine = StateMachine - initModel - transition - (precondition longestRollback) - postcondition - Nothing - (generator inputManagement) - shrinker - (semantics garbageCollectionInterval httpClient chan) - mock - (cleanup chan) - forAllCommands stateMachine Nothing $ \cmds -> monadicIO $ do - let config = Configuration - { chainProducer = CardanoNode -- NOTE: unused, but must be different than ReadOnlyReplica - { nodeSocket = "/dev/null" - , nodeConfig = "/dev/null" - } - , databaseLocation = InMemory Nothing - , serverHost - , serverPort - , since = Just GenesisPoint - , patterns = fromList [MatchAny IncludingBootstrap] - , inputManagement - , longestRollback - , garbageCollectionInterval - , deferIndexes - } - env <- run (newEnvironment config) - producer <- run (newMockProducer httpClient <$> atomically (dupTChan chan)) - fetchBlock <- run (newMockFetchBlock <$> atomically (dupTChan chan)) - let kupo = kupoWith tracers producer fetchBlock `runWith` env - asyncId <- run (async kupo) - run $ link asyncId - (_hist, model, res) <- runCommands stateMachine cmds - run $ cancel asyncId - - -- TODO: Check coverage using the history and label some interesting - -- test scenarios that are relevant to cover. - - monitor (label (show inputManagement)) - monitor (checkCommandNames cmds) - monitor $ counterexample $ toString $ unlines - [ T.intercalate "\n -" - ("== Commands ==" - : (show . 
getCommand <$> unCommands cmds) - ) - , "" - , "== Model ==" - , show model - , "" - , "== Assertion ==" - , show res - ] - assert (res == Ok) + let httpClient = newHttpClientWith manager (serverHost, serverPort) (\_ -> pure ()) + let stateMachine = StateMachine + initModel + transition + (precondition longestRollback) + postcondition + Nothing + (generator inputManagement) + shrinker + (semantics garbageCollectionInterval httpClient chan) + mock + (cleanup chan) + forAllCommands stateMachine Nothing $ \cmds -> monadicIO $ do + let + runKupo :: Configuration.DatabaseLocation -> IO () + runKupo dbLocation = do + let config = Configuration + { chainProducer = CardanoNode -- NOTE: unused, but must be different than ReadOnlyReplica + { nodeSocket = "/dev/null" + , nodeConfig = "/dev/null" + } + , databaseLocation = dbLocation + , serverHost + , serverPort + , since = Just GenesisPoint + , patterns = fromList [MatchAny IncludingBootstrap] + , inputManagement + , longestRollback + , garbageCollectionInterval + , deferIndexes + } + env <- newEnvironment config + producer <- newMockProducer httpClient <$> atomically (dupTChan chan) + fetchBlock <- newMockFetchBlock <$> atomically (dupTChan chan) + kupoWith tracers producer fetchBlock `runWith` env + + dbName <- run mkDbName + asyncId <- run . async $ withTestDatabase dbName runKupo + run $ link asyncId + (_hist, model, res) <- runCommands stateMachine cmds + run $ cancel asyncId + + -- TODO: Check coverage using the history and label some interesting + -- test scenarios that are relevant to cover. + + monitor (label (show inputManagement)) + monitor (checkCommandNames cmds) + monitor $ counterexample $ toString $ unlines + [ T.intercalate "\n -" + ("== Commands ==" + : (show . getCommand <$> unCommands cmds) + ) + , "" + , "== Model ==" + , show model + , "" + , "== Assertion ==" + , show res + ] + assert (res == Ok) where serverHost = "127.0.0.1" longestRollback = 10 @@ -299,7 +312,7 @@ spec = do genServerPort = sized $ \n -> do i <- arbitrary pure (1024 + n + i) - + -------------------------------------------------------------------------------- ---- Events / Respone -- diff --git a/test/Test/Kupo/Data/DatabaseSpec.hs b/test/Test/Kupo/Data/DatabaseSpec.hs index 7e03054..b3f7279 100644 --- a/test/Test/Kupo/Data/DatabaseSpec.hs +++ b/test/Test/Kupo/Data/DatabaseSpec.hs @@ -9,6 +9,23 @@ module Test.Kupo.Data.DatabaseSpec ( spec ) where +#if postgres +import Kupo.Prelude +import Test.Hspec + ( Spec + , describe + , it + , pendingWith + ) + +spec :: Spec +spec = describe "DatabaseSpec" $ + it "Not yet implemented for Postgres" $ + pendingWith $ "DB tests need to be split into DB-agnostic " + <> "and DB-specific tests and conditionally compiled" + +#else + import Kupo.Prelude import Data.List @@ -25,7 +42,10 @@ import Database.SQLite.Simple , withTransaction ) import Kupo.App.Database - ( deleteInputsQry + ( ConnectionType (..) + , DBPool (..) + , Database (..) + , deleteInputsQry , foldInputsQry , foldPoliciesQry , getBinaryDataQry @@ -34,18 +54,13 @@ import Kupo.App.Database , listAncestorQry , listCheckpointsQry , markInputsQry - , newDBPool , pruneBinaryDataQry , pruneInputsQry , rollbackQryDeleteCheckpoints , rollbackQryDeleteInputs , rollbackQryUpdateInputs , selectMaxCheckpointQry - ) -import Kupo.App.Database.Types - ( ConnectionType (..) - , DBPool (..) - , Database (..) 
+ , withDBPool ) import Kupo.Control.MonadAsync ( mapConcurrently_ @@ -196,14 +211,6 @@ import qualified Data.Set as Set import qualified Data.Text as T import qualified Prelude -#if postgres -spec :: Spec -spec = describe "DatabaseSpec" $ - it "Not yet implemented for Postgres" $ - pendingWith $ "DB tests need to be split into DB-agnostic " - <> "and DB-specific tests and conditionally compiled" -#else - spec :: Spec spec = parallel $ do context "fromRow ↔ toRow" $ do @@ -356,14 +363,17 @@ spec = parallel $ do ) [ ( "in-memory" , \test -> do - test =<< newDBPool nullTracer False + withDBPool + nullTracer + False (InMemory (Just "file::concurrent-read-write:?cache=shared&mode=memory")) k + test ) , ( "on-disk" , \test -> withSystemTempDirectory "kupo-database-concurrent" $ \dir -> do - test =<< newDBPool nullTracer False (Dir dir) k + withDBPool nullTracer False (Dir dir) k test ) ] @@ -1375,18 +1385,19 @@ withInMemoryDatabase = withInMemoryDatabase' run InstallIndexesIfNotExist withInMemoryDatabase' - :: forall (m :: Type -> Type) b. (Monad m) - => (forall a. IO a -> m a) + :: forall (m :: Type -> Type) b. + (forall a. IO a -> m a) -> DeferIndexesInstallation -> Word64 -> (Database IO -> IO b) -> m b withInMemoryDatabase' runInIO deferIndexes k action = do - pool <- runInIO $ newDBPool nullTracer - False - (InMemory (Just ":memory:")) - (LongestRollback { getLongestRollback = k }) - runInIO $ (withDatabaseExclusiveWriter pool) deferIndexes action + runInIO $ withDBPool + nullTracer + False + (InMemory (Just ":memory:")) + (LongestRollback { getLongestRollback = k }) + $ \DBPool {..} -> withDatabaseExclusiveWriter deferIndexes action forAllCheckpoints :: Testable prop From 83a7d02410215ab0698cb30461756d1b1cab4f65 Mon Sep 17 00:00:00 2001 From: Dominic Mayhew Date: Tue, 18 Jun 2024 15:24:59 -0700 Subject: [PATCH 5/9] Silence "NOTICE" statements during migrations --- db/postgres/v1.0.0-beta/001.sql | 2 + db/postgres/v1.0.0/001.sql | 2 + db/postgres/v1.0.0/002.sql | 2 + db/postgres/v1.0.1/001.sql | 43 ++++++ db/postgres/v2.0.0-beta/001.sql | 2 + db/postgres/v2.1.0/001.sql | 2 + db/postgres/v2.1.0/002.sql | 2 + db/postgres/v2.1.0/003.sql | 2 + db/postgres/v2.2.0/001.sql | 2 + kupo.cabal | 68 ++++----- package.yaml | 209 +++++++++++++++++++--------- src/Kupo/App/Database/SQLite.hs | 55 ++------ test/Test/Kupo/Data/DatabaseSpec.hs | 5 +- 13 files changed, 252 insertions(+), 144 deletions(-) create mode 100644 db/postgres/v1.0.1/001.sql diff --git a/db/postgres/v1.0.0-beta/001.sql b/db/postgres/v1.0.0-beta/001.sql index 3c47d52..028efef 100644 --- a/db/postgres/v1.0.0-beta/001.sql +++ b/db/postgres/v1.0.0-beta/001.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + CREATE TABLE IF NOT EXISTS inputs ( output_reference BYTEA NOT NULL, address TEXT NOT NULL, diff --git a/db/postgres/v1.0.0/001.sql b/db/postgres/v1.0.0/001.sql index 4fbfa0d..92fbd89 100644 --- a/db/postgres/v1.0.0/001.sql +++ b/db/postgres/v1.0.0/001.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + DROP TABLE inputs; CREATE TABLE IF NOT EXISTS inputs ( diff --git a/db/postgres/v1.0.0/002.sql b/db/postgres/v1.0.0/002.sql index 3fe492e..cea7f72 100644 --- a/db/postgres/v1.0.0/002.sql +++ b/db/postgres/v1.0.0/002.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + CREATE TABLE IF NOT EXISTS patterns ( pattern TEXT NOT NULL, PRIMARY KEY (pattern) diff --git a/db/postgres/v1.0.1/001.sql b/db/postgres/v1.0.1/001.sql new file mode 100644 index 0000000..7c4f270 --- /dev/null +++ b/db/postgres/v1.0.1/001.sql @@ -0,0 
+1,43 @@ +DELETE FROM inputs WHERE output_reference >= '\x825820f9ed2fef27cdcf60c863ba03f27d0e38f39c5047cf73ffdf2428b48edbe8323400' AND output_reference <= '\x825820f9ed2fef27cdcf60c863ba03f27d0e38f39c5047cf73ffdf2428b48edbe8323417'; +DELETE FROM inputs WHERE output_reference >= '\x8258209298f499a4c4aeba53a984cb4df0f9a93b7d158da4c2c2d12a06530841f94cd700' AND output_reference <= '\x8258209298f499a4c4aeba53a984cb4df0f9a93b7d158da4c2c2d12a06530841f94cd717'; +DELETE FROM inputs WHERE output_reference >= '\x82582037ae93fc2e1193f0a801d750f84bd3448e50b60915c42017c1077ff77060f4a800' AND output_reference <= '\x82582037ae93fc2e1193f0a801d750f84bd3448e50b60915c42017c1077ff77060f4a817'; +DELETE FROM inputs WHERE output_reference >= '\x82582095b2f040e566200ae41e65a6197d21090eeeeef0c15427b814a9b915158bc10100' AND output_reference <= '\x82582095b2f040e566200ae41e65a6197d21090eeeeef0c15427b814a9b915158bc10117'; +DELETE FROM inputs WHERE output_reference >= '\x825820bc48b849255582c073c0e703f4b70d51aeab81d35c5de32b7d364966aa1a544900' AND output_reference <= '\x825820bc48b849255582c073c0e703f4b70d51aeab81d35c5de32b7d364966aa1a544917'; +DELETE FROM inputs WHERE output_reference >= '\x825820df388c44bf8de15e6080007ea190dc1b48f62c862389d68396641a15131fe4e100' AND output_reference <= '\x825820df388c44bf8de15e6080007ea190dc1b48f62c862389d68396641a15131fe4e117'; +DELETE FROM inputs WHERE output_reference >= '\x825820899e6dfa57b0bae60069dd82c449ddcaed56dd09bd4483ca6c5c61584a0b777600' AND output_reference <= '\x825820899e6dfa57b0bae60069dd82c449ddcaed56dd09bd4483ca6c5c61584a0b777617'; +DELETE FROM inputs WHERE output_reference >= '\x825820b03c7a70cfcd11bac77bb9d6e5020ceeceec76b1f09d22960c79ea4b496ea53400' AND output_reference <= '\x825820b03c7a70cfcd11bac77bb9d6e5020ceeceec76b1f09d22960c79ea4b496ea53417'; +DELETE FROM inputs WHERE output_reference >= '\x825820116fc11e143f6e6c685ea23f4f191d5d54df39359ecdfc7d53506f048fc713b600' AND output_reference <= '\x825820116fc11e143f6e6c685ea23f4f191d5d54df39359ecdfc7d53506f048fc713b617'; +DELETE FROM inputs WHERE output_reference >= '\x82582062f7df4fce8cbe478c68f1cf4ab502a61c86c922bb10de025034177bd6ba4b0300' AND output_reference <= '\x82582062f7df4fce8cbe478c68f1cf4ab502a61c86c922bb10de025034177bd6ba4b0317'; +DELETE FROM inputs WHERE output_reference >= '\x825820e7a8172232688851112f088754247ff212313a564169fa029b1fdb830a04e5b500' AND output_reference <= '\x825820e7a8172232688851112f088754247ff212313a564169fa029b1fdb830a04e5b517'; +DELETE FROM inputs WHERE output_reference >= '\x82582089249ef42073233e87c98080c166d716624e1363b9e0f8b744cfcd040592998b00' AND output_reference <= '\x82582089249ef42073233e87c98080c166d716624e1363b9e0f8b744cfcd040592998b17'; +DELETE FROM inputs WHERE output_reference >= '\x825820a467c4f72d1be92f1c23b675a6313b6a8c7302ae28be9d36a9f3961458e6c20700' AND output_reference <= '\x825820a467c4f72d1be92f1c23b675a6313b6a8c7302ae28be9d36a9f3961458e6c20717'; +DELETE FROM inputs WHERE output_reference >= '\x82582058c8a04c8b78a99cd7337a6589489b54027fa16b918b3a45bddb78e39c65939500' AND output_reference <= '\x82582058c8a04c8b78a99cd7337a6589489b54027fa16b918b3a45bddb78e39c65939517'; +DELETE FROM inputs WHERE output_reference >= '\x82582043149210cbbfbc92bc2b199bb14cb15330414e2288ac31be92a3b5a490f9abfc00' AND output_reference <= '\x82582043149210cbbfbc92bc2b199bb14cb15330414e2288ac31be92a3b5a490f9abfc17'; +DELETE FROM inputs WHERE output_reference >= '\x8258208efd9d40433726a6693b6845078b97dc0c72cf381103bfec838331d8ea3475e700' AND output_reference <= 
'\x8258208efd9d40433726a6693b6845078b97dc0c72cf381103bfec838331d8ea3475e717'; +DELETE FROM inputs WHERE output_reference >= '\x825820efd21930e7b5b49f8d907a1b6ca5aeaa560334e762a05df9182ba4f1d1f6d43000' AND output_reference <= '\x825820efd21930e7b5b49f8d907a1b6ca5aeaa560334e762a05df9182ba4f1d1f6d43017'; +DELETE FROM inputs WHERE output_reference >= '\x825820e43f8adc738f3352ebd02a673ba9a69fb7ecf4612843166dd9d8081bbc29503b00' AND output_reference <= '\x825820e43f8adc738f3352ebd02a673ba9a69fb7ecf4612843166dd9d8081bbc29503b17'; +DELETE FROM inputs WHERE output_reference >= '\x825820eddd01ce4b025fe8c5eb9b69b45f551d34bc08649da876a89e95414e2ea9149700' AND output_reference <= '\x825820eddd01ce4b025fe8c5eb9b69b45f551d34bc08649da876a89e95414e2ea9149717'; +DELETE FROM inputs WHERE output_reference >= '\x825820a694dc595e1bfd0ca703374f89fdaa18a9faa5de8790ff724040df5d45e7af0500' AND output_reference <= '\x825820a694dc595e1bfd0ca703374f89fdaa18a9faa5de8790ff724040df5d45e7af0517'; +DELETE FROM inputs WHERE output_reference >= '\x8258205c19cd2d962c22b51e7de12626f577229fc7caa9c517920f034002b69ed897db00' AND output_reference <= '\x8258205c19cd2d962c22b51e7de12626f577229fc7caa9c517920f034002b69ed897db17'; +DELETE FROM inputs WHERE output_reference >= '\x825820d2f9a485d35a7ac67d2a9a69155d7ecd5713c960540799e58a615ffe5454868f00' AND output_reference <= '\x825820d2f9a485d35a7ac67d2a9a69155d7ecd5713c960540799e58a615ffe5454868f17'; +DELETE FROM inputs WHERE output_reference >= '\x825820f94d6cd0891629c71c4e82c71e01263e332e5ef8828b2bd247d6dd1dddc2ce4a00' AND output_reference <= '\x825820f94d6cd0891629c71c4e82c71e01263e332e5ef8828b2bd247d6dd1dddc2ce4a17'; +DELETE FROM inputs WHERE output_reference >= '\x825820ca08eb39a83cd68d6ff11205eda33cbcf62bc7ad71d9c2aae5cdab1ececa5d8100' AND output_reference <= '\x825820ca08eb39a83cd68d6ff11205eda33cbcf62bc7ad71d9c2aae5cdab1ececa5d8117'; +DELETE FROM inputs WHERE output_reference >= '\x825820afaba424e5c986f396bfa6d561fe0b7c98fa61408ffc3de3cae61e81c5bb90b200' AND output_reference <= '\x825820afaba424e5c986f396bfa6d561fe0b7c98fa61408ffc3de3cae61e81c5bb90b217'; +DELETE FROM inputs WHERE output_reference >= '\x825820ab82002b00646a154c046d4c79fd5c14d6de0c833fe01206007d5ffe90fa3fff00' AND output_reference <= '\x825820ab82002b00646a154c046d4c79fd5c14d6de0c833fe01206007d5ffe90fa3fff17'; +DELETE FROM inputs WHERE output_reference >= '\x8258207d590872f41c60b9be0870f5c0fb1859d7a033d4923ebd3fc08801434fb1e71600' AND output_reference <= '\x8258207d590872f41c60b9be0870f5c0fb1859d7a033d4923ebd3fc08801434fb1e71617'; +DELETE FROM inputs WHERE output_reference >= '\x8258209858125ad4041fc3b914617f6c51db15df2fff115a45155a5cc65908b46ce2ac00' AND output_reference <= '\x8258209858125ad4041fc3b914617f6c51db15df2fff115a45155a5cc65908b46ce2ac17'; +DELETE FROM inputs WHERE output_reference >= '\x8258202cdfaf95363604f0efb4993be35f03ccd2146863b151b49affea9d3af3344c4800' AND output_reference <= '\x8258202cdfaf95363604f0efb4993be35f03ccd2146863b151b49affea9d3af3344c4817'; +DELETE FROM inputs WHERE output_reference >= '\x825820a08fe6c359c03433a29f01787ec951e8626d827cd99241c156250a7340ba321400' AND output_reference <= '\x825820a08fe6c359c03433a29f01787ec951e8626d827cd99241c156250a7340ba321417'; +DELETE FROM inputs WHERE output_reference >= '\x8258208cb25305efc0e4f897d9c421039871803786617bec23467008c42ff646fd62fa00' AND output_reference <= '\x8258208cb25305efc0e4f897d9c421039871803786617bec23467008c42ff646fd62fa17'; +DELETE FROM inputs WHERE output_reference >= 
'\x825820bfab189d9addbab5030a0f1820470ded8e90621f39e9612563393526b690ff3e00' AND output_reference <= '\x825820bfab189d9addbab5030a0f1820470ded8e90621f39e9612563393526b690ff3e17'; +DELETE FROM inputs WHERE output_reference >= '\x82582007c3d8abc5b49ac8d844e50735bdc535250d29e79d159ad22c8308048e3bc6f200' AND output_reference <= '\x82582007c3d8abc5b49ac8d844e50735bdc535250d29e79d159ad22c8308048e3bc6f217'; +DELETE FROM inputs WHERE output_reference >= '\x8258203b1f218b82459345213d7cb2d9700b3f4087c6433051394edcb8f7d5fa9f994f00' AND output_reference <= '\x8258203b1f218b82459345213d7cb2d9700b3f4087c6433051394edcb8f7d5fa9f994f17'; +DELETE FROM inputs WHERE output_reference >= '\x825820860e932bec46e05a0212bd36db55152b8468378140a61f22127851f134dcbde400' AND output_reference <= '\x825820860e932bec46e05a0212bd36db55152b8468378140a61f22127851f134dcbde417'; +DELETE FROM inputs WHERE output_reference >= '\x8258205f81ef623e69122582f2a285a04092fe2a92afef42680e6a9049e8a8339857dc00' AND output_reference <= '\x8258205f81ef623e69122582f2a285a04092fe2a92afef42680e6a9049e8a8339857dc17'; +DELETE FROM inputs WHERE output_reference >= '\x8258203d1c3c7ae3db0bd1c4d310e953ca0f319b7adf58b5628b915e0f01948ec77f1d00' AND output_reference <= '\x8258203d1c3c7ae3db0bd1c4d310e953ca0f319b7adf58b5628b915e0f01948ec77f1d17'; +DELETE FROM inputs WHERE output_reference >= '\x82582041ae901251f5a3313b2e7fc1d121201c239df3f2b30c0097bd0cc1cb47bc61ae00' AND output_reference <= '\x82582041ae901251f5a3313b2e7fc1d121201c239df3f2b30c0097bd0cc1cb47bc61ae17'; +DELETE FROM inputs WHERE output_reference >= '\x8258208500961c30306118533ae18d3ef99baf8bbd2594cfac26cb012495070a5634eb00' AND output_reference <= '\x8258208500961c30306118533ae18d3ef99baf8bbd2594cfac26cb012495070a5634eb17'; +DELETE FROM inputs WHERE output_reference >= '\x82582098664faaddf6804e7478bb2d77f5c9d9aea3516d050a73db4c6b47b1e700424c00' AND output_reference <= '\x82582098664faaddf6804e7478bb2d77f5c9d9aea3516d050a73db4c6b47b1e700424c17'; +DELETE FROM inputs WHERE output_reference >= '\x825820e2dd9967b59e6a8a64afdf99c0b3519b5221d010515d6c68396e4ca00625733c00' AND output_reference <= '\x825820e2dd9967b59e6a8a64afdf99c0b3519b5221d010515d6c68396e4ca00625733c17'; +DELETE FROM inputs WHERE output_reference >= '\x825820c8db1cd16ed19f92917fa253615ade21e442bda56809f97744a1b3dfadea41f300' AND output_reference <= '\x825820c8db1cd16ed19f92917fa253615ade21e442bda56809f97744a1b3dfadea41f317'; +DELETE FROM inputs WHERE output_reference >= '\x825820787428e48b09bb8bcca74c11ddec7f2ad8d709b72850ef9a6904d81a3552d30700' AND output_reference <= '\x825820787428e48b09bb8bcca74c11ddec7f2ad8d709b72850ef9a6904d81a3552d30717'; diff --git a/db/postgres/v2.0.0-beta/001.sql b/db/postgres/v2.0.0-beta/001.sql index 442ab9b..aabcd26 100644 --- a/db/postgres/v2.0.0-beta/001.sql +++ b/db/postgres/v2.0.0-beta/001.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + DROP TABLE inputs; CREATE TABLE IF NOT EXISTS inputs ( diff --git a/db/postgres/v2.1.0/001.sql b/db/postgres/v2.1.0/001.sql index 602bbb5..b3e0ade 100644 --- a/db/postgres/v2.1.0/001.sql +++ b/db/postgres/v2.1.0/001.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + DELETE FROM inputs; DELETE FROM checkpoints; DELETE FROM binary_data; diff --git a/db/postgres/v2.1.0/002.sql b/db/postgres/v2.1.0/002.sql index 5edd502..2ffded9 100644 --- a/db/postgres/v2.1.0/002.sql +++ b/db/postgres/v2.1.0/002.sql @@ -1 +1,3 @@ +SET client_min_messages TO WARNING; + ALTER TABLE inputs ADD COLUMN payment_credential TEXT GENERATED ALWAYS AS (substr(address, -56)) 
STORED; diff --git a/db/postgres/v2.1.0/003.sql b/db/postgres/v2.1.0/003.sql index 065e3e5..e65a197 100644 --- a/db/postgres/v2.1.0/003.sql +++ b/db/postgres/v2.1.0/003.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + ALTER TABLE inputs RENAME COLUMN output_reference TO ext_output_reference; ALTER TABLE inputs ADD COLUMN output_reference BYTEA NOT NULL GENERATED ALWAYS AS (substr(ext_output_reference, 1, 34)) STORED; diff --git a/db/postgres/v2.2.0/001.sql b/db/postgres/v2.2.0/001.sql index 92d2478..194fe09 100644 --- a/db/postgres/v2.2.0/001.sql +++ b/db/postgres/v2.2.0/001.sql @@ -1,3 +1,5 @@ +SET client_min_messages TO WARNING; + DELETE FROM checkpoints; DELETE FROM policies; DELETE FROM inputs; diff --git a/kupo.cabal b/kupo.cabal index b5a9970..219f99d 100644 --- a/kupo.cabal +++ b/kupo.cabal @@ -1,6 +1,6 @@ cabal-version: 1.12 --- This file has been generated from package.yaml by hpack version 0.35.0. +-- This file has been generated from package.yaml by hpack version 0.36.0. -- -- see: https://github.com/sol/hpack @@ -19,15 +19,24 @@ license: MPL-2.0 license-file: LICENSE build-type: Simple extra-source-files: - db/v1.0.0-beta/001.sql - db/v1.0.0/001.sql - db/v1.0.0/002.sql - db/v1.0.1/001.sql - db/v2.0.0-beta/001.sql - db/v2.1.0/001.sql - db/v2.1.0/002.sql - db/v2.1.0/003.sql - db/v2.2.0/001.sql + db/postgres/v1.0.0-beta/001.sql + db/postgres/v1.0.0/001.sql + db/postgres/v1.0.0/002.sql + db/postgres/v1.0.1/001.sql + db/postgres/v2.0.0-beta/001.sql + db/postgres/v2.1.0/001.sql + db/postgres/v2.1.0/002.sql + db/postgres/v2.1.0/003.sql + db/postgres/v2.2.0/001.sql + db/sqlite/v1.0.0-beta/001.sql + db/sqlite/v1.0.0/001.sql + db/sqlite/v1.0.0/002.sql + db/sqlite/v1.0.1/001.sql + db/sqlite/v2.0.0-beta/001.sql + db/sqlite/v2.1.0/001.sql + db/sqlite/v2.1.0/002.sql + db/sqlite/v2.1.0/003.sql + db/sqlite/v2.2.0/001.sql data-files: docs/api/latest.yaml docs/api/v2.7.2.yaml @@ -123,14 +132,6 @@ library Kupo.Version.TH other-modules: Paths_kupo - Kupo.App.Database.Postgres - Kupo.App.Database.Types - else - other-modules: - Paths_kupo - Kupo.App.Database.SQLite - Kupo.App.Database.Types - hs-source-dirs: src default-extensions: @@ -171,11 +172,24 @@ library TypeSynonymInstances ViewPatterns ghc-options: -Wall -Wcompat -Widentities -Wincomplete-record-updates -Wincomplete-uni-patterns -Wpartial-fields -Wredundant-constraints -fno-warn-unticked-promoted-constructors -fno-warn-partial-fields + build-depends: + base >=4.7 && <5 + default-language: Haskell2010 + if flag(postgres) + other-modules: + Kupo.App.Database.Postgres + Kupo.App.Database.Types + cpp-options: -Dpostgres + else + other-modules: + Kupo.App.Database.SQLite + Kupo.App.Database.Types + if flag(production) + ghc-options: -Wunused-packages -Werror -O2 if flag(postgres) build-depends: aeson , attoparsec-aeson - , base >=4.7 && <5 , base16 , base58-bytestring , base64 @@ -244,7 +258,6 @@ library build-depends: aeson , attoparsec-aeson - , base >=4.7 && <5 , base16 , base58-bytestring , base64 @@ -290,6 +303,7 @@ library , ouroboros-network-api , ouroboros-network-framework , ouroboros-network-protocols + , postgresql-simple , prometheus , relude , resource-pool @@ -309,16 +323,6 @@ library , websockets , websockets-json , yaml - default-language: Haskell2010 - if flag(postgres) - other-modules: - Kupo.App.Database.Postgres - cpp-options: -Dpostgres - else - other-modules: - Kupo.App.Database.SQLite - if flag(production) - ghc-options: -Wunused-packages -Werror -O2 executable kupo main-is: Main.hs @@ -439,8 +443,6 @@ 
test-suite unit TypeSynonymInstances ViewPatterns ghc-options: -Wall -Wcompat -Widentities -Wincomplete-record-updates -Wincomplete-uni-patterns -Wpartial-fields -Wredundant-constraints -Wunused-packages -threaded -rtsopts -with-rtsopts=-N - if flag(postgres) - cpp-options: -Dpostgres build-tool-depends: hspec-discover:hspec-discover build-depends: @@ -474,3 +476,5 @@ test-suite unit , websockets , yaml default-language: Haskell2010 + if flag(postgres) + cpp-options: -Dpostgres diff --git a/package.yaml b/package.yaml index 6519905..f1251a1 100644 --- a/package.yaml +++ b/package.yaml @@ -43,87 +43,162 @@ library: cpp-options: -Dpostgres other-modules: - Kupo.App.Database.Postgres + - Kupo.App.Database.Types else: other-modules: - Kupo.App.Database.SQLite + - Kupo.App.Database.Types - condition: flag(production) ghc-options: - -Wunused-packages - -Werror - -O2 - dependencies: - - aeson - - attoparsec-aeson - - base16 - - base58-bytestring - - base64 - - bech32 - - bech32-th - - binary - - bytestring - - cardano-crypto-class - - cardano-crypto-wrapper - - cardano-ledger-allegra - - cardano-ledger-alonzo - - cardano-ledger-api - - cardano-ledger-babbage - - cardano-ledger-binary - - cardano-ledger-byron - - cardano-ledger-conway - - cardano-ledger-core - - cardano-ledger-mary - - cardano-ledger-shelley - - cardano-slotting - - cardano-strict-containers - - cborg - - containers - - contra-tracer - - contra-tracers - - directory - - fast-bech32 - - file-embed - - filepath - - generic-lens - - http-client - - http-types - - io-classes - - lens - - lens-aeson - - modern-uri - - network-mux - - optparse-applicative - - ouroboros-consensus - - ouroboros-consensus-cardano - - ouroboros-consensus-diffusion - - ouroboros-network - - ouroboros-network-api - - ouroboros-network-framework - - ouroboros-network-protocols - - prometheus - - relude - - resource-pool - - safe - - safe-exceptions - - scientific - - sqlite-simple - - template-haskell - - text - - text-ansi - - time - - transformers - - typed-protocols - - unix - - wai - - warp - - websockets - - websockets-json - - yaml + - condition: flag(postgres) + then: + dependencies: + - aeson + - attoparsec-aeson + - base16 + - base58-bytestring + - base64 + - bech32 + - bech32-th + - binary + - bytestring + - cardano-crypto-class + - cardano-crypto-wrapper + - cardano-ledger-allegra + - cardano-ledger-alonzo + - cardano-ledger-api + - cardano-ledger-babbage + - cardano-ledger-binary + - cardano-ledger-byron + - cardano-ledger-conway + - cardano-ledger-core + - cardano-ledger-mary + - cardano-ledger-shelley + - cardano-slotting + - cardano-strict-containers + - cborg + - containers + - contra-tracer + - contra-tracers + - directory + - fast-bech32 + - file-embed + - filepath + - generic-lens + - http-client + - http-types + - io-classes + - lens + - lens-aeson + - modern-uri + - network-mux + - optparse-applicative + - ouroboros-consensus + - ouroboros-consensus-cardano + - ouroboros-consensus-diffusion + - ouroboros-network + - ouroboros-network-api + - ouroboros-network-framework + - ouroboros-network-protocols + - postgresql-simple + - prometheus + - relude + - resource-pool + - safe + - safe-exceptions + - scientific + - template-haskell + - text + - text-ansi + - time + - transformers + - typed-protocols + - unix + - wai + - warp + - websockets + - websockets-json + - yaml + else: + dependencies: + - aeson + - attoparsec-aeson + - base16 + - base58-bytestring + - base64 + - bech32 + - bech32-th + - binary + - bytestring + - cardano-crypto-class + 
- cardano-crypto-wrapper + - cardano-ledger-allegra + - cardano-ledger-alonzo + - cardano-ledger-api + - cardano-ledger-babbage + - cardano-ledger-binary + - cardano-ledger-byron + - cardano-ledger-conway + - cardano-ledger-core + - cardano-ledger-mary + - cardano-ledger-shelley + - cardano-slotting + - cardano-strict-containers + - cborg + - containers + - contra-tracer + - contra-tracers + - directory + - fast-bech32 + - file-embed + - filepath + - generic-lens + - http-client + - http-types + - io-classes + - lens + - lens-aeson + - modern-uri + - network-mux + - optparse-applicative + - ouroboros-consensus + - ouroboros-consensus-cardano + - ouroboros-consensus-diffusion + - ouroboros-network + - ouroboros-network-api + - ouroboros-network-framework + - ouroboros-network-protocols + - prometheus + - relude + - resource-pool + - safe + - safe-exceptions + - scientific + - sqlite-simple + - template-haskell + - text + - text-ansi + - time + - transformers + - typed-protocols + - unix + - wai + - warp + - websockets + - websockets-json + - yaml tests: unit: main: Spec.hs source-dirs: test ghc-options: *ghc-options-test + when: + - condition: flag(postgres) + cpp-options: -Dpostgres dependencies: - aeson - base diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs index c365da3..215a1ae 100644 --- a/src/Kupo/App/Database/SQLite.hs +++ b/src/Kupo/App/Database/SQLite.hs @@ -181,40 +181,13 @@ import System.FilePath import System.IO.Error ( isAlreadyExistsError ) - -import Control.Concurrent - ( getNumCapabilities - ) -import Data.Pool - ( Pool - , defaultPoolConfig - , destroyAllResources - , newPool - , tryWithResource - , withResource - ) -import Kupo.App.Database.Types - ( ConnectionType (..) - , DBPool (..) - , Database (..) - , TraceConnection (..) - , TraceDatabase (..) - ) -import Kupo.Control.MonadLog - ( TraceProgress (..) - , nullTracer - ) import Text.URI ( URI ) -import qualified Data.Char as Char import qualified Data.Text as T import qualified Data.Text.Lazy.Builder as T import qualified Data.Text.Lazy.Builder as TL -import qualified Database.SQLite.Simple as Sqlite -import qualified Kupo.Data.Configuration as Configuration -import qualified Kupo.Data.Database as DB data DatabaseFile = OnDisk !FilePath | InMemory !(Maybe FilePath) deriving (Generic, Eq, Show) @@ -250,7 +223,7 @@ newDatabaseFile tr = \case \You must specify either a working directory or in-memory configuration. \ \Using a remote URL is only allowed on binaries compiled to use PostgreSQL." } - throwIO (FailedToAccessOrCreateDatabaseFile $ RemoteURLSpecifiedForSQLite url) + throwIO (FailedToAccessOrCreateDatabaseFile $ RemoteURLSpecifiedForSQLite url) newDatabaseOnDiskFile :: (MonadIO m) @@ -432,6 +405,19 @@ withDBPool tr isReadOnly dbLocation longestRollback action = do 30 maxConcurrentWriters + let + withDB + :: (Pool (Database IO) -> (Database IO -> IO a) -> IO b) + -> ConnectionType + -> (Database IO -> IO a) + -> IO b + withDB withRes connType dbAction = + case connType of + ReadOnly -> withRes readOnlyPool dbAction + ReadWrite | isReadOnly -> fail "Cannot acquire a read/write connection on read-only replica" + ReadWrite -> withRes readWritePool dbAction + WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" 
+ return DBPool { tryWithDatabase = withDB tryWithResource @@ -447,19 +433,6 @@ withDBPool tr isReadOnly dbLocation longestRollback action = do destroyAllResources readWritePool } - withDB - :: Pool (Database IO) - -> (Pool (Database IO) -> (Database IO -> IO a) -> IO b) - -> ConnectionType - -> (Database IO -> IO a) - -> IO b - withDB pool withRes connType dbAction = - case connType of - ReadOnly -> withRes readOnlyPool dbAction - ReadWrite | isReadOnly -> fail "Cannot acquire a read/write connection on read-only replica" - ReadWrite -> withRes readWritePool dbAction - WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" - -- It is therefore also the connection from which we check for and run database migrations when -- needed. Note that this bracket will also create the database if it doesn't exist. withWriteOnlyConnection diff --git a/test/Test/Kupo/Data/DatabaseSpec.hs b/test/Test/Kupo/Data/DatabaseSpec.hs index b3f7279..5d73eb7 100644 --- a/test/Test/Kupo/Data/DatabaseSpec.hs +++ b/test/Test/Kupo/Data/DatabaseSpec.hs @@ -10,6 +10,7 @@ module Test.Kupo.Data.DatabaseSpec ) where #if postgres + import Kupo.Prelude import Test.Hspec ( Spec @@ -142,11 +143,7 @@ import Test.Hspec , Spec , around , context - , describe - , hspec - , it , parallel - , pendingWith , shouldBe , specify ) From 0a0e3d9c6bbc0369f9b1214dabac9e896eaafb9d Mon Sep 17 00:00:00 2001 From: Dominic Mayhew Date: Tue, 18 Jun 2024 17:03:37 -0700 Subject: [PATCH 6/9] Tidy up TODOs --- src/Kupo/App/Database.hs | 4 ++-- src/Kupo/App/Database/Postgres.hs | 32 +++++++++++++++++-------------- src/Kupo/App/Database/SQLite.hs | 5 +---- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/Kupo/App/Database.hs b/src/Kupo/App/Database.hs index 50789b3..4a3909e 100644 --- a/src/Kupo/App/Database.hs +++ b/src/Kupo/App/Database.hs @@ -7,7 +7,7 @@ {-# LANGUAGE DuplicateRecordFields #-} module Kupo.App.Database - ( -- // TODO: Fix documentation headers + ( -- * Database DSL Database (..) , DBPool ( DBPool @@ -18,8 +18,8 @@ module Kupo.App.Database , maxConcurrentWriters ) , withDBPool - , ConnectionType (..) , DBTransaction + , ConnectionType (..) -- ** Queries -- *** Inputs diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs index 3a51f1a..d72689d 100644 --- a/src/Kupo/App/Database/Postgres.hs +++ b/src/Kupo/App/Database/Postgres.hs @@ -10,9 +10,7 @@ {-# LANGUAGE TemplateHaskell #-} module Kupo.App.Database.Postgres - ( - -- // TODO: Fix documentation headers - -- ** Queries + ( -- ** Queries -- *** Inputs deleteInputsQry , markInputsQry @@ -183,7 +181,7 @@ withDBPool tr isReadOnly dbLocation longestRollback action = do pool <- liftIO . newPool $ defaultPoolConfig connectDb (\Database{close} -> close) - 600 -- // TODO: Review concurrency requirements + 600 -- TODO: Review concurrency requirements maxConnections return DBPool @@ -207,13 +205,16 @@ withDBPool tr isReadOnly dbLocation longestRollback action = do WriteOnly -> fail "Impossible: tried to acquire a WriteOnly database?" _ -> withRes pool dbAction - -- // TODO: Acutally do something with defer indexes! And possibly actually provide a preferred connection? 
     withDatabaseExclusiveWriter
         :: Pool (Database IO)
         -> DeferIndexesInstallation
         -> (Database IO -> IO a)
         -> IO a
-    withDatabaseExclusiveWriter pool _deferIndexes = withResource pool
+    withDatabaseExclusiveWriter pool deferIndexes exclusiveWriterAction = do
+        bracket mkConnection PG.close $ \conn -> installIndexes tr conn deferIndexes
+        withResource pool exclusiveWriterAction
+    -- ^ TODO: Review if any of the PRAGMAs in the SQLite `withLongLivedConnection`
+    -- need equivalents here
 
     connectDb = mkConnection <&> \conn ->
         mkDatabase trConn longestRollback (\dbAction -> dbAction conn)
@@ -249,7 +250,7 @@ copyDatabase
     -> FilePath
     -> Set Pattern
     -> IO ()
-copyDatabase = undefined -- // TODO: Implement copyDatabase
+copyDatabase = undefined -- TODO: Implement copyDatabase
 
 --
 -- IO
@@ -299,6 +300,9 @@ mkDatabase tr longestRollback bracketConnection = Database
                     execute_ conn (markInputsQry pattern)) refs
             -- ^ TODO: Try to convert this to an `executeMany` call.
+            -- Since `markInputsQry` creates a few different queries,
+            -- we may have to group the queries into those with equivalent
+            -- forms.
 
     , markInputs = \(fromIntegral . unSlotNo -> slotNo) refs ->
         ReaderT $ \conn ->
             handle (throwIO . DatabaseException "markInputs") $ do
@@ -306,6 +310,9 @@ mkDatabase tr longestRollback bracketConnection = Database
                     execute conn (markInputsQry pattern) $ Only (slotNo :: Int64)) refs
             -- ^ TODO: Try to convert this to an `executeMany` call.
+            -- Since `markInputsQry` creates a few different queries,
+            -- we may have to group the queries into those with equivalent
+            -- forms.
 
     , pruneInputs =
         ReaderT $ \conn ->
@@ -464,12 +471,11 @@ mkDatabase tr longestRollback bracketConnection = Database
             [] -> return Nothing
             res -> throwIO $ ExpectedSingletonResult (show selectMaxCheckpointQry) (length res)
-        -- ^ TODO: In SQLite, the pattern matches check for null values. I've changed the query, and
-        -- I think it should work without checking for null values, but let's check this.
 
     , optimize = return ()
         -- ^ TODO: Review if optimize needs to happen with Postgres.
-        -- ^ Also determine if this can be hidden within the `Database` implementation.
+        -- Also determine if this can be hidden within the `Database` implementation.
+        -- Perhaps this could be done with an async task that runs at regular intervals?
 
     , runTransaction = \r ->
         bracketConnection $ \conn ->
             withTransaction conn (runReaderT r conn)
@@ -649,13 +655,13 @@ getBinaryDataQry =
     \WHERE binary_data_hash = ? \
     \LIMIT 1"
 
--- // TODO: Investigate if the 'ORDER BY' clause is necessary in PostgreSQL
 -- NOTE: This removes all binary_data that aren't associted with any
 -- known input. The 'ORDER BY' at the end may seem pointless but is
 -- actually CRUCIAL for the query performance as it forces SQLite to use
 -- the availables indexes of both tables on 'data_hash' and
 -- 'binary_data_hash'. Without that, this query may take 1h+ on a large
 -- database (e.g. mainnet matching '*').
+-- TODO: Investigate if the 'ORDER BY' clause is necessary in PostgreSQL
 pruneBinaryDataQry :: Query
 pruneBinaryDataQry =
     " DELETE FROM binary_data \
@@ -765,7 +771,6 @@ withTemporaryIndex tr conn name table column action = do
 -- | Check whether an index exists in the database. Handy to customize the behavior (e.g. logging)
 -- depending on whether or not indexes are already there since 'CREATE INDEX IF NOT EXISTS' will not
 -- tell whether or not it has indeed created something.
--- // TODO: I don't think this works
 indexDoesExist :: Connection -> Text -> IO Bool
 indexDoesExist conn indexName = do
     query conn qry (Only indexName) <&> \case
@@ -809,8 +814,7 @@ databaseVersion conn = do
     countStatement =
         "SELECT id, version FROM migrations ORDER BY id DESC LIMIT 1;"
 
--- // TODO: Should there be a command line argument to determine whether or not to run migrations?
--- How will running migrations affect a DB that supports multiple Kupo instances?
+-- TODO: How will running migrations affect a DB that supports multiple Kupo instances?
 runMigrations :: Tracer IO TraceDatabase -> Connection -> IO ()
 runMigrations tr conn = do
     currentVersion <- databaseVersion conn
diff --git a/src/Kupo/App/Database/SQLite.hs b/src/Kupo/App/Database/SQLite.hs
index 215a1ae..3bfeb68 100644
--- a/src/Kupo/App/Database/SQLite.hs
+++ b/src/Kupo/App/Database/SQLite.hs
@@ -9,8 +9,7 @@
 {-# LANGUAGE TemplateHaskell #-}
 
 module Kupo.App.Database.SQLite
-    ( -- // TODO: Fix documentation headers
-      -- ** Queries
+    ( -- ** Queries
       -- *** Inputs
       deleteInputsQry
     , markInputsQry
@@ -1133,8 +1132,6 @@ mkBlobLiteral bytes = "x'" <> encodeBase16 bytes <> "'"
 withTestDatabase :: String -> (Configuration.DatabaseLocation -> IO a) -> IO a
 withTestDatabase _dbName action = do
     action $ Configuration.InMemory Nothing
--- ^// TODO: Create the proper connection string
-
 --
 -- Indexes

From 55b9f4fd3df79e3f22e4cbaa6317a85be0bed878 Mon Sep 17 00:00:00 2001
From: Dominic Mayhew
Date: Tue, 18 Jun 2024 17:48:22 -0700
Subject: [PATCH 7/9] Fix syntax in index creation statement

---
 src/Kupo/App/Database/Postgres.hs | 33 +++++++++++--------------------
 1 file changed, 12 insertions(+), 21 deletions(-)

diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs
index d72689d..3c2aaa1 100644
--- a/src/Kupo/App/Database/Postgres.hs
+++ b/src/Kupo/App/Database/Postgres.hs
@@ -725,32 +725,23 @@ installIndexes tr conn = \case
         dropIndexIfExists (contramap DatabaseConnection tr) conn "inputsBySpentAt" False
         dropIndexIfExists (contramap DatabaseConnection tr) conn "policiesByPolicyId" False
     InstallIndexesIfNotExist -> do
-        installIndex tr conn
-            "inputsByAddress"
-            "inputs(address)" -- TODO: Find a substitute for SQLite's nocase clause.
-        installIndex tr conn
-            "inputsByDatumHash"
-            "inputs(datum_hash)"
-        installIndex tr conn
-            "inputsByPaymentCredential"
-            "inputs(payment_credential)" -- TODO: Find a substitute for SQLite's nocase clause.
-        installIndex tr conn
-            "inputsByCreatedAt"
-            "inputs(created_at)"
-        installIndex tr conn
-            "inputsBySpentAt"
-            "inputs(spent_at)"
-        installIndex tr conn
-            "policiesByPolicyId"
-            "policies(policy_id)"
+        installIndex tr conn "inputsByAddress" "inputs" "address"
+        -- ^ TODO: Find a substitute for SQLite's nocase clause.
+        installIndex tr conn "inputsByDatumHash" "inputs" "datum_hash"
+        installIndex tr conn "inputsByPaymentCredential" "inputs" "payment_credential"
+        -- ^ TODO: Find a substitute for SQLite's nocase clause.
+        installIndex tr conn "inputsByCreatedAt" "inputs" "created_at"
+        installIndex tr conn "inputsBySpentAt" "inputs" "spent_at"
+        installIndex tr conn "policiesByPolicyId" "policies" "policy_id"
 
 -- Create the given index with some extra logging around it.
-installIndex :: Tracer IO TraceDatabase -> Connection -> Text -> Text -> IO ()
-installIndex tr conn name definition = do
+installIndex :: Tracer IO TraceDatabase -> Connection -> Text -> Text -> Text -> IO ()
+installIndex tr conn name table column = do
     indexDoesExist conn name >>= \case
         False -> do
             traceWith tr (DatabaseCreateIndex name)
-            void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ?" (Identifier name, Identifier definition)
+            void $ execute conn "CREATE INDEX IF NOT EXISTS ? ON ? ( ? )"
+                (Identifier name, Identifier table, Identifier column)
         True ->
             traceWith tr (DatabaseIndexAlreadyExists name)

From 8f88d81b8048aabaddb83562e43765698109a87e Mon Sep 17 00:00:00 2001
From: Dominic Mayhew
Date: Tue, 18 Jun 2024 17:48:57 -0700
Subject: [PATCH 8/9] Rebuild `kupo.cabal` with latest changes

---
 kupo.cabal | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kupo.cabal b/kupo.cabal
index 219f99d..6382efa 100644
--- a/kupo.cabal
+++ b/kupo.cabal
@@ -303,7 +303,6 @@ library
     , ouroboros-network-api
     , ouroboros-network-framework
     , ouroboros-network-protocols
-    , postgresql-simple
     , prometheus
     , relude
     , resource-pool

From c36019256d5f860fd0a56249b0e2fe2521b9591e Mon Sep 17 00:00:00 2001
From: Dominic Mayhew
Date: Tue, 18 Jun 2024 18:17:36 -0700
Subject: [PATCH 9/9] Convert addresses to lowercase to replicate the `NO CASE` collation in SQLite

---
 src/Kupo/App/Database/Postgres.hs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Kupo/App/Database/Postgres.hs b/src/Kupo/App/Database/Postgres.hs
index 3c2aaa1..2c5d71c 100644
--- a/src/Kupo/App/Database/Postgres.hs
+++ b/src/Kupo/App/Database/Postgres.hs
@@ -267,7 +267,7 @@ mkDatabase tr longestRollback bracketConnection = Database
             mapM_ (\DB.Input{..} -> do
                 insertRow @"inputs" conn 7
                     ( Binary extendedOutputReference
-                    , address
+                    , T.toLower address
                     , Binary value
                     , Binary <$> datumInfo
                     , Binary <$> refScriptHash
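A note on the case-insensitivity handling in patches 7 and 9: lowercasing `address` at insert time only replicates SQLite's `NOCASE` collation if the read path normalises its query parameters the same way. A hypothetical alternative, not part of this series, is to leave stored values untouched and give Postgres a case-insensitive expression index instead. The sketch below is illustrative only: it assumes the `inputs(address)` column from this schema, the index and function names are made up, and it uses only `postgresql-simple` primitives that already appear in these patches (`execute_`, `query`, `Only`).

```haskell
{-# LANGUAGE OverloadedStrings #-}

module AddressCaseSketch where

import Data.Int (Int64)
import Data.Text (Text)
import Database.PostgreSQL.Simple (Connection, Only (..), execute_, query)

-- Hypothetical alternative to lowercasing addresses on insert: index the
-- lower-cased expression so lookups stay case-insensitive without rewriting
-- stored values. Index name is illustrative, not from the patches.
createLowerAddressIndex :: Connection -> IO Int64
createLowerAddressIndex conn =
    execute_ conn
        "CREATE INDEX IF NOT EXISTS inputs_by_lower_address ON inputs (LOWER(address))"

-- The read side must normalise its parameter the same way, otherwise the
-- expression index above is never used by the planner.
countByAddress :: Connection -> Text -> IO [Only Int64]
countByAddress conn addr =
    query conn
        "SELECT COUNT(*) FROM inputs WHERE LOWER(address) = LOWER(?)"
        (Only addr)
```

The trade-off between the two approaches is rewriting the data once at insert time (what patch 9 does) versus carrying a `LOWER(…)` call in every query and index definition (the sketch above); either would serve as the substitute the `nocase` TODOs in patch 7 ask for.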