diff --git a/CREDITS.md b/CREDITS.md index d5dc7f83..782f40a8 100644 --- a/CREDITS.md +++ b/CREDITS.md @@ -8,8 +8,6 @@ the [Rust ecosystem](https://lib.rs), such as: * [Tracing](https://github.com/tokio-rs/tracing) for the diagnostic system and structured logging. * [Fluent templates](https://github.com/XAMPPRocky/fluent-templates) that incorporate [Fluent](https://projectfluent.org/) for project internationalization. -* [SeaORM](https://www.sea-ql.org/SeaORM/) which employs [SQLx](https://docs.rs/sqlx/latest/sqlx/) - for database access and modeling. * Among others, which you can review in the PageTop [`Cargo.toml`](https://github.com/manuelcillero/pagetop/blob/main/Cargo.toml) file. @@ -32,13 +30,6 @@ PageTop integrates code from various renowned crates to enhance functionality: Default implementations and also removes the need to explicitly list `smart_default` in the `Cargo.toml` files. -* **Database Operations**: PageTop employs [SQLx](https://github.com/launchbadge/sqlx) and - [SeaQuery](https://github.com/SeaQL/sea-query), complemented by a custom version of - [SeaORM Migration](https://github.com/SeaQL/sea-orm/tree/master/sea-orm-migration) (version - [0.12.8](https://github.com/SeaQL/sea-orm/tree/0.12.8/sea-orm-migration/src)). This modification - ensures migration processes are confined to specific packages, enhancing modularity and - maintainability. - # FIGfonts diff --git a/Cargo.toml b/Cargo.toml index b9e9fd25..87592ca9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,15 +28,7 @@ members = ["helpers/*"] [lib] name = "pagetop" -[features] -default = [] -database = ["futures", "sea-orm", "sea-schema"] -mysql = ["database", "sea-orm/sqlx-mysql"] -postgres = ["database", "sea-orm/sqlx-postgres"] -sqlite = ["database", "sea-orm/sqlx-sqlite"] - [dependencies] -async-trait = "0.1.81" chrono = "0.4.38" concat-string = "1.0.1" figlet-rs = "0.1.5" @@ -45,9 +37,7 @@ nom = "7.1.3" paste = "1.0.15" substring = "1.4.5" term_size = "0.3.2" -time = "0.3.36" toml = "0.8.16" -url = "2.5.2" tracing = "0.1.40" tracing-appender = "0.2.3" @@ -68,23 +58,5 @@ pagetop-macros = { version = "0.0", path = "helpers/pagetop-macros" } serde = { version = "1.0", features = ["derive"] } -[dependencies.futures] -version = "0.3.30" -optional = true - -[dependencies.sea-orm] -version = "0.12.15" -features = ["debug-print", "macros", "runtime-async-std-native-tls"] -default-features = false -optional = true - -[dependencies.sea-schema] -version = "0.14.2" -optional = true - [build-dependencies] pagetop-build = { version = "0.0", path = "helpers/pagetop-build" } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] diff --git a/STARTER.bin.Cargo.toml b/STARTER.bin.Cargo.toml deleted file mode 100644 index 4ce1d03c..00000000 --- a/STARTER.bin.Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "app" -version = "0.1.0" -edition = "2021" - -# Ver más claves y sus definiciones en -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -# Si requiere acceso a base de datos (mysql, postgres y/o sqlite): -pagetop = { version = "0.0", features = ["mysql"], default-features = false } -# pagetop = "0.0" (en otro caso) - -# Opcional. Para usar archivos y recursos binarios contenidos en el ejecutable: -static-files = "0.2.3" -# Opcional. Para serializar estructuras de datos: -serde = { version = "1.0", features = ["derive"] } - -[build-dependencies] -# Opcional. 
Para incluir archivos y recursos binarios en el ejecutable: -pagetop-build = "0.0" diff --git a/STARTER.lib.Cargo.toml b/STARTER.lib.Cargo.toml deleted file mode 100644 index 26c4debe..00000000 --- a/STARTER.lib.Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "module" -version = "0.1.0" -edition = "2021" - -# Ver más claves y sus definiciones en -# https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -# Si requiere acceso a base de datos: -pagetop = { version = "0.0", features = ["database"], default-features = false } -# pagetop = "0.0" (en otro caso) - -# Opcional. Para usar archivos y recursos binarios contenidos en la librería: -static-files = "0.2.3" -# Opcional. Para serializar estructuras de datos: -serde = { version = "1.0", features = ["derive"] } - -[build-dependencies] -# Opcional. Para incluir archivos y recursos binarios en la propia librería: -pagetop-build = "0.0" diff --git a/docs/predefined-settings.toml b/docs/predefined-settings.toml index 6ffa19cd..08571a92 100644 --- a/docs/predefined-settings.toml +++ b/docs/predefined-settings.toml @@ -10,22 +10,6 @@ direction = "ltr" # Startup banner: "Off", "Slant", "Small", "Speed", or "Starwars". startup_banner = "Slant" -[database] -# Connect to a database (optional). -# Database type (mysql, postgres, or sqlite). -db_type = "" -# Database name (for mysql/postgres) or reference (for sqlite). -db_name = "" -# User and password (for mysql/postgres). -db_user = "" -db_pass = "" -# Database server (for mysql/postgres). -db_host = "localhost" -# Port, usually 3306 (for mysql) or 5432 (for postgres). -db_port = 0 -# Maximum number of enabled connections. -max_pool_size = 5 - [dev] # Static files required by the app are integrated by default into the executable # binary. However, during development, it can be useful to serve these files diff --git a/src/app.rs b/src/app.rs index 43857ce1..06e5df1d 100644 --- a/src/app.rs +++ b/src/app.rs @@ -8,9 +8,6 @@ use crate::response::page::{ErrorPage, ResultPage}; use crate::service::HttpRequest; use crate::{config, locale, service, trace}; -#[cfg(feature = "database")] -use crate::db; - use actix_session::config::{BrowserSession, PersistentSession, SessionLifecycle}; use actix_session::storage::CookieSessionStore; use actix_session::SessionMiddleware; @@ -50,20 +47,12 @@ impl Application { // Validates the default language identifier. LazyLock::force(&locale::LANGID_DEFAULT); - #[cfg(feature = "database")] - // Connects to the database. - LazyLock::force(&db::DBCONN); - // Registers the application's packages. package::all::register_packages(root_package); // Registers package actions. package::all::register_actions(); - #[cfg(feature = "database")] - // Runs pending database migrations. - package::all::run_migrations(); - // Initializes the packages. package::all::init_packages(); diff --git a/src/config.rs b/src/config.rs index 2faecb79..88442aba 100644 --- a/src/config.rs +++ b/src/config.rs @@ -198,11 +198,10 @@ macro_rules! default_settings { } #[derive(Debug, Deserialize)] -/// Configuration settings for the [`[app]`](App), [`[database]`](Database), [`[dev]`](Dev), -/// [`[log]`](Log), and [`[server]`](Server) sections (see [`SETTINGS`]). +/// Configuration settings for the [`[app]`](App), [`[dev]`](Dev), [`[log]`](Log), and +/// [`[server]`](Server) sections (see [`SETTINGS`]). 
pub struct Settings { pub app: App, - pub database: Database, pub dev: Dev, pub log: Log, pub server: Server, @@ -236,34 +235,6 @@ pub struct App { pub run_mode: String, } -#[derive(Debug, Deserialize)] -/// Section `[database]` of the configuration settings. -/// -/// See [`Settings`]. -pub struct Database { - /// Tipo de base de datos: *"mysql"*, *"postgres"* ó *"sqlite"*. - /// Por defecto: *""*. - pub db_type: String, - /// Nombre (para mysql/postgres) o referencia (para sqlite) de la base de datos. - /// Por defecto: *""*. - pub db_name: String, - /// Usuario de conexión a la base de datos (para mysql/postgres). - /// Por defecto: *""*. - pub db_user: String, - /// Contraseña para la conexión a la base de datos (para mysql/postgres). - /// Por defecto: *""*. - pub db_pass: String, - /// Servidor de conexión a la base de datos (para mysql/postgres). - /// Por defecto: *"localhost"*. - pub db_host: String, - /// Puerto de conexión a la base de datos, normalmente 3306 (para mysql) ó 5432 (para postgres). - /// Por defecto: *0*. - pub db_port: u16, - /// Número máximo de conexiones habilitadas. - /// Por defecto: *5*. - pub max_pool_size: u32, -} - #[derive(Debug, Deserialize)] /// Section `[dev]` of the configuration settings. /// @@ -327,15 +298,6 @@ default_settings!( "app.direction" => "ltr", "app.startup_banner" => "Slant", - // [database] - "database.db_type" => "", - "database.db_name" => "", - "database.db_user" => "", - "database.db_pass" => "", - "database.db_host" => "localhost", - "database.db_port" => 0, - "database.max_pool_size" => 5, - // [dev] "dev.pagetop_project_dir" => "", diff --git a/src/core/package/all.rs b/src/core/package/all.rs index 191eb8da..50c650b1 100644 --- a/src/core/package/all.rs +++ b/src/core/package/all.rs @@ -3,9 +3,6 @@ use crate::core::package::PackageRef; use crate::core::theme::all::THEMES; use crate::{config, service, service_for_static_files, static_files, trace}; -#[cfg(feature = "database")] -use crate::db::*; - use std::sync::{LazyLock, RwLock}; static_files!(base); @@ -130,45 +127,6 @@ pub fn init_packages() { } } -// RUN MIGRATIONS ********************************************************************************** - -#[cfg(feature = "database")] -pub fn run_migrations() { - if let Some(dbconn) = &*DBCONN { - if let Err(e) = run_now({ - struct Migrator; - impl MigratorTrait for Migrator { - fn migrations() -> Vec { - let mut migrations = vec![]; - for m in ENABLED_PACKAGES.read().unwrap().iter() { - migrations.append(&mut m.migrations()); - } - migrations - } - } - Migrator::up(SchemaManagerConnection::Connection(dbconn), None) - }) { - trace::error!("Database upgrade failed ({})", e); - }; - - if let Err(e) = run_now({ - struct Migrator; - impl MigratorTrait for Migrator { - fn migrations() -> Vec { - let mut migrations = vec![]; - for m in DROPPED_PACKAGES.read().unwrap().iter() { - migrations.append(&mut m.migrations()); - } - migrations - } - } - Migrator::down(SchemaManagerConnection::Connection(dbconn), None) - }) { - trace::error!("Database downgrade failed ({})", e); - }; - } -} - // CONFIGURE SERVICES ****************************************************************************** pub fn configure_services(scfg: &mut service::web::ServiceConfig) { diff --git a/src/core/package/definition.rs b/src/core/package/definition.rs index 7ec0258a..3506a929 100644 --- a/src/core/package/definition.rs +++ b/src/core/package/definition.rs @@ -4,9 +4,6 @@ use crate::core::AnyBase; use crate::locale::L10n; use crate::{actions, service}; 
-#[cfg(feature = "database")] -use crate::{db::MigrationItem, migrations}; - pub type PackageRef = &'static dyn PackageTrait; /// Los paquetes deben implementar este *trait*. @@ -35,11 +32,6 @@ pub trait PackageTrait: AnyBase + Send + Sync { actions![] } - #[cfg(feature = "database")] - fn migrations(&self) -> Vec { - migrations![] - } - fn init(&self) {} #[allow(unused_variables)] diff --git a/src/db.rs b/src/db.rs deleted file mode 100644 index faf18926..00000000 --- a/src/db.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! Database access. - -use crate::util::TypeInfo; -use crate::{config, trace}; - -pub use url::Url as DbUri; - -pub use sea_orm::error::{DbErr, RuntimeErr}; -pub use sea_orm::{DatabaseConnection as DbConn, ExecResult, QueryResult}; - -use sea_orm::{ConnectOptions, ConnectionTrait, Database, DatabaseBackend, Statement}; - -use std::sync::LazyLock; - -pub(crate) use futures::executor::block_on as run_now; - -const DBCONN_NOT_INITIALIZED: &str = "Database connection not initialized"; - -pub(crate) static DBCONN: LazyLock> = LazyLock::new(|| { - if !config::SETTINGS.database.db_name.trim().is_empty() { - trace::info!( - "Connecting to database \"{}\" using a pool of {} connections", - &config::SETTINGS.database.db_name, - &config::SETTINGS.database.max_pool_size - ); - - let db_uri = match config::SETTINGS.database.db_type.as_str() { - "mysql" | "postgres" => { - let mut tmp_uri = DbUri::parse( - format!( - "{}://{}/{}", - &config::SETTINGS.database.db_type, - &config::SETTINGS.database.db_host, - &config::SETTINGS.database.db_name - ) - .as_str(), - ) - .unwrap(); - tmp_uri - .set_username(config::SETTINGS.database.db_user.as_str()) - .unwrap(); - // https://github.com/launchbadge/sqlx/issues/1624 - tmp_uri - .set_password(Some(config::SETTINGS.database.db_pass.as_str())) - .unwrap(); - if config::SETTINGS.database.db_port != 0 { - tmp_uri - .set_port(Some(config::SETTINGS.database.db_port)) - .unwrap(); - } - tmp_uri - } - "sqlite" => DbUri::parse( - format!( - "{}://{}", - &config::SETTINGS.database.db_type, - &config::SETTINGS.database.db_name - ) - .as_str(), - ) - .unwrap(), - _ => { - trace::error!( - "Unrecognized database type \"{}\"", - &config::SETTINGS.database.db_type - ); - DbUri::parse("").unwrap() - } - }; - - Some( - run_now(Database::connect::({ - let mut db_opt = ConnectOptions::new(db_uri.to_string()); - db_opt.max_connections(config::SETTINGS.database.max_pool_size); - db_opt - })) - .unwrap_or_else(|_| panic!("Failed to connect to database")), - ) - } else { - None - } -}); - -pub async fn query(stmt: &mut Q) -> Result, DbErr> { - match &*DBCONN { - Some(dbconn) => { - let dbbackend = dbconn.get_database_backend(); - dbconn - .query_all(Statement::from_string( - dbbackend, - match dbbackend { - DatabaseBackend::MySql => stmt.to_string(MysqlQueryBuilder), - DatabaseBackend::Postgres => stmt.to_string(PostgresQueryBuilder), - DatabaseBackend::Sqlite => stmt.to_string(SqliteQueryBuilder), - }, - )) - .await - } - None => Err(DbErr::Conn(RuntimeErr::Internal( - DBCONN_NOT_INITIALIZED.to_owned(), - ))), - } -} - -pub async fn exec(stmt: &mut Q) -> Result, DbErr> { - match &*DBCONN { - Some(dbconn) => { - let dbbackend = dbconn.get_database_backend(); - dbconn - .query_one(Statement::from_string( - dbbackend, - match dbbackend { - DatabaseBackend::MySql => stmt.to_string(MysqlQueryBuilder), - DatabaseBackend::Postgres => stmt.to_string(PostgresQueryBuilder), - DatabaseBackend::Sqlite => stmt.to_string(SqliteQueryBuilder), - }, - )) - .await - } - None => 
Err(DbErr::Conn(RuntimeErr::Internal( - DBCONN_NOT_INITIALIZED.to_owned(), - ))), - } -} - -pub async fn exec_raw(stmt: String) -> Result { - match &*DBCONN { - Some(dbconn) => { - let dbbackend = dbconn.get_database_backend(); - dbconn - .execute(Statement::from_string(dbbackend, stmt)) - .await - } - None => Err(DbErr::Conn(RuntimeErr::Internal( - DBCONN_NOT_INITIALIZED.to_owned(), - ))), - } -} - -// El siguiente módulo migration es una versión simplificada del módulo sea_orm_migration (v0.11.3) -// https://github.com/SeaQL/sea-orm/tree/0.11.3/sea-orm-migration para evitar los errores generados -// por el paradigma modular de PageTop. Se integran los siguientes archivos del original: -// -// lib.rs => db/migration.rs . . . . . . . . . .(descartando algunos módulos y exportaciones) -// connection.rs => db/migration/connection.rs . . . . . . . . . . . . . . . . . . (completo) -// manager.rs => db/migration/manager.rs . . . . . . . . . . . . . . . . . . . . . (completo) -// migrator.rs => db/migration/migrator.rs . . . . . .(suprimiendo la gestión de los errores) -// prelude.rs => db/migration/prelude.rs . . . . . . . . . . . . . . . . . . . (evitando cli) -// seaql_migrations.rs => db/migration/seaql_migrations.rs . . . . . . . . . . . . (completo) -// -mod migration; -pub use migration::prelude::*; - -impl MigrationName for M { - fn name(&self) -> &str { - TypeInfo::NameTo(-2).of::() - } -} - -pub type MigrationItem = Box; - -#[macro_export] -macro_rules! migrations { - () => { - Vec::::new() - }; - ( $($migration_module:ident),+ $(,)? ) => {{ - let mut m = Vec::::new(); - $( - m.push(Box::new(migration::$migration_module::Migration)); - )* - m - }}; -} diff --git a/src/db/migration.rs b/src/db/migration.rs deleted file mode 100644 index 7670de69..00000000 --- a/src/db/migration.rs +++ /dev/null @@ -1,32 +0,0 @@ -//pub mod cli; -pub mod connection; -pub mod manager; -pub mod migrator; -pub mod prelude; -pub mod seaql_migrations; -//pub mod util; - -pub use connection::*; -pub use manager::*; -//pub use migrator::*; - -//pub use async_trait; -//pub use sea_orm; -//pub use sea_orm::sea_query; -use sea_orm::DbErr; - -pub trait MigrationName { - fn name(&self) -> &str; -} - -/// The migration definition -#[async_trait::async_trait] -pub trait MigrationTrait: MigrationName + Send + Sync { - /// Define actions to perform when applying the migration - async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr>; - - /// Define actions to perform when rolling back the migration - async fn down(&self, _manager: &SchemaManager) -> Result<(), DbErr> { - Err(DbErr::Migration("We Don't Do That Here".to_owned())) - } -} diff --git a/src/db/migration/connection.rs b/src/db/migration/connection.rs deleted file mode 100644 index 116185e4..00000000 --- a/src/db/migration/connection.rs +++ /dev/null @@ -1,148 +0,0 @@ -use futures::Future; -use sea_orm::{ - AccessMode, ConnectionTrait, DatabaseConnection, DatabaseTransaction, DbBackend, DbErr, - ExecResult, IsolationLevel, QueryResult, Statement, TransactionError, TransactionTrait, -}; -use std::pin::Pin; - -pub enum SchemaManagerConnection<'c> { - Connection(&'c DatabaseConnection), - Transaction(&'c DatabaseTransaction), -} - -#[async_trait::async_trait] -impl<'c> ConnectionTrait for SchemaManagerConnection<'c> { - fn get_database_backend(&self) -> DbBackend { - match self { - SchemaManagerConnection::Connection(conn) => conn.get_database_backend(), - SchemaManagerConnection::Transaction(trans) => trans.get_database_backend(), - } - } - - async fn 
execute(&self, stmt: Statement) -> Result { - match self { - SchemaManagerConnection::Connection(conn) => conn.execute(stmt).await, - SchemaManagerConnection::Transaction(trans) => trans.execute(stmt).await, - } - } - - async fn execute_unprepared(&self, sql: &str) -> Result { - match self { - SchemaManagerConnection::Connection(conn) => conn.execute_unprepared(sql).await, - SchemaManagerConnection::Transaction(trans) => trans.execute_unprepared(sql).await, - } - } - - async fn query_one(&self, stmt: Statement) -> Result, DbErr> { - match self { - SchemaManagerConnection::Connection(conn) => conn.query_one(stmt).await, - SchemaManagerConnection::Transaction(trans) => trans.query_one(stmt).await, - } - } - - async fn query_all(&self, stmt: Statement) -> Result, DbErr> { - match self { - SchemaManagerConnection::Connection(conn) => conn.query_all(stmt).await, - SchemaManagerConnection::Transaction(trans) => trans.query_all(stmt).await, - } - } - - fn is_mock_connection(&self) -> bool { - match self { - SchemaManagerConnection::Connection(conn) => conn.is_mock_connection(), - SchemaManagerConnection::Transaction(trans) => trans.is_mock_connection(), - } - } -} - -#[async_trait::async_trait] -impl<'c> TransactionTrait for SchemaManagerConnection<'c> { - async fn begin(&self) -> Result { - match self { - SchemaManagerConnection::Connection(conn) => conn.begin().await, - SchemaManagerConnection::Transaction(trans) => trans.begin().await, - } - } - - async fn begin_with_config( - &self, - isolation_level: Option, - access_mode: Option, - ) -> Result { - match self { - SchemaManagerConnection::Connection(conn) => { - conn.begin_with_config(isolation_level, access_mode).await - } - SchemaManagerConnection::Transaction(trans) => { - trans.begin_with_config(isolation_level, access_mode).await - } - } - } - - async fn transaction(&self, callback: F) -> Result> - where - F: for<'a> FnOnce( - &'a DatabaseTransaction, - ) -> Pin> + Send + 'a>> - + Send, - T: Send, - E: std::error::Error + Send, - { - match self { - SchemaManagerConnection::Connection(conn) => conn.transaction(callback).await, - SchemaManagerConnection::Transaction(trans) => trans.transaction(callback).await, - } - } - - async fn transaction_with_config( - &self, - callback: F, - isolation_level: Option, - access_mode: Option, - ) -> Result> - where - F: for<'a> FnOnce( - &'a DatabaseTransaction, - ) -> Pin> + Send + 'a>> - + Send, - T: Send, - E: std::error::Error + Send, - { - match self { - SchemaManagerConnection::Connection(conn) => { - conn.transaction_with_config(callback, isolation_level, access_mode) - .await - } - SchemaManagerConnection::Transaction(trans) => { - trans - .transaction_with_config(callback, isolation_level, access_mode) - .await - } - } - } -} - -pub trait IntoSchemaManagerConnection<'c>: Send -where - Self: 'c, -{ - fn into_schema_manager_connection(self) -> SchemaManagerConnection<'c>; -} - -impl<'c> IntoSchemaManagerConnection<'c> for SchemaManagerConnection<'c> { - fn into_schema_manager_connection(self) -> SchemaManagerConnection<'c> { - self - } -} - -impl<'c> IntoSchemaManagerConnection<'c> for &'c DatabaseConnection { - fn into_schema_manager_connection(self) -> SchemaManagerConnection<'c> { - SchemaManagerConnection::Connection(self) - } -} - -impl<'c> IntoSchemaManagerConnection<'c> for &'c DatabaseTransaction { - fn into_schema_manager_connection(self) -> SchemaManagerConnection<'c> { - SchemaManagerConnection::Transaction(self) - } -} diff --git a/src/db/migration/manager.rs 
b/src/db/migration/manager.rs deleted file mode 100644 index 994204e1..00000000 --- a/src/db/migration/manager.rs +++ /dev/null @@ -1,160 +0,0 @@ -use super::{IntoSchemaManagerConnection, SchemaManagerConnection}; -use sea_orm::sea_query::{ - extension::postgres::{TypeAlterStatement, TypeCreateStatement, TypeDropStatement}, - ForeignKeyCreateStatement, ForeignKeyDropStatement, IndexCreateStatement, IndexDropStatement, - TableAlterStatement, TableCreateStatement, TableDropStatement, TableRenameStatement, - TableTruncateStatement, -}; -use sea_orm::{ConnectionTrait, DbBackend, DbErr, StatementBuilder}; -use sea_schema::{mysql::MySql, postgres::Postgres, probe::SchemaProbe, sqlite::Sqlite}; - -/// Helper struct for writing migration scripts in migration file -pub struct SchemaManager<'c> { - conn: SchemaManagerConnection<'c>, -} - -impl<'c> SchemaManager<'c> { - pub fn new(conn: T) -> Self - where - T: IntoSchemaManagerConnection<'c>, - { - Self { - conn: conn.into_schema_manager_connection(), - } - } - - pub async fn exec_stmt(&self, stmt: S) -> Result<(), DbErr> - where - S: StatementBuilder, - { - let builder = self.conn.get_database_backend(); - self.conn.execute(builder.build(&stmt)).await.map(|_| ()) - } - - pub fn get_database_backend(&self) -> DbBackend { - self.conn.get_database_backend() - } - - pub fn get_connection(&self) -> &SchemaManagerConnection<'c> { - &self.conn - } -} - -/// Schema Creation -impl<'c> SchemaManager<'c> { - pub async fn create_table(&self, stmt: TableCreateStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn create_index(&self, stmt: IndexCreateStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn create_foreign_key(&self, stmt: ForeignKeyCreateStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn create_type(&self, stmt: TypeCreateStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } -} - -/// Schema Mutation -impl<'c> SchemaManager<'c> { - pub async fn alter_table(&self, stmt: TableAlterStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn drop_table(&self, stmt: TableDropStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn rename_table(&self, stmt: TableRenameStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn truncate_table(&self, stmt: TableTruncateStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn drop_index(&self, stmt: IndexDropStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn drop_foreign_key(&self, stmt: ForeignKeyDropStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn alter_type(&self, stmt: TypeAlterStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } - - pub async fn drop_type(&self, stmt: TypeDropStatement) -> Result<(), DbErr> { - self.exec_stmt(stmt).await - } -} - -/// Schema Inspection -impl<'c> SchemaManager<'c> { - pub async fn has_table(&self, table: T) -> Result - where - T: AsRef, - { - let stmt = match self.conn.get_database_backend() { - DbBackend::MySql => MySql::has_table(table), - DbBackend::Postgres => Postgres::has_table(table), - DbBackend::Sqlite => Sqlite::has_table(table), - }; - - let builder = self.conn.get_database_backend(); - let res = self - .conn - .query_one(builder.build(&stmt)) - .await? 
- .ok_or_else(|| DbErr::Custom("Failed to check table exists".to_owned()))?; - - res.try_get("", "has_table") - } - - pub async fn has_column(&self, table: T, column: C) -> Result - where - T: AsRef, - C: AsRef, - { - let stmt = match self.conn.get_database_backend() { - DbBackend::MySql => MySql::has_column(table, column), - DbBackend::Postgres => Postgres::has_column(table, column), - DbBackend::Sqlite => Sqlite::has_column(table, column), - }; - - let builder = self.conn.get_database_backend(); - let res = self - .conn - .query_one(builder.build(&stmt)) - .await? - .ok_or_else(|| DbErr::Custom("Failed to check column exists".to_owned()))?; - - res.try_get("", "has_column") - } - - pub async fn has_index(&self, table: T, index: I) -> Result - where - T: AsRef, - I: AsRef, - { - let stmt = match self.conn.get_database_backend() { - DbBackend::MySql => MySql::has_index(table, index), - DbBackend::Postgres => Postgres::has_index(table, index), - DbBackend::Sqlite => Sqlite::has_index(table, index), - }; - - let builder = self.conn.get_database_backend(); - let res = self - .conn - .query_one(builder.build(&stmt)) - .await? - .ok_or_else(|| DbErr::Custom("Failed to check index exists".to_owned()))?; - - res.try_get("", "has_index") - } -} diff --git a/src/db/migration/migrator.rs b/src/db/migration/migrator.rs deleted file mode 100644 index 9479cff4..00000000 --- a/src/db/migration/migrator.rs +++ /dev/null @@ -1,589 +0,0 @@ -use futures::Future; -use std::collections::HashSet; -use std::fmt::Display; -use std::pin::Pin; -use std::time::SystemTime; -use tracing::info; - -use sea_orm::sea_query::{ - self, extension::postgres::Type, Alias, Expr, ForeignKey, IntoIden, JoinType, Order, Query, - SelectStatement, SimpleExpr, Table, -}; -use sea_orm::{ - ActiveModelTrait, ActiveValue, Condition, ConnectionTrait, DbBackend, DbErr, DeriveIden, - DynIden, EntityTrait, FromQueryResult, Iterable, QueryFilter, Schema, Statement, - TransactionTrait, -}; -use sea_schema::{mysql::MySql, postgres::Postgres, probe::SchemaProbe, sqlite::Sqlite}; - -use super::{seaql_migrations, IntoSchemaManagerConnection, MigrationTrait, SchemaManager}; - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// Status of migration -pub enum MigrationStatus { - /// Not yet applied - Pending, - /// Applied - Applied, -} - -impl Display for MigrationStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let status = match self { - MigrationStatus::Pending => "Pending", - MigrationStatus::Applied => "Applied", - }; - write!(f, "{status}") - } -} - -pub struct Migration { - migration: Box, - status: MigrationStatus, -} - -impl Migration { - /// Get migration name from MigrationName trait implementation - pub fn name(&self) -> &str { - self.migration.name() - } - - /// Get migration status - pub fn status(&self) -> MigrationStatus { - self.status - } -} - -/// Performing migrations on a database -#[async_trait::async_trait] -pub trait MigratorTrait: Send { - /// Vector of migrations in time sequence - fn migrations() -> Vec>; - - /// Name of the migration table, it is `seaql_migrations` by default - fn migration_table_name() -> DynIden { - seaql_migrations::Entity.into_iden() - } - - /// Get list of migrations wrapped in `Migration` struct - fn get_migration_files() -> Vec { - Self::migrations() - .into_iter() - .map(|migration| Migration { - migration, - status: MigrationStatus::Pending, - }) - .collect() - } - - /// Get list of applied migrations from database - async fn get_migration_models(db: &C) -> 
Result, DbErr> - where - C: ConnectionTrait, - { - Self::install(db).await?; - let stmt = Query::select() - .table_name(Self::migration_table_name()) - .columns(seaql_migrations::Column::iter().map(IntoIden::into_iden)) - .order_by(seaql_migrations::Column::Version, Order::Asc) - .to_owned(); - let builder = db.get_database_backend(); - seaql_migrations::Model::find_by_statement(builder.build(&stmt)) - .all(db) - .await - } - - /// Get list of migrations with status - async fn get_migration_with_status(db: &C) -> Result, DbErr> - where - C: ConnectionTrait, - { - Self::install(db).await?; - let mut migration_files = Self::get_migration_files(); - let migration_models = Self::get_migration_models(db).await?; - - let migration_in_db: HashSet = migration_models - .into_iter() - .map(|model| model.version) - .collect(); - let migration_in_fs: HashSet = migration_files - .iter() - .map(|file| file.migration.name().to_string()) - .collect(); - - let pending_migrations = &migration_in_fs - &migration_in_db; - for migration_file in migration_files.iter_mut() { - if !pending_migrations.contains(migration_file.migration.name()) { - migration_file.status = MigrationStatus::Applied; - } - } - /* - let missing_migrations_in_fs = &migration_in_db - &migration_in_fs; - let errors: Vec = missing_migrations_in_fs - .iter() - .map(|missing_migration| { - format!("Migration file of version '{missing_migration}' is missing, this migration has been applied but its file is missing") - }).collect(); - - if !errors.is_empty() { - Err(DbErr::Custom(errors.join("\n"))) - } else { */ - Ok(migration_files) - /* } */ - } - - /// Get list of pending migrations - async fn get_pending_migrations(db: &C) -> Result, DbErr> - where - C: ConnectionTrait, - { - Self::install(db).await?; - Ok(Self::get_migration_with_status(db) - .await? - .into_iter() - .filter(|file| file.status == MigrationStatus::Pending) - .collect()) - } - - /// Get list of applied migrations - async fn get_applied_migrations(db: &C) -> Result, DbErr> - where - C: ConnectionTrait, - { - Self::install(db).await?; - Ok(Self::get_migration_with_status(db) - .await? - .into_iter() - .filter(|file| file.status == MigrationStatus::Applied) - .collect()) - } - - /// Create migration table `seaql_migrations` in the database - async fn install(db: &C) -> Result<(), DbErr> - where - C: ConnectionTrait, - { - let builder = db.get_database_backend(); - let schema = Schema::new(builder); - let mut stmt = schema - .create_table_from_entity(seaql_migrations::Entity) - .table_name(Self::migration_table_name()); - stmt.if_not_exists(); - db.execute(builder.build(&stmt)).await.map(|_| ()) - } - - /// Check the status of all migrations - async fn status(db: &C) -> Result<(), DbErr> - where - C: ConnectionTrait, - { - Self::install(db).await?; - - info!("Checking migration status"); - - for Migration { migration, status } in Self::get_migration_with_status(db).await? { - info!("Migration '{}'... 
{}", migration.name(), status); - } - - Ok(()) - } - - /// Drop all tables from the database, then reapply all migrations - async fn fresh<'c, C>(db: C) -> Result<(), DbErr> - where - C: IntoSchemaManagerConnection<'c>, - { - exec_with_connection::<'_, _, _>(db, move |manager| { - Box::pin(async move { exec_fresh::(manager).await }) - }) - .await - } - - /// Rollback all applied migrations, then reapply all migrations - async fn refresh<'c, C>(db: C) -> Result<(), DbErr> - where - C: IntoSchemaManagerConnection<'c>, - { - exec_with_connection::<'_, _, _>(db, move |manager| { - Box::pin(async move { - exec_down::(manager, None).await?; - exec_up::(manager, None).await - }) - }) - .await - } - - /// Rollback all applied migrations - async fn reset<'c, C>(db: C) -> Result<(), DbErr> - where - C: IntoSchemaManagerConnection<'c>, - { - exec_with_connection::<'_, _, _>(db, move |manager| { - Box::pin(async move { exec_down::(manager, None).await }) - }) - .await - } - - /// Apply pending migrations - async fn up<'c, C>(db: C, steps: Option) -> Result<(), DbErr> - where - C: IntoSchemaManagerConnection<'c>, - { - exec_with_connection::<'_, _, _>(db, move |manager| { - Box::pin(async move { exec_up::(manager, steps).await }) - }) - .await - } - - /// Rollback applied migrations - async fn down<'c, C>(db: C, steps: Option) -> Result<(), DbErr> - where - C: IntoSchemaManagerConnection<'c>, - { - exec_with_connection::<'_, _, _>(db, move |manager| { - Box::pin(async move { exec_down::(manager, steps).await }) - }) - .await - } -} - -async fn exec_with_connection<'c, C, F>(db: C, f: F) -> Result<(), DbErr> -where - C: IntoSchemaManagerConnection<'c>, - F: for<'b> Fn( - &'b SchemaManager<'_>, - ) -> Pin> + Send + 'b>>, -{ - let db = db.into_schema_manager_connection(); - - match db.get_database_backend() { - DbBackend::Postgres => { - let transaction = db.begin().await?; - let manager = SchemaManager::new(&transaction); - f(&manager).await?; - transaction.commit().await - } - DbBackend::MySql | DbBackend::Sqlite => { - let manager = SchemaManager::new(db); - f(&manager).await - } - } -} - -async fn exec_fresh(manager: &SchemaManager<'_>) -> Result<(), DbErr> -where - M: MigratorTrait + ?Sized, -{ - let db = manager.get_connection(); - - M::install(db).await?; - let db_backend = db.get_database_backend(); - - // Temporarily disable the foreign key check - if db_backend == DbBackend::Sqlite { - info!("Disabling foreign key check"); - db.execute(Statement::from_string( - db_backend, - "PRAGMA foreign_keys = OFF".to_owned(), - )) - .await?; - info!("Foreign key check disabled"); - } - - // Drop all foreign keys - if db_backend == DbBackend::MySql { - info!("Dropping all foreign keys"); - let stmt = query_mysql_foreign_keys(db); - let rows = db.query_all(db_backend.build(&stmt)).await?; - for row in rows.into_iter() { - let constraint_name: String = row.try_get("", "CONSTRAINT_NAME")?; - let table_name: String = row.try_get("", "TABLE_NAME")?; - info!( - "Dropping foreign key '{}' from table '{}'", - constraint_name, table_name - ); - let mut stmt = ForeignKey::drop(); - stmt.table(Alias::new(table_name.as_str())) - .name(constraint_name.as_str()); - db.execute(db_backend.build(&stmt)).await?; - info!("Foreign key '{}' has been dropped", constraint_name); - } - info!("All foreign keys dropped"); - } - - // Drop all tables - let stmt = query_tables(db); - let rows = db.query_all(db_backend.build(&stmt)).await?; - for row in rows.into_iter() { - let table_name: String = row.try_get("", "table_name")?; - 
info!("Dropping table '{}'", table_name); - let mut stmt = Table::drop(); - stmt.table(Alias::new(table_name.as_str())) - .if_exists() - .cascade(); - db.execute(db_backend.build(&stmt)).await?; - info!("Table '{}' has been dropped", table_name); - } - - // Drop all types - if db_backend == DbBackend::Postgres { - info!("Dropping all types"); - let stmt = query_pg_types(db); - let rows = db.query_all(db_backend.build(&stmt)).await?; - for row in rows { - let type_name: String = row.try_get("", "typname")?; - info!("Dropping type '{}'", type_name); - let mut stmt = Type::drop(); - stmt.name(Alias::new(&type_name)); - db.execute(db_backend.build(&stmt)).await?; - info!("Type '{}' has been dropped", type_name); - } - } - - // Restore the foreign key check - if db_backend == DbBackend::Sqlite { - info!("Restoring foreign key check"); - db.execute(Statement::from_string( - db_backend, - "PRAGMA foreign_keys = ON".to_owned(), - )) - .await?; - info!("Foreign key check restored"); - } - - // Reapply all migrations - exec_up::(manager, None).await -} - -async fn exec_up(manager: &SchemaManager<'_>, mut steps: Option) -> Result<(), DbErr> -where - M: MigratorTrait + ?Sized, -{ - let db = manager.get_connection(); - - M::install(db).await?; - - if let Some(steps) = steps { - info!("Applying {} pending migrations", steps); - } else { - info!("Applying all pending migrations"); - } - - let migrations = M::get_pending_migrations(db).await?.into_iter(); - if migrations.len() == 0 { - info!("No pending migrations"); - } - for Migration { migration, .. } in migrations { - if let Some(steps) = steps.as_mut() { - if steps == &0 { - break; - } - *steps -= 1; - } - info!("Applying migration '{}'", migration.name()); - migration.up(manager).await?; - info!("Migration '{}' has been applied", migration.name()); - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("SystemTime before UNIX EPOCH!"); - seaql_migrations::Entity::insert(seaql_migrations::ActiveModel { - version: ActiveValue::Set(migration.name().to_owned()), - applied_at: ActiveValue::Set(now.as_secs() as i64), - }) - .table_name(M::migration_table_name()) - .exec(db) - .await?; - } - - Ok(()) -} - -async fn exec_down(manager: &SchemaManager<'_>, mut steps: Option) -> Result<(), DbErr> -where - M: MigratorTrait + ?Sized, -{ - let db = manager.get_connection(); - - M::install(db).await?; - - if let Some(steps) = steps { - info!("Rolling back {} applied migrations", steps); - } else { - info!("Rolling back all applied migrations"); - } - - let migrations = M::get_applied_migrations(db).await?.into_iter().rev(); - if migrations.len() == 0 { - info!("No applied migrations"); - } - for Migration { migration, .. 
} in migrations { - if let Some(steps) = steps.as_mut() { - if steps == &0 { - break; - } - *steps -= 1; - } - info!("Rolling back migration '{}'", migration.name()); - migration.down(manager).await?; - info!("Migration '{}' has been rollbacked", migration.name()); - seaql_migrations::Entity::delete_many() - .filter(Expr::col(seaql_migrations::Column::Version).eq(migration.name())) - .table_name(M::migration_table_name()) - .exec(db) - .await?; - } - - Ok(()) -} - -fn query_tables(db: &C) -> SelectStatement -where - C: ConnectionTrait, -{ - match db.get_database_backend() { - DbBackend::MySql => MySql::query_tables(), - DbBackend::Postgres => Postgres::query_tables(), - DbBackend::Sqlite => Sqlite::query_tables(), - } -} - -fn get_current_schema(db: &C) -> SimpleExpr -where - C: ConnectionTrait, -{ - match db.get_database_backend() { - DbBackend::MySql => MySql::get_current_schema(), - DbBackend::Postgres => Postgres::get_current_schema(), - DbBackend::Sqlite => unimplemented!(), - } -} - -#[derive(DeriveIden)] -enum InformationSchema { - #[sea_orm(iden = "information_schema")] - Schema, - #[sea_orm(iden = "TABLE_NAME")] - TableName, - #[sea_orm(iden = "CONSTRAINT_NAME")] - ConstraintName, - TableConstraints, - TableSchema, - ConstraintType, -} - -fn query_mysql_foreign_keys(db: &C) -> SelectStatement -where - C: ConnectionTrait, -{ - let mut stmt = Query::select(); - stmt.columns([ - InformationSchema::TableName, - InformationSchema::ConstraintName, - ]) - .from(( - InformationSchema::Schema, - InformationSchema::TableConstraints, - )) - .cond_where( - Condition::all() - .add(Expr::expr(get_current_schema(db)).equals(( - InformationSchema::TableConstraints, - InformationSchema::TableSchema, - ))) - .add( - Expr::col(( - InformationSchema::TableConstraints, - InformationSchema::ConstraintType, - )) - .eq("FOREIGN KEY"), - ), - ); - stmt -} - -#[derive(DeriveIden)] -enum PgType { - Table, - Typname, - Typnamespace, - Typelem, -} - -#[derive(DeriveIden)] -enum PgNamespace { - Table, - Oid, - Nspname, -} - -fn query_pg_types(db: &C) -> SelectStatement -where - C: ConnectionTrait, -{ - let mut stmt = Query::select(); - stmt.column(PgType::Typname) - .from(PgType::Table) - .join( - JoinType::LeftJoin, - PgNamespace::Table, - Expr::col((PgNamespace::Table, PgNamespace::Oid)) - .equals((PgType::Table, PgType::Typnamespace)), - ) - .cond_where( - Condition::all() - .add( - Expr::expr(get_current_schema(db)) - .equals((PgNamespace::Table, PgNamespace::Nspname)), - ) - .add(Expr::col((PgType::Table, PgType::Typelem)).eq(0)), - ); - stmt -} - -trait QueryTable { - type Statement; - - fn table_name(self, table_name: DynIden) -> Self::Statement; -} - -impl QueryTable for SelectStatement { - type Statement = SelectStatement; - - fn table_name(mut self, table_name: DynIden) -> SelectStatement { - self.from(table_name); - self - } -} - -impl QueryTable for sea_query::TableCreateStatement { - type Statement = sea_query::TableCreateStatement; - - fn table_name(mut self, table_name: DynIden) -> sea_query::TableCreateStatement { - self.table(table_name); - self - } -} - -impl QueryTable for sea_orm::Insert -where - A: ActiveModelTrait, -{ - type Statement = sea_orm::Insert; - - fn table_name(mut self, table_name: DynIden) -> sea_orm::Insert { - sea_orm::QueryTrait::query(&mut self).into_table(table_name); - self - } -} - -impl QueryTable for sea_orm::DeleteMany -where - E: EntityTrait, -{ - type Statement = sea_orm::DeleteMany; - - fn table_name(mut self, table_name: DynIden) -> sea_orm::DeleteMany { - 
sea_orm::QueryTrait::query(&mut self).from_table(table_name); - self - } -} diff --git a/src/db/migration/prelude.rs b/src/db/migration/prelude.rs deleted file mode 100644 index 5556a094..00000000 --- a/src/db/migration/prelude.rs +++ /dev/null @@ -1,13 +0,0 @@ -//pub use super::cli; - -pub use super::connection::IntoSchemaManagerConnection; -pub use super::connection::SchemaManagerConnection; -pub use super::manager::SchemaManager; -pub use super::migrator::MigratorTrait; -pub use super::{MigrationName, MigrationTrait}; -pub use async_trait; -pub use sea_orm; -pub use sea_orm::sea_query; -pub use sea_orm::sea_query::*; -pub use sea_orm::DeriveIden; -pub use sea_orm::DeriveMigrationName; diff --git a/src/db/migration/seaql_migrations.rs b/src/db/migration/seaql_migrations.rs deleted file mode 100644 index 9926ea9c..00000000 --- a/src/db/migration/seaql_migrations.rs +++ /dev/null @@ -1,14 +0,0 @@ -use sea_orm::entity::prelude::*; - -#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] -#[sea_orm(table_name = "seaql_migrations")] -pub struct Model { - #[sea_orm(primary_key, auto_increment = false)] - pub version: String, - pub applied_at: i64, -} - -#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] -pub enum Relation {} - -impl ActiveModelBehavior for ActiveModel {} diff --git a/src/lib.rs b/src/lib.rs index 9ab9d72f..4b0d9635 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -115,11 +115,6 @@ pub mod locale; // Date and time handling. pub mod datetime; -// Database access. -#[cfg_attr(docsrs, doc(cfg(feature = "database")))] -#[cfg(feature = "database")] -pub mod db; - // Essential web framework. pub mod service; diff --git a/src/prelude.rs b/src/prelude.rs index 88e91dc0..f98a6675 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -36,9 +36,6 @@ pub use crate::locale::*; pub use crate::datetime::*; -#[cfg(feature = "database")] -pub use crate::{db, db::*, migrations}; - pub use crate::service; pub use crate::service::{HttpMessage, HttpRequest};
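
Note for downstream manifests: with the `database`, `mysql`, `postgres`, and `sqlite` features removed from Cargo.toml above, an application no longer selects a database backend through PageTop feature flags. A minimal sketch of the dependency line for a hypothetical application crate (the commented-out form mirrors the deleted STARTER.bin.Cargo.toml and is no longer valid after this change):

    [dependencies]
    # Previously, to enable database access via the removed feature flags:
    # pagetop = { version = "0.0", features = ["mysql"], default-features = false }
    # After this change, only the plain dependency remains:
    pagetop = "0.0"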