I hate everything.

Jordan Petridis, 2017-11-20 16:57:27 +02:00
commit ce29602431 (parent e07683c046)
7 changed files with 61 additions and 74 deletions
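
At a glance, the change is an encapsulation refactor: lib.rs used to expose pub static ref POOL directly, with divergent test and non-test definitions, and every call site read POOL.clone().get().unwrap(). This commit makes the static private and routes all access through a new pub fn connection(), so the test/production split becomes an implementation detail of lib.rs. A minimal sketch of the same pattern using only today's standard library (OnceLock and FakePool are illustrative stand-ins for lazy_static! and the r2d2 pool, not names from the codebase):

    use std::sync::OnceLock;

    // Stand-in for r2d2::Pool: a cheaply clonable handle that hands out connections.
    #[derive(Clone)]
    struct FakePool;

    impl FakePool {
        fn get(&self) -> Result<&'static str, ()> {
            Ok("a pooled connection")
        }
    }

    // Private static: code outside the crate can no longer name POOL.
    static POOL: OnceLock<FakePool> = OnceLock::new();

    // The single public entry point, mirroring hammond's connection().
    pub fn connection() -> FakePool {
        POOL.get_or_init(|| FakePool).clone()
    }

    fn main() {
        // Call sites change from POOL.clone().get() to connection().get().
        let con = connection().get().unwrap();
        println!("{}", con);
    }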

==== File 1 of 7 ====

@@ -7,33 +7,33 @@ use chrono::prelude::*;
 /// Random db querries helper functions.
 /// Probably needs cleanup.
-use POOL;
+use connection;
 
 pub fn get_sources() -> QueryResult<Vec<Source>> {
     use schema::source::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     source.load::<Source>(&*con)
 }
 
 pub fn get_podcasts() -> QueryResult<Vec<Podcast>> {
     use schema::podcast::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     podcast.load::<Podcast>(&*con)
 }
 
 pub fn get_episodes() -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode.order(epoch.desc()).load::<Episode>(&*con)
 }
 
 pub fn get_downloaded_episodes() -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode
         .filter(local_uri.is_not_null())
         .load::<Episode>(&*con)
@@ -42,21 +42,21 @@ pub fn get_downloaded_episodes() -> QueryResult<Vec<Episode>> {
 pub fn get_played_episodes() -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode.filter(played.is_not_null()).load::<Episode>(&*con)
 }
 
 pub fn get_episode_from_id(ep_id: i32) -> QueryResult<Episode> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode.filter(id.eq(ep_id)).get_result::<Episode>(&*con)
 }
 
 pub fn get_episode_local_uri_from_id(ep_id: i32) -> QueryResult<Option<String>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode
         .filter(id.eq(ep_id))
@@ -67,7 +67,7 @@ pub fn get_episode_local_uri_from_id(ep_id: i32) -> QueryResult<Option<String>>
 pub fn get_episodes_with_limit(limit: u32) -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode
         .order(epoch.desc())
@@ -78,14 +78,14 @@ pub fn get_episodes_with_limit(limit: u32) -> QueryResult<Vec<Episode>> {
 pub fn get_podcast_from_id(pid: i32) -> QueryResult<Podcast> {
     use schema::podcast::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     podcast.filter(id.eq(pid)).get_result::<Podcast>(&*con)
 }
 
 pub fn get_pd_episodes(parent: &Podcast) -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     Episode::belonging_to(parent)
         .order(epoch.desc())
@@ -95,7 +95,7 @@ pub fn get_pd_episodes(parent: &Podcast) -> QueryResult<Vec<Episode>> {
 pub fn get_pd_unplayed_episodes(parent: &Podcast) -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     Episode::belonging_to(parent)
         .filter(played.is_null())
@@ -106,7 +106,7 @@ pub fn get_pd_unplayed_episodes(parent: &Podcast) -> QueryResult<Vec<Episode>> {
 pub fn get_pd_episodes_limit(parent: &Podcast, limit: u32) -> QueryResult<Vec<Episode>> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     Episode::belonging_to(parent)
         .order(epoch.desc())
@@ -117,14 +117,14 @@ pub fn get_pd_episodes_limit(parent: &Podcast, limit: u32) -> QueryResult<Vec<Ep
 pub fn get_source_from_uri(uri_: &str) -> QueryResult<Source> {
     use schema::source::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     source.filter(uri.eq(uri_)).get_result::<Source>(&*con)
 }
 
 pub fn get_podcast_from_title(title_: &str) -> QueryResult<Podcast> {
     use schema::podcast::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     podcast
         .filter(title.eq(title_))
         .get_result::<Podcast>(&*con)
@@ -133,12 +133,12 @@ pub fn get_podcast_from_title(title_: &str) -> QueryResult<Podcast> {
 pub fn get_episode_from_uri(uri_: &str) -> QueryResult<Episode> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     episode.filter(uri.eq(uri_)).get_result::<Episode>(&*con)
 }
 
 pub fn remove_feed(pd: &Podcast) -> QueryResult<usize> {
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
 
     con.transaction(|| -> QueryResult<usize> {
         delete_source(pd.source_id())?;
@@ -150,28 +150,28 @@ pub fn remove_feed(pd: &Podcast) -> QueryResult<usize> {
 pub fn delete_source(source_id: i32) -> QueryResult<usize> {
     use schema::source::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     diesel::delete(source.filter(id.eq(source_id))).execute(&*con)
 }
 
 pub fn delete_podcast(podcast_id: i32) -> QueryResult<usize> {
     use schema::podcast::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     diesel::delete(podcast.filter(id.eq(podcast_id))).execute(&*con)
 }
 
 pub fn delete_podcast_episodes(parent_id: i32) -> QueryResult<usize> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     diesel::delete(episode.filter(podcast_id.eq(parent_id))).execute(&*con)
 }
 
 pub fn update_none_to_played_now(parent: &Podcast) -> QueryResult<usize> {
     use schema::episode::dsl::*;
 
-    let con = POOL.clone().get().unwrap();
+    let con = connection().get().unwrap();
     let epoch_now = Utc::now().timestamp() as i32;
 
     con.transaction(|| -> QueryResult<usize> {

==== File 2 of 7 ====

@@ -6,7 +6,7 @@ use rss;
 use dbqueries;
 use parser;
-use POOL;
+use connection;
 
 use models::{Podcast, Source};
 use errors::*;
@@ -51,7 +51,7 @@ impl Feed {
             .map(|item| parser::new_episode(item, *pd.id()))
             .collect();
 
-        let tempdb = POOL.clone().get().unwrap();
+        let tempdb = connection().get().unwrap();
         let _ = tempdb.transaction::<(), Error, _>(|| {
             episodes.into_iter().for_each(|x| {
                 let e = x.index(&*tempdb);
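
A note on the hunk above: the episode batch is indexed inside one transaction, so SQLite issues a single commit for the whole feed instead of one per episode. Below is a minimal sketch of diesel's 1.x-era transaction API as used here (the closure takes no arguments and the connection is the &self receiver; diesel 2.x later changed this to pass the connection into the closure). The in-memory database is an assumption for the sake of a runnable example:

    extern crate diesel;

    use diesel::prelude::*;
    use diesel::result::Error;
    use diesel::sqlite::SqliteConnection;

    fn main() {
        let con = SqliteConnection::establish(":memory:").unwrap();

        // The closure body runs between BEGIN and COMMIT;
        // returning Err rolls the whole batch back.
        let res = con.transaction::<(), Error, _>(|| {
            // Batched inserts would go here.
            Ok(())
        });
        assert!(res.is_ok());
    }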

==== File 3 of 7 ====

@@ -32,7 +32,13 @@ pub mod errors;
 mod parser;
 mod schema;
 
+use r2d2_diesel::ConnectionManager;
+use diesel::SqliteConnection;
+
 use std::path::PathBuf;
+// use std::time::Duration;
+
+type Pool = r2d2::Pool<ConnectionManager<SqliteConnection>>;
 
 lazy_static!{
     #[allow(dead_code)]
@@ -56,39 +62,44 @@ lazy_static!{
         HAMMOND_XDG.create_data_directory("Downloads").unwrap()
     };
 
-    pub static ref DB_PATH: PathBuf = HAMMOND_XDG.place_data_file("hammond.db").unwrap();
+    static ref POOL: Pool = init_pool(DB_PATH.to_str().unwrap());
 }
 
 #[cfg(not(test))]
 lazy_static! {
-    pub static ref POOL: utils::Pool = utils::init_pool(DB_PATH.to_str().unwrap());
+    static ref DB_PATH: PathBuf = HAMMOND_XDG.place_data_file("hammond.db").unwrap();
 }
 
-#[cfg(test)]
-lazy_static! {
-    static ref TEMPDB: TempDB = get_temp_db();
-    pub static ref POOL: &'static utils::Pool = &TEMPDB.2;
-}
-
-#[cfg(test)]
-struct TempDB(tempdir::TempDir, PathBuf, utils::Pool);
-
 #[cfg(test)]
 extern crate tempdir;
 
 #[cfg(test)]
-/// Create and return a Temporary DB.
-/// Will be destroed once the returned variable(s) is dropped.
-fn get_temp_db() -> TempDB {
-    let tmp_dir = tempdir::TempDir::new("hammond_unit_test").unwrap();
-    let db_path = tmp_dir.path().join("test.db");
-
-    let pool = utils::init_pool(db_path.to_str().unwrap());
+lazy_static! {
+    static ref TEMPDIR: tempdir::TempDir = {
+        tempdir::TempDir::new("hammond_unit_test").unwrap()
+    };
+
+    static ref DB_PATH: PathBuf = TEMPDIR.path().join("hammond.db");
+}
+
+pub fn connection() -> Pool {
+    POOL.clone()
+}
+
+fn init_pool(db_path: &str) -> Pool {
+    let config = r2d2::Config::builder()
+        // .pool_size(60)
+        // .min_idle(Some(60))
+        // .connection_timeout(Duration::from_secs(60))
+        .build();
+    let manager = ConnectionManager::<SqliteConnection>::new(db_path);
+    let pool = r2d2::Pool::new(config, manager).expect("Failed to create pool.");
+    info!("Database pool initialized.");
     {
         let db = pool.clone().get().unwrap();
         utils::run_migration_on(&*db).unwrap();
     }
 
-    TempDB(tmp_dir, db_path, pool)
+    pool
 }
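
A design note on the new lib.rs: the old utils::Pool wrapped the r2d2 pool in an Arc (see File 6 below), but r2d2::Pool is already reference-counted internally, so connection() can hand out plain clones. A short sketch of the checkout pattern built from the calls in this diff; note the r2d2 0.7-era API (later r2d2 releases replaced Config::builder() and Pool::new(config, manager) with Pool::builder()):

    extern crate diesel;
    extern crate r2d2;
    extern crate r2d2_diesel;

    use diesel::sqlite::SqliteConnection;
    use r2d2_diesel::ConnectionManager;

    type Pool = r2d2::Pool<ConnectionManager<SqliteConnection>>;

    fn init_pool(db_path: &str) -> Pool {
        let config = r2d2::Config::builder().build();
        let manager = ConnectionManager::<SqliteConnection>::new(db_path);
        r2d2::Pool::new(config, manager).expect("Failed to create pool.")
    }

    fn main() {
        let pool = init_pool(":memory:");
        // Every clone is a handle to the same underlying pool. get() checks a
        // connection out; dropping the guard returns it to the pool.
        let con = pool.clone().get().unwrap();
        drop(con);
    }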

==== File 4 of 7 ====

@@ -3,7 +3,7 @@ use diesel;
 use schema::{episode, podcast, source};
 use models::{Podcast, Source};
-use POOL;
+use connection;
 
 use errors::*;
 use dbqueries;
@@ -29,7 +29,7 @@ impl<'a> NewSource<'a> {
     fn index(&self) {
         use schema::source::dsl::*;
 
-        let tempdb = POOL.clone().get().unwrap();
+        let tempdb = connection().get().unwrap();
         // Throw away the result like `insert or ignore`
         // Diesel deos not support `insert or ignore` yet.
         let _ = diesel::insert_into(source).values(self).execute(&*tempdb);
@@ -112,13 +112,13 @@ impl NewPodcast {
         match pd {
             Ok(foo) => if foo.link() != self.link {
-                let tempdb = POOL.clone().get().unwrap();
+                let tempdb = connection().get().unwrap();
                 diesel::replace_into(podcast)
                     .values(self)
                     .execute(&*tempdb)?;
             },
             Err(_) => {
-                let tempdb = POOL.clone().get().unwrap();
+                let tempdb = connection().get().unwrap();
                 diesel::insert_into(podcast).values(self).execute(&*tempdb)?;
             }
         }

==== File 5 of 7 ====

@@ -10,7 +10,7 @@ use feed::Feed;
 use errors::*;
 use models::insertables::NewPodcast;
-use POOL;
+use connection;
 
 use std::io::Read;
 use std::str::FromStr;
@@ -129,7 +129,7 @@ impl Episode {
     }
 
     pub fn save(&self) -> QueryResult<Episode> {
-        let tempdb = POOL.clone().get().unwrap();
+        let tempdb = connection().get().unwrap();
         self.save_changes::<Episode>(&*tempdb)
     }
 }
@@ -226,7 +226,7 @@ impl Podcast {
     }
 
     pub fn save(&self) -> QueryResult<Podcast> {
-        let tempdb = POOL.clone().get().unwrap();
+        let tempdb = connection().get().unwrap();
         self.save_changes::<Podcast>(&*tempdb)
     }
 }
@@ -285,7 +285,7 @@ impl<'a> Source {
     }
 
     pub fn save(&self) -> QueryResult<Source> {
-        let tempdb = POOL.clone().get().unwrap();
+        let tempdb = connection().get().unwrap();
         self.save_changes::<Source>(&*tempdb)
     }

==== File 6 of 7 ====

@@ -1,9 +1,7 @@
 use rayon::prelude::*;
 use chrono::prelude::*;
 
-use r2d2;
 use diesel::sqlite::SqliteConnection;
-use r2d2_diesel::ConnectionManager;
 
 use errors::*;
 use dbqueries;
@@ -11,30 +9,9 @@ use models::Episode;
 use std::path::Path;
 use std::fs;
-use std::sync::Arc;
-use std::time::Duration;
-
-use POOL;
 
 embed_migrations!("migrations/");
 
-pub type Pool = Arc<r2d2::Pool<ConnectionManager<SqliteConnection>>>;
-
-pub fn init() -> Result<()> {
-    let con = POOL.clone().get().unwrap();
-    run_migration_on(&*con)
-}
-
-pub fn init_pool(db_path: &str) -> Pool {
-    let config = r2d2::Config::builder()
-        .connection_timeout(Duration::from_secs(60))
-        .build();
-    let manager = ConnectionManager::<SqliteConnection>::new(db_path);
-    let pool = r2d2::Pool::new(config, manager).expect("Failed to create pool.");
-    info!("Database pool initialized.");
-
-    Arc::new(pool)
-}
-
 pub fn run_migration_on(connection: &SqliteConnection) -> Result<()> {
     info!("Running DB Migrations...");
     embedded_migrations::run(connection)?;
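
The utils::init() entry point could be dropped because migrations now run when the pool is first built in lib.rs. For context, embed_migrations! bakes the SQL files under migrations/ into the binary at compile time and generates an embedded_migrations module. A minimal sketch of that mechanism, written against the diesel_migrations 1.x crate (the standalone crate name is an assumption; in 2017 the macro shipped with diesel's own migration support), with an in-memory database:

    #[macro_use]
    extern crate diesel_migrations;
    extern crate diesel;

    use diesel::prelude::*;
    use diesel::sqlite::SqliteConnection;

    // Compiles every migration under migrations/ into the binary.
    embed_migrations!("migrations/");

    fn main() {
        let con = SqliteConnection::establish(":memory:").unwrap();
        // Applies only the migrations this database has not yet run.
        embedded_migrations::run(&con).unwrap();
    }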

==== File 7 of 7 ====

@@ -105,7 +105,6 @@ fn main() {
     // TODO: make the the logger a cli -vv option
     loggerv::init_with_level(LogLevel::Info).unwrap();
     static_resource::init().expect("Something went wrong with the resource file initialization.");
-    hammond_data::utils::init().expect("Hammond Initialazation failed.");
 
     let application = gtk::Application::new("org.gnome.Hammond", gio::ApplicationFlags::empty())
         .expect("Initialization failed...");