That's a hack, and a terrible one, but without it the user
would have to manually click refresh if the feed that was added
happens to redirect somewhere else.

The problem is in hammond-data/src/models/source.rs,
in fn request_constructor. It has to do with futures and lifetimes,
and I am not sure how to tackle it yet.
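
Roughly, one direction a fix could take is sketched below: follow the redirect
inside the future chain with futures 0.1's loop_fn, moving the hyper 0.11
Client through the loop state instead of borrowing it, so the resulting future
is 'static and owns everything it touches. follow_redirects is a hypothetical
helper for illustration, not code in the tree; a real version would reuse the
HttpsConnector client we already build and cap the number of hops.

    extern crate futures;
    extern crate hyper;

    use std::str::FromStr;

    use futures::future::{self, Future, Loop};
    use hyper::client::HttpConnector;
    use hyper::header::Location;
    use hyper::{Client, Response, StatusCode, Uri};

    /// Hypothetical helper: chase redirects by threading the `Client`
    /// through `loop_fn`'s state instead of borrowing it, so the returned
    /// future is 'static and no lifetime ties it to the caller's frame.
    fn follow_redirects(
        client: Client<HttpConnector>,
        start: Uri,
    ) -> Box<Future<Item = Response, Error = hyper::Error>> {
        Box::new(future::loop_fn((client, start), |(client, uri)| {
            client.get(uri).and_then(move |resp| match resp.status() {
                StatusCode::MovedPermanently
                | StatusCode::Found
                | StatusCode::TemporaryRedirect => {
                    // Relative Location values and a hop limit are
                    // ignored here for brevity.
                    let next = resp.headers()
                        .get::<Location>()
                        .and_then(|loc| Uri::from_str(loc).ok());
                    match next {
                        // Keep looping with the new uri, handing the
                        // client along as owned state.
                        Some(uri) => Ok(Loop::Continue((client, uri))),
                        None => Ok(Loop::Break(resp)),
                    }
                }
                _ => Ok(Loop::Break(resp)),
            })
        }))
    }

Threading ownership through loop_fn's state is the usual escape hatch when a
borrowed &Client makes the combinator chain outlive its stack frame.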
Jordan Petridis 2018-02-03 00:06:02 +02:00
parent f1f248fa40
commit e8ae0b0251
3 changed files with 56 additions and 4 deletions


@@ -208,6 +208,17 @@ pub fn get_source_from_uri(uri_: &str) -> Result<Source> {
         .map_err(From::from)
 }
 
+pub fn get_source_from_id(id_: i32) -> Result<Source> {
+    use schema::source::dsl::*;
+    let db = connection();
+    let con = db.get()?;
+
+    source
+        .filter(id.eq(id_))
+        .get_result::<Source>(&con)
+        .map_err(From::from)
+}
+
 pub fn get_podcast_from_source_id(sid: i32) -> Result<Podcast> {
     use schema::podcast::dsl::*;
     let db = connection();


@@ -85,6 +85,23 @@ pub fn run(sources: Vec<Source>, ignore_etags: bool) -> Result<()> {
     pipeline(sources, ignore_etags, &mut core, &pool, client)
 }
 
+/// Index a single `Source` and update the database with its feed.
+pub fn index_single_source(s: Source, ignore_etags: bool) -> Result<()> {
+    let pool = CpuPool::new_num_cpus();
+    let mut core = Core::new()?;
+    let handle = core.handle();
+
+    let client = Client::configure()
+        .connector(HttpsConnector::new(num_cpus::get(), &handle)?)
+        .build(&handle);
+
+    let work = s.into_feed(&client, pool.clone(), ignore_etags)
+        .and_then(clone!(pool => move |feed| pool.clone().spawn(feed.index())))
+        .map(|_| ());
+
+    core.run(work)
+}
+
 fn determine_ep_state(ep: NewEpisodeMinimal, item: &rss::Item) -> Result<IndexState<NewEpisode>> {
     // Check if feed exists
     let exists = dbqueries::episode_exists(ep.title(), ep.podcast_id())?;


@@ -23,11 +23,35 @@ pub fn refresh_feed(source: Option<Vec<Source>>, sender: Sender<Action>) {
     sender.send(Action::HeaderBarShowUpdateIndicator).unwrap();
 
     thread::spawn(move || {
-        let sources = source.unwrap_or_else(|| dbqueries::get_sources().unwrap());
+        let mut sources = source.unwrap_or_else(|| dbqueries::get_sources().unwrap());
 
-        if let Err(err) = pipeline::run(sources, false) {
-            error!("Error While trying to update the database.");
-            error!("Error msg: {}", err);
+        // Workaround to improve the feed-addition experience. Links to rss
+        // feeds are often just redirects (usually to an https version), and I
+        // haven't yet figured out a nice way to follow redirects without
+        // getting into lifetime hell with futures and hyper.
+        // So if the requested refresh is for a single feed and it fails to be
+        // indexed (a 301 redirect updates the source entry and bails out),
+        // another refresh is run. For more see hammond_data/src/models/source.rs,
+        // fn request_constructor; ping me on IRC or open an issue to tackle it.
+        if sources.len() == 1 {
+            let source = sources.remove(0);
+            let id = source.id();
+
+            if let Err(err) = pipeline::index_single_source(source, false) {
+                error!("Error While trying to update the database.");
+                error!("Error msg: {}", err);
+                let source = dbqueries::get_source_from_id(id).unwrap();
+                if let Err(err) = pipeline::index_single_source(source, false) {
+                    error!("Error While trying to update the database.");
+                    error!("Error msg: {}", err);
+                }
+            }
+        } else {
+            // This is what would normally run.
+            if let Err(err) = pipeline::run(sources, false) {
+                error!("Error While trying to update the database.");
+                error!("Error msg: {}", err);
+            }
         }
 
         sender.send(Action::HeaderBarHideUpdateIndicator).unwrap();
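
For the record, the single-feed branch above boils down to the following shape
(a hypothetical condensation reusing the crate's own functions, not code in
the tree): on a 301 the first index pass only rewrites the stored source uri
and then errors out, so the row is reloaded and indexed exactly once more.

    // Hypothetical condensation of the single-feed retry flow above,
    // assuming the crate's Result alias and the functions from this diff.
    fn refresh_single(id: i32) -> Result<()> {
        let source = dbqueries::get_source_from_id(id)?;
        if pipeline::index_single_source(source, false).is_err() {
            // The failed pass may have updated the stored uri (301 handling),
            // so reload the row and try once more.
            let source = dbqueries::get_source_from_id(id)?;
            pipeline::index_single_source(source, false)?;
        }
        Ok(())
    }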