[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish.spec
|
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/.gitignore
^
|
@@ -0,0 +1 @@
+__pycache__
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/LICENSE.ISC
^
|
@@ -1,5 +1,5 @@
gPodder: Media and podcast aggregator
-Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/MANIFEST.in
^
|
@@ -1,4 +1,4 @@
include README LICENSE.* MANIFEST.in ChangeLog makefile setup.py
recursive-include share *
recursive-include po *
-recursive-include tools *
+recursive-include test *
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/README
^
|
@@ -6,7 +6,7 @@
|___/
Media and podcast aggregator
- Copyright 2005-2013 Thomas Perl and the gPodder Team
+ Copyright 2005-2014 Thomas Perl and the gPodder Team
[ LICENSE ]
@@ -48,8 +48,9 @@
[ TESTING ]
- To run tests, use...
- make tests
+ To run automated tests, use...
+
+ make test
Tests in gPodder are written in two different ways:
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/bin/gpo
^
|
@@ -2,7 +2,7 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -30,6 +30,7 @@
subscribe URL [TITLE] Subscribe to a new feed at URL (as TITLE)
rename URL TITLE Rename feed at URL to TITLE
+ rewrite OLDURL NEWURL Change the feed URL of [OLDURL] to [NEWURL]
unsubscribe URL Unsubscribe from feed at URL
import FILENAME|URL Subscribe to all podcasts in an OPML file
@@ -47,6 +48,24 @@
download [URL] Download new episodes (all or only from URL)
pending [URL] List new episodes (all or only from URL)
episodes [URL] List episodes (all or only from URL)
+ query EQL Query episodes based on an EQL expression
+
+ "pending", "episodes" and "query" show an
+ alphanumeric ID of the episode that can be
+ used with the following commands:
+
+ mark old|new EPISODE Mark episode as old or new (new = pending)
+ fetch EPISODE Download a single episode now
+ rm EPISODE Delete episode
+ details EPISODE Display episode details and shownotes
+
+ apply COMMAND Apply COMMAND (mark old|new, fetch, rm,
+ details) to episodes matched in the last
+ query command
+
+ - gpodder.net -
+
+ search QUERY Search for podcasts on gpodder.net
- Configuration -
@@ -56,18 +75,18 @@
versioncheck Check if a new gPodder version is available
license Show the software license information
- rewrite OLDURL NEWURL Change the feed URL of [OLDURL] to [NEWURL]
+ registry Print registered resolvers
+ shortcuts List available URL shortcuts
"""
-
-
import sys
import collections
import os
import re
import inspect
import functools
+import itertools
try:
import readline
except ImportError:
@@ -86,6 +105,12 @@
fcntl = None
struct = None
+try:
+ import mygpoclient.public
+except:
+ mygpoclient = None
+
+
# A poor man's argparse/getopt - but it works for our use case :)
verbose = False
for flag in ('-v', '--verbose'):
@@ -117,25 +142,40 @@
import gpodder
from gpodder import core
-from gpodder import download
from gpodder import opml
from gpodder import util
+from gpodder import query
+from gpodder import registry
from gpodder.config import config_value_to_string
+
def incolor(color_id, s):
if have_ansi and cli._config.ui.cli.colors:
return '\033[9%dm%s\033[0m' % (color_id, s)
return s
# ANSI Colors: red = 1, green = 2, yellow = 3, blue = 4
-inred, ingreen, inyellow, inblue = (functools.partial(incolor, x)
- for x in range(1, 5))
+inred, ingreen, inyellow, inblue = (functools.partial(incolor, x) for x in range(1, 5))
+
def FirstArgumentIsPodcastURL(function):
"""Decorator for functions that take a podcast URL as first arg"""
setattr(function, '_first_arg_is_podcast', True)
return function
+def needs_mygpoclient(function):
+ """ Wrap methods that require mygpoclient """
+ @functools.wraps(function)
+ def _wrapper(self, *args, **kwargs):
+
+ if mygpoclient is None:
+ self._error(_('Install mygpoclient for gpodder.net features'))
+ return False
+
+ return function(self, *args, **kwargs)
+
+ return _wrapper
+
def get_terminal_size():
if None in (termios, fcntl, struct):
return (80, 24)
@@ -146,23 +186,24 @@
rows, cols, xp, yp = struct.unpack('HHHH', x)
return rows, cols
+
class gPodderCli(object):
COLUMNS = 80
EXIT_COMMANDS = ('quit', 'exit', 'bye')
- def __init__(self, prefix, verbose):
- self.core = core.Core(prefix=prefix, verbose=verbose)
+ def __init__(self, verbose):
+ self.core = core.Core(verbose=verbose)
self._db = self.core.db
self._config = self.core.config
self._model = self.core.model
self._current_action = ''
- self._commands = dict((name.rstrip('_'), func)
- for name, func in inspect.getmembers(self)
- if inspect.ismethod(func) and not name.startswith('_'))
+ self._commands = dict((name.rstrip('_'), func) for name, func in inspect.getmembers(self)
+ if inspect.ismethod(func) and not name.startswith('_'))
self._prefixes, self._expansions = self._build_prefixes_expansions()
self._prefixes.update({'?': 'help'})
self._valid_commands = sorted(self._prefixes.values())
+ self._last_query_match = []
def _build_prefixes_expansions(self):
prefixes = {}
@@ -231,20 +272,37 @@
# -------------------------------------------------------------------
def import_(self, url):
+ """Import subscriptions from an OPML file
+
+ import http://example.com/subscriptions.opml
+
+ Import subscriptions from the given URL
+
+ import ./feeds.opml
+
+ Import subscriptions from a local file
+ """
for channel in opml.Importer(url).items:
self.subscribe(channel['url'], channel.get('title'))
def export(self, filename):
+ """Export subscriptions to an OPML file
+
+ export ./subscriptions.opml
+
+      Export the subscriptions to a local file
+ """
podcasts = self._model.get_podcasts()
opml.Exporter(filename).write(podcasts)
- def get_podcast(self, url, create=False, check_only=False):
+ def _get_podcast(self, url, create=False, check_only=False):
"""Get a specific podcast by URL
Returns a podcast object for the URL or None if
the podcast has not been subscribed to.
"""
- url = util.normalize_feed_url(url)
+ url = self.core.model.normalize_feed_url(url)
+
if url is None:
self._error(_('Invalid URL: %(url)s') % {'url': url})
return None
@@ -263,16 +321,24 @@
return None
def subscribe(self, url, title=None):
+ """Subscribe to a new podcast via a URL
+
+ subscribe http://example.org/feed.rss
+
+ Subscribe to the feed at the given URL
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/makefile
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,7 @@
##########################################################################
PYTHON ?= python3
+PEP8 ?= pep8
##########################################################################
@@ -27,6 +28,7 @@
@echo " make headlink Print commit URL for the current Git head"
@echo ""
@echo " make test Run automated tests"
+ @echo " make pep8 Run pep8 utility to check code style"
@echo " make clean Remove generated and compiled files"
@echo " make distclean 'make clean' + remove dist/"
@echo ""
@@ -50,6 +52,9 @@
releasetest: test $(POFILES)
for lang in $(POFILES); do $(MSGFMT) --check $$lang; done
+pep8:
+ $(PEP8) --max-line-length=100 src
+
##########################################################################
release: releasetest distclean
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/po/de.po
^
|
@@ -7,7 +7,7 @@
msgstr ""
"Project-Id-Version: gpodder 4.0.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-09-29 23:10+0200\n"
+"POT-Creation-Date: 2014-03-14 23:28+0100\n"
"PO-Revision-Date: 2013-09-29 23:10+0200\n"
"Last-Translator: Thomas Perl <thp@gpodder.org>\n"
"Language-Team: German <de@li.org>\n"
@@ -17,106 +17,203 @@
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;\n"
-#: bin/gpo:249 bin/gpo:519
+#: bin/gpo:172
+msgid "Install mygpoclient for gpodder.net features"
+msgstr "Für gpodder.net-Funktionen muss mygpoclient installiert sein"
+
+#: bin/gpo:307 bin/gpo:946
#, python-format
msgid "Invalid URL: %(url)s"
msgstr "Ungültige URL: %(url)s"
-#: bin/gpo:262
+#: bin/gpo:320
#, python-format
msgid "Not subscribed to %(url)s"
msgstr "Kein Abonnement für %(url)s"
-#: bin/gpo:266
+#: bin/gpo:332
#, python-format
msgid "Subscribing to %(url)s"
msgstr "Abonniere %(url)s"
-#: bin/gpo:271
+#: bin/gpo:337
#, python-format
msgid "Already subscribed to %(url)s"
msgstr "Abonement existiert bereits: %(url)s"
-#: bin/gpo:277
+#: bin/gpo:343
#, python-format
msgid "Subscription to %(url)s failed"
msgstr "Abonnieren von %(url)s fehlgeschlagen"
-#: bin/gpo:306
+#: bin/gpo:352
+#, python-format
+msgid "Resolved feed URL: %(url)s"
+msgstr "Aufgelöste Feed-URL: %(url)s"
+
+#: bin/gpo:386
#, python-format
msgid "Configuration option %(key)s not found"
msgstr "Konfigurations-Option %(key)s nicht gefunden"
-#: bin/gpo:310
+#: bin/gpo:390
#, python-format
msgid "Invalid configuration option: %(key)s"
msgstr "Ungültige Konfigurations-Option: %(key)s"
-#: bin/gpo:333
+#: bin/gpo:425
#, python-format
msgid "Removed %(url)s"
msgstr "%(url)s entfernt"
-#: bin/gpo:402
+#: bin/gpo:516
+msgid "Usage: query EQL"
+msgstr "Aufruf: query EQL"
+
+#: bin/gpo:535
+#, python-format
+msgid "%(count)d episode matched"
+msgid_plural "%(count)d episodes matched"
+msgstr[0] "%(count)d Episode gefunden"
+msgstr[1] "%(count)d Episoden gefunden"
+
+#: bin/gpo:537
+msgid "Use \"apply\" to apply a single-episode command"
+msgstr "Benutze \"apply\" um einen Episoden-Befehl anzuwenden"
+
+#: bin/gpo:545
msgid "Subscription suspended"
msgstr "Abonnement ausgesetzt"
-#: bin/gpo:417
+#: bin/gpo:569
#, python-format
msgid "%(count)d new episode"
msgid_plural "%(count)d new episodes"
msgstr[0] "%(count)d neue Episode"
msgstr[1] "%(count)d neue Episoden"
-#: bin/gpo:423
+#: bin/gpo:584
msgid "Checking for new episodes"
msgstr "Neue Episoden werden gesucht"
-#: bin/gpo:432
+#: bin/gpo:591
#, python-format
msgid "Skipping %(podcast)s"
msgstr "Überspringe %(podcast)s"
-#: bin/gpo:457
+#: bin/gpo:636
#, python-format
-msgid "Downloading %(episode)s"
-msgstr "Lade %(episode)s herunter"
+msgid "Invalid episode ID: %(error)s"
+msgstr "Fehlerhafte Episoden-ID: %(error)s"
+
+#: bin/gpo:644
+#, python-format
+msgid "Episode ID not found: %(id)x"
+msgstr "Episoden-ID nicht gefunden: %(id)x"
+
+#: bin/gpo:663
+#, python-format
+msgid "Invalid action: %(action)s. Valid actions: %(valid_actions)s"
+msgstr "Ungültiger Befehl: %(action)s. Verfügbare Befehle: %(valid_actions)s"
-#: bin/gpo:475
+#: bin/gpo:672
+#, python-format
+msgid "Episode marked as old: %(title)s"
+msgstr "Episode als alt markiert: %(title)s"
+
+#: bin/gpo:675
+#, python-format
+msgid "Episode marked as new: %(title)s"
+msgstr "Episode als neu markiert: %(title)s"
+
+#: bin/gpo:715
+#, python-format
+msgid "Episode deleted: %(episode)s"
+msgstr "Episode gelöscht: %(episode)s"
+
+#: bin/gpo:768
+msgid "\"apply\" can only be used during an interactive session"
+msgstr "\"apply\" kann nur während einer interaktiven Sitzung verwendet werden"
+
+#: bin/gpo:772
+msgid "Empty query result (use \"query\" first)"
+msgstr "Leeres Abfrage-Ergebnis (zuerst \"query\" benutzen)"
+
+#: bin/gpo:776
+msgid "Cannot apply this command"
+msgstr "Kann diesen Befehl nicht anwenden"
+
+#: bin/gpo:787
+msgid "Please provide a search query"
+msgstr "Bitte eine Suchanfrage eingeben"
+
+#: bin/gpo:803
#, python-format
msgid "%(count)d episode downloaded"
msgid_plural "%(count)d episodes downloaded"
msgstr[0] "%(count)d Episode heruntergeladen"
msgstr[1] "%(count)d Episoden heruntergeladen"
-#: bin/gpo:488
+#: bin/gpo:806
+#, python-format
+msgid "Downloading %(episode)s"
+msgstr "Lade %(episode)s herunter"
+
+#: bin/gpo:858
#, python-format
msgid "Subscription suspended: %(url)s"
msgstr "Abonnement ausgesetzt: %(url)s"
-#: bin/gpo:500
+#: bin/gpo:879
#, python-format
msgid "Subscription resumed: %(url)s"
msgstr "Abonnement fortgesetzt: %(url)s"
-#: bin/gpo:508
+#: bin/gpo:899
msgid "No software updates available"
msgstr "Keine Software-Aktualisierungen verfügbar"
-#: bin/gpo:510
+#: bin/gpo:901
+#, python-format
+msgid "New version %(latestversion)s available (released: %(latestdate)s)"
+msgstr "Neue Version %(latestversion)s verfügbar (veröffentlicht: %(latestdate)s)"
+
+#: bin/gpo:902
#, python-format
-msgid "New version %(version)s available: %(url)s"
-msgstr "Neue Version %(version)s verfügbar: %(url)s"
+msgid "You have version %(thisversion)s (released: %(thisdate)s)"
+msgstr "Sie haben Version %(thisversion)s (veröffentlicht: %(thisdate)s)"
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/po/messages.pot
^
|
@@ -8,7 +8,7 @@
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-09-29 23:10+0200\n"
+"POT-Creation-Date: 2014-03-14 23:28+0100\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -18,106 +18,203 @@
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n"
-#: bin/gpo:249 bin/gpo:519
+#: bin/gpo:172
+msgid "Install mygpoclient for gpodder.net features"
+msgstr ""
+
+#: bin/gpo:307 bin/gpo:946
#, python-format
msgid "Invalid URL: %(url)s"
msgstr ""
-#: bin/gpo:262
+#: bin/gpo:320
#, python-format
msgid "Not subscribed to %(url)s"
msgstr ""
-#: bin/gpo:266
+#: bin/gpo:332
#, python-format
msgid "Subscribing to %(url)s"
msgstr ""
-#: bin/gpo:271
+#: bin/gpo:337
#, python-format
msgid "Already subscribed to %(url)s"
msgstr ""
-#: bin/gpo:277
+#: bin/gpo:343
#, python-format
msgid "Subscription to %(url)s failed"
msgstr ""
-#: bin/gpo:306
+#: bin/gpo:352
+#, python-format
+msgid "Resolved feed URL: %(url)s"
+msgstr ""
+
+#: bin/gpo:386
#, python-format
msgid "Configuration option %(key)s not found"
msgstr ""
-#: bin/gpo:310
+#: bin/gpo:390
#, python-format
msgid "Invalid configuration option: %(key)s"
msgstr ""
-#: bin/gpo:333
+#: bin/gpo:425
#, python-format
msgid "Removed %(url)s"
msgstr ""
-#: bin/gpo:402
+#: bin/gpo:516
+msgid "Usage: query EQL"
+msgstr ""
+
+#: bin/gpo:535
+#, python-format
+msgid "%(count)d episode matched"
+msgid_plural "%(count)d episodes matched"
+msgstr[0] ""
+msgstr[1] ""
+
+#: bin/gpo:537
+msgid "Use \"apply\" to apply a single-episode command"
+msgstr ""
+
+#: bin/gpo:545
msgid "Subscription suspended"
msgstr ""
-#: bin/gpo:417
+#: bin/gpo:569
#, python-format
msgid "%(count)d new episode"
msgid_plural "%(count)d new episodes"
msgstr[0] ""
msgstr[1] ""
-#: bin/gpo:423
+#: bin/gpo:584
msgid "Checking for new episodes"
msgstr ""
-#: bin/gpo:432
+#: bin/gpo:591
#, python-format
msgid "Skipping %(podcast)s"
msgstr ""
-#: bin/gpo:457
+#: bin/gpo:636
#, python-format
-msgid "Downloading %(episode)s"
+msgid "Invalid episode ID: %(error)s"
+msgstr ""
+
+#: bin/gpo:644
+#, python-format
+msgid "Episode ID not found: %(id)x"
+msgstr ""
+
+#: bin/gpo:663
+#, python-format
+msgid "Invalid action: %(action)s. Valid actions: %(valid_actions)s"
+msgstr ""
+
+#: bin/gpo:672
+#, python-format
+msgid "Episode marked as old: %(title)s"
+msgstr ""
+
+#: bin/gpo:675
+#, python-format
+msgid "Episode marked as new: %(title)s"
+msgstr ""
+
+#: bin/gpo:715
+#, python-format
+msgid "Episode deleted: %(episode)s"
+msgstr ""
+
+#: bin/gpo:768
+msgid "\"apply\" can only be used during an interactive session"
+msgstr ""
+
+#: bin/gpo:772
+msgid "Empty query result (use \"query\" first)"
+msgstr ""
+
+#: bin/gpo:776
+msgid "Cannot apply this command"
msgstr ""
-#: bin/gpo:475
+#: bin/gpo:787
+msgid "Please provide a search query"
+msgstr ""
+
+#: bin/gpo:803
#, python-format
msgid "%(count)d episode downloaded"
msgid_plural "%(count)d episodes downloaded"
msgstr[0] ""
msgstr[1] ""
-#: bin/gpo:488
+#: bin/gpo:806
+#, python-format
+msgid "Downloading %(episode)s"
+msgstr ""
+
+#: bin/gpo:858
#, python-format
msgid "Subscription suspended: %(url)s"
msgstr ""
-#: bin/gpo:500
+#: bin/gpo:879
#, python-format
msgid "Subscription resumed: %(url)s"
msgstr ""
-#: bin/gpo:508
+#: bin/gpo:899
msgid "No software updates available"
msgstr ""
-#: bin/gpo:510
+#: bin/gpo:901
#, python-format
-msgid "New version %(version)s available: %(url)s"
+msgid "New version %(latestversion)s available (released: %(latestdate)s)"
msgstr ""
-#: bin/gpo:574
+#: bin/gpo:902
+#, python-format
+msgid "You have version %(thisversion)s (released: %(thisdate)s)"
+msgstr ""
+
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/setup.py
^
|
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
#
# setup.py: gPodder Setup Script
-# Copyright (c) 2005-2013, Thomas Perl <m@thp.io>
+# Copyright (c) 2005-2014, Thomas Perl <m@thp.io>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -33,7 +33,9 @@
author, email = re.match(r'^(.*) <(.*)>$', metadata['author']).groups()
-class MissingFile(BaseException): pass
+class MissingFile(BaseException):
+ pass
+
def info(message, item=None):
print('=>', message, item if item is not None else '')
@@ -131,18 +133,17 @@
setup(
- name = 'gpodder',
- version = metadata['version'],
- description = metadata['tagline'],
- license = metadata['license'],
- url = metadata['url'],
-
- author = author,
- author_email = email,
-
- package_dir = {'': 'src'},
- packages = packages,
- scripts = scripts,
- data_files = data_files,
+ name='gpodder-core',
+ version=metadata['version'],
+ description=metadata['tagline'],
+ license=metadata['license'],
+ url=metadata['url'],
+
+ author=author,
+ author_email=email,
+
+ package_dir={'': 'src'},
+ packages=packages,
+ scripts=scripts,
+ data_files=data_files,
)
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/share/man/man1/gpo.1
^
|
@@ -1,6 +1,6 @@
-.TH GPO "1" "April 2013" "gpodder 3.5.1" "User Commands"
+.TH GPO "1" "March 2014" "gpodder 4.0.0" "User Commands"
.SH NAME
-gpo \- Text mode interface of gPodder
+gpo \- gPodder command-line interface
.SH SYNOPSIS
.B gpo
[\fI--verbose|-v\fR]
@@ -8,15 +8,9 @@
.SH DESCRIPTION
.PP
-gpo is the text mode interface of gPodder. Run it without any arguments to
-start the interactive shell (see below). Use "gpo help" to get a list of
-supported commands.
-.PP
-gpo can be used to manage podcasts from the command line without having to
-start gPodder. It can also be used to automate tasks such as downloading or
-updating feeds.
-.PP
-The database and files are the same as used by \fIgpodder(1)\fR.
+gpo is the text mode interface to gPodder. gPodder downloads and manages free
+audio and video content ("podcasts") for you. Run it without any arguments to
+start the interactive shell, then type "help" for an overview of commands.
.SH INTERACTIVE SHELL MODE
.PP
@@ -24,10 +18,6 @@
mode. From there, you can type commands directly. When readline is available,
this shell supports completion using <Tab>. Podcast feed URLs are also
completed for commands that take the URL of a podcast as argument.
-.PP
-Some commands (e.g. \fIsearch\fR and \fItoplist\fR) will provide a query in
-interactive shell mode (to subscribe to podcasts). These queries will not be
-shown when started directly from the command line.
.SH COMMAND PREFIXES
.PP
@@ -40,37 +30,63 @@
recommended (e.g. use "gpo update" and not "gpo up" in scripts and cronjobs).
The short command prefixes are mostly useful for interactive usage.
-.SH EXAMPLES
-
+.SH QUERYING USING EQL
+.PP
+Using the
+.I query
+command allows you to use the full range of Episode Query Language expressions
+that gPodder supports. Combined with the
+.I apply
+command, this can be very powerful.
+.PP
+For example, you can mark all episodes that
+are videos, not yet downloaded, smaller than 10 MB and released in the last 7
+days as new like this (in the interactive shell mode):
.PP
-.B gpo
.RS 4
-Enter interactive shell mode
+.B query video and not downloaded and mb < 10 and since < 7
+.PP
+.B apply mark new
.RE
.PP
-.B gpo update && gpo download
+Similarly, you can mark all new episodes as old with a combination of:
+.PP
.RS 4
-Check for new episodes, then download all new episodes
+.B query new
+.PP
+.B apply mark old
.RE
-
.PP
-.B gpo search linux outlaws
+Delete all downloaded episodes that have been downloaded more than 15 days ago:
+.PP
+.RS 4
+.B query downloaded and age > 15
+.PP
+.B apply rm
+.RE
+.PP
+And finally, download all audio files that are shorter than 30 minutes and that
+are not yet downloaded:
+.PP
.RS 4
-Search the directory for podcasts named "linux outlaws"
.B query audio and minutes < 30 and not downloaded
+.PP
+.B apply fetch
.RE
+.SH EXAMPLES
+
.PP
-.B gpo youtube http://youtube.com/watch?v=oHg5SJYRHA0
+.B gpo
.RS 4
-Print download URL of a YouTube video to stdout
+Enter interactive shell mode
.RE
-
-.SH SEE ALSO
.PP
-gpodder(1)
+.B gpo update && gpo download
+.RS 4
+Check for new episodes, then download all new episodes
+.RE
.SH BUGS
.PP
-If you find bugs, don't keep them for yourself!
-.PP
-Report bugs and feature requests at \fIhttp://bugs.gpodder.org/\fR
+Report bugs at \fIhttp://bugs.gpodder.org/\fR
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/__init__.py
^
|
@@ -4,7 +4,7 @@
"""
gPodder: Media and podcast aggregator
-Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
Historically, gPodder was licensed under the terms of the "GNU GPLv2 or
later", and has been upgraded to "GNU GPLv3 or later" in August 2007.
@@ -48,14 +48,14 @@
"""
# This metadata block gets parsed by setup.py - use single quotes only
-__tagline__ = 'Media and podcast aggregator'
-__author__ = 'Thomas Perl <thp@gpodder.org>'
-__version__ = '4.0.0'
-__date__ = '2014-02-06'
-__relname__ = 'cuatro-core-wip'
+__tagline__ = 'Media and podcast aggregator'
+__author__ = 'Thomas Perl <thp@gpodder.org>'
+__version__ = '4.0.0'
+__date__ = '2014-02-14'
+__relname__ = 'Iffy Kiffy Izzy Oh'
__copyright__ = '© 2005-2014 Thomas Perl and the gPodder Team'
-__license__ = 'ISC / GPLv3 or later'
-__url__ = 'http://gpodder.org/'
+__license__ = 'ISC / GPLv3 or later'
+__url__ = 'http://gpodder.org/'
__version_info__ = tuple(int(x) for x in __version__.split('.'))
@@ -64,4 +64,3 @@
# Episode states used in the database
STATE_NORMAL, STATE_DOWNLOADED, STATE_DELETED = list(range(3))
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/api.py
^
|
@@ -22,12 +22,13 @@
import gpodder.core
import gpodder.util
+
class core:
Core = gpodder.core.Core
+
class util:
run_in_background = gpodder.util.run_in_background
normalize_feed_url = gpodder.util.normalize_feed_url
remove_html_tags = gpodder.util.remove_html_tags
format_date = gpodder.util.format_date
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/common.py
^
|
@@ -41,7 +41,8 @@
util.delete_file(tempfile)
-def find_partial_downloads(directory, channels, start_progress_callback, progress_callback, finish_progress_callback):
+def find_partial_downloads(directory, channels, start_progress_callback, progress_callback,
+ finish_progress_callback):
"""Find partial downloads and match them with episodes
directory - Download directory
@@ -89,6 +90,7 @@
else:
clean_up_downloads(directory, True)
+
def get_expired_episodes(channels, config):
for channel in channels:
for index, episode in enumerate(channel.get_episodes(gpodder.STATE_DOWNLOADED)):
@@ -99,8 +101,7 @@
# Download strategy "Only keep latest"
if (channel.download_strategy == channel.STRATEGY_LATEST and
index > 0):
- logger.info('Removing episode (only keep latest strategy): %s',
- episode.title)
+ logger.info('Removing episode (only keep latest strategy): %s', episode.title)
yield episode
continue
@@ -128,4 +129,3 @@
continue
yield episode
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/config.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -26,7 +26,7 @@
import gpodder
from gpodder import util
-import jsonconfig
+from gpodder import jsonconfig
import os
import shutil
@@ -38,20 +38,20 @@
'limit': {
'bandwidth': {
'enabled': False,
- 'kbps': 500.0, # maximum kB/s per download
+ 'kbps': 500.0, # maximum kB/s per download
},
'downloads': {
'enabled': True,
'concurrent': 1,
},
- 'episodes': 200, # max episodes per feed
+ 'episodes': 200, # max episodes per feed
},
# Automatic feed updates, download removal and retry on download timeout
'auto': {
'update': {
'enabled': False,
- 'frequency': 20, # minutes
+ 'frequency': 20, # minutes
},
'cleanup': {
@@ -61,7 +61,7 @@
'unfinished': True,
},
- 'retries': 3, # number of retries when downloads time out
+ 'retries': 3, # number of retries when downloads time out
},
'ui': {
@@ -71,10 +71,11 @@
},
},
- # XXX: Move this to a "plugins" subtree
- 'youtube': {
- 'preferred_fmt_id': 18, # default fmt_id (see fallbacks in youtube.py)
- 'preferred_fmt_ids': [], # for advanced uses (custom fallback sequence)
+ 'plugins': {
+ 'youtube': {
+ 'preferred_fmt_id': 18, # default fmt_id (see fallbacks in youtube.py)
+ 'preferred_fmt_ids': [], # for advanced uses (custom fallback sequence)
+ },
},
}
@@ -91,6 +92,7 @@
else:
return str(config_value)
+
def string_to_config_value(new_value, old_value):
config_type = type(old_value)
@@ -108,7 +110,7 @@
def __init__(self, filename='gpodder.json'):
self.__json_config = jsonconfig.JsonConfig(default=defaults,
- on_key_changed=self._on_key_changed)
+ on_key_changed=self._on_key_changed)
self.__save_thread = None
self.__filename = filename
self.__observers = []
@@ -186,8 +188,7 @@
data = open(self.__filename, 'rt').read()
new_keys_added = self.__json_config._restore(data)
except:
- logger.warn('Cannot parse config file: %s',
- self.__filename, exc_info=True)
+ logger.warn('Cannot parse config file: %s', self.__filename, exc_info=True)
new_keys_added = False
if new_keys_added:
@@ -210,8 +211,8 @@
try:
observer(name, old_value, value)
except Exception as exception:
- logger.error('Error while calling observer %r: %s',
- observer, exception, exc_info=True)
+ logger.error('Error while calling observer %r: %s', observer, exception,
+ exc_info=True)
self.schedule_save()
@@ -224,4 +225,3 @@
return
setattr(self.__json_config, name, value)
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/core.py
^
|
@@ -31,29 +31,21 @@
import logging
import socket
+
class Core(object):
def __init__(self,
config_class=config.Config,
database_class=storage.Database,
model_class=model.Model,
- prefix=None,
verbose=True,
progname='gpodder'):
self._set_socket_timeout()
- self.prefix = prefix
- if not self.prefix:
- # XXX
- self.prefix = os.path.abspath('.')
-
home = os.path.expanduser('~')
- xdg_data_home = os.environ.get('XDG_DATA_HOME',
- os.path.join(home, '.local', 'share'))
- xdg_config_home = os.environ.get('XDG_CONFIG_HOME',
- os.path.join(home, '.config'))
- xdg_cache_home = os.environ.get('XDG_CACHE_HOME',
- os.path.join(home, '.cache'))
+ xdg_data_home = os.environ.get('XDG_DATA_HOME', os.path.join(home, '.local', 'share'))
+ xdg_config_home = os.environ.get('XDG_CONFIG_HOME', os.path.join(home, '.config'))
+ xdg_cache_home = os.environ.get('XDG_CACHE_HOME', os.path.join(home, '.cache'))
self.data_home = os.path.join(xdg_data_home, progname)
self.config_home = os.path.join(xdg_config_home, progname)
@@ -71,8 +63,7 @@
config_file = os.path.join(self.config_home, 'Settings.json')
database_file = os.path.join(self.data_home, 'Database')
# Downloads go to <data_home> or $GPODDER_DOWNLOAD_DIR
- self.downloads = os.environ.get('GPODDER_DOWNLOAD_DIR',
- os.path.join(self.data_home))
+ self.downloads = os.environ.get('GPODDER_DOWNLOAD_DIR', os.path.join(self.data_home))
# Initialize the gPodder home directories
util.make_directory(self.data_home)
@@ -96,8 +87,13 @@
def _load_plugins(self):
# Plugins to load by default
DEFAULT_PLUGINS = [
+ # Custom handlers (tried in order, put most specific first)
'gpodder.plugins.soundcloud',
- 'gpodder.plugins.xspf',
+ 'gpodder.plugins.itunes',
+ 'gpodder.plugins.youtube',
+ 'gpodder.plugins.vimeo',
+
+ # Fallback handlers (catch-all)
'gpodder.plugins.podcast',
]
@@ -110,8 +106,7 @@
try:
__import__(plugin)
except Exception as e:
- self.logger.warn('Cannot load plugin "%s": %s', plugin, e,
- exc_info=True)
+ self.logger.warn('Cannot load plugin "%s": %s', plugin, e, exc_info=True)
def save(self):
# XXX: Although the function is called close(), this actually doesn't
@@ -126,4 +121,3 @@
# Close the database and store outstanding changes
self.db.close()
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/coverart.py
^
|
@@ -22,11 +22,11 @@
logger = logging.getLogger(__name__)
from gpodder import util
-
-from gpodder.plugins import youtube
+from gpodder import registry
import os
+
class CoverDownloader(object):
# File name extension dict, lists supported cover art extensions
# Values: functions that check if some data is of that file type
@@ -55,18 +55,15 @@
# If allowed to download files, do so here
if download:
- # YouTube-specific cover art image resolver
- youtube_cover_url = youtube.get_real_cover(podcast.url)
- if youtube_cover_url is not None:
- cover_url = youtube_cover_url
+ cover_url = registry.cover_art.resolve(podcast, cover_url)
if not cover_url:
return None
# We have to add username/password, because password-protected
# feeds might keep their cover art also protected (bug 1521)
- cover_url = util.url_add_authentication(cover_url,
- podcast.auth_username, podcast.auth_password)
+ cover_url = util.url_add_authentication(cover_url, podcast.auth_username,
+ podcast.auth_password)
try:
logger.info('Downloading cover art: %s', cover_url)
@@ -88,13 +85,12 @@
raise ValueError(msg)
# Successfully downloaded the cover art - save it!
- fp = open(filename + extension, 'wb')
- fp.write(data)
- fp.close()
+ with util.update_file_safely(filename + extension) as temp_filename:
+ with open(temp_filename, 'wb') as fp:
+ fp.write(data)
return filename + extension
except Exception as e:
logger.warn('Cannot save cover art', exc_info=True)
return None
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/download.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,19 +24,19 @@
# Based on libwget.py (2005-10-29)
#
-
-
import logging
logger = logging.getLogger(__name__)
from gpodder import util
-import gpodder
+from gpodder import registry
-from gpodder.plugins import youtube, vimeo
+import gpodder
import socket
import threading
-import urllib.request, urllib.parse, urllib.error
+import urllib.request
+import urllib.parse
+import urllib.error
import urllib.parse
import shutil
import os.path
@@ -62,7 +62,7 @@
"""
value = None
try:
- headers_string = ['%s:%s'%(k,v) for k,v in list(headers.items())]
+ headers_string = ['%s:%s' % (k, v) for k, v in list(headers.items())]
msg = email.message_from_string('\n'.join(headers_string))
if header_name in msg:
raw_value = msg.get_param(param, header=header_name)
@@ -73,6 +73,7 @@
return value
+
class ContentRange(object):
# Based on:
# http://svn.pythonpaste.org/Paste/WebOb/trunk/webob/byterange.py
@@ -175,8 +176,13 @@
return cls(start, end-1, length)
-class DownloadCancelledException(Exception): pass
-class AuthenticationError(Exception): pass
+class DownloadCancelledException(Exception):
+ pass
+
+
+class AuthenticationError(Exception):
+ pass
+
class gPodderDownloadHTTPError(Exception):
def __init__(self, url, error_code, error_message):
@@ -184,15 +190,16 @@
self.error_code = error_code
self.error_message = error_message
+
class DownloadURLOpener(urllib.request.FancyURLopener):
version = gpodder.user_agent
# Sometimes URLs are not escaped correctly - try to fix them
# (see RFC2396; Section 2.4.3. Excluded US-ASCII Characters)
# FYI: The omission of "%" in the list is to avoid double escaping!
- ESCAPE_CHARS = dict((ord(c), '%%%x'%ord(c)) for c in ' <>#"{}|\\^[]`')
+ ESCAPE_CHARS = dict((ord(c), '%%%x' % ord(c)) for c in ' <>#"{}|\\^[]`')
- def __init__( self, channel):
+ def __init__(self, channel):
self.channel = channel
self._auth_retry_counter = 0
urllib.request.FancyURLopener.__init__(self, None)
@@ -209,26 +216,26 @@
void = fp.read()
fp.close()
raise gPodderDownloadHTTPError(url, errcode, errmsg)
-
+
def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
""" This is the exact same function that's included with urllib
except with "void = fp.read()" commented out. """
-
+
if 'location' in headers:
newurl = headers['location']
elif 'uri' in headers:
newurl = headers['uri']
else:
return
-
+
# This blocks forever(?) with certain servers (see bug #465)
#void = fp.read()
fp.close()
-
+
# In case the server sent a relative URL, join with original:
newurl = urllib.parse.urljoin(self.type + ":" + url, newurl)
return self.open(newurl)
-
+
# The following is based on Python's urllib.py "URLopener.retrieve"
# Also based on http://mail.python.org/pipermail/python-list/2001-October/110069.html
@@ -286,7 +293,7 @@
blocknum = int(current_size/bs)
if reporthook:
if 'content-length' in headers:
- size = int(headers['content-length']) + current_size
+ size = int(headers['content-length']) + current_size
reporthook(blocknum, bs, size)
while read < size or size == -1:
if size == -1:
@@ -308,13 +315,13 @@
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise urllib.error.ContentTooShortError("retrieval incomplete: got only %i out "
- "of %i bytes" % (read, size), result)
+ "of %i bytes" % (read, size), result)
return result
# end code based on urllib.py
- def prompt_user_passwd( self, host, realm):
+ def prompt_user_passwd(self, host, realm):
# Keep track of authentication attempts, fail after the third one
self._auth_retry_counter += 1
if self._auth_retry_counter > 3:
@@ -322,8 +329,8 @@
if self.channel.auth_username or self.channel.auth_password:
logger.debug('Authenticating as "%s" to "%s" for realm "%s".',
- self.channel.auth_username, host, realm)
- return ( self.channel.auth_username, self.channel.auth_password )
+ self.channel.auth_username, host, realm)
+ return (self.channel.auth_username, self.channel.auth_password)
return (None, None)
@@ -410,7 +417,7 @@
minimum_tasks = 0
worker = DownloadQueueWorker(self.tasks, self.__exit_callback,
- self.__continue_check_callback, minimum_tasks)
+ self.__continue_check_callback, minimum_tasks)
self.worker_threads.append(worker)
util.run_in_background(worker.run)
@@ -629,8 +636,7 @@
if totalSize != self.total_size and totalSize > 0:
self.total_size = float(totalSize)
if self.__episode.file_size != self.total_size:
- logger.debug('Updating file size of %s to %s',
- self.filename, self.total_size)
+ logger.debug('Updating file size of %s to %s', self.filename, self.total_size)
self.__episode.file_size = self.total_size
self.__episode.save()
@@ -654,16 +660,17 @@
if count % 5 == 0:
now = time.time()
if self.__start_time > 0:
- # Has rate limiting been enabled or disabled?
+ # Has rate limiting been enabled or disabled?
if self.__limit_rate != self._config.limit.bandwidth.enabled:
- # If it has been enabled then reset base time and block count
+ # If it has been enabled then reset base time and block count
if self._config.limit.bandwidth.enabled:
self.__start_time = now
self.__start_blocks = count
self.__limit_rate = self._config.limit.bandwidth.enabled
- # Has the rate been changed and are we currently limiting?
- if self.__limit_rate_value != self._config.limit.bandwith.kbps and self.__limit_rate:
+ # Has the rate been changed and are we currently limiting?
+ if self.__limit_rate_value != self._config.limit.bandwith.kbps and \
+ self.__limit_rate:
self.__start_time = now
self.__start_blocks = count
self.__limit_rate_value = self._config.limit.bandwidth.kbps
@@ -684,7 +691,8 @@
if self._config.limit.bandwidth.enabled and speed > self._config.limit.bandwidth.kbps:
# calculate the time that should have passed to reach
# the desired download rate and wait if necessary
- should_have_passed = float((count-self.__start_blocks)*blockSize)/(self._config.limit.bandwidth.kbps*1024.0)
+ should_have_passed = (float((count-self.__start_blocks)*blockSize) /
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/jsonconfig.py
^
|
@@ -0,0 +1,219 @@
+#
+# jsonconfig - JSON-based configuration backend (2012-01-18)
+# Copyright (c) 2012, 2013, Thomas Perl <m@thp.io>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+
+import copy
+from functools import reduce
+
+import json
+
+
+class JsonConfigSubtree(object):
+ def __init__(self, parent, name):
+ self._parent = parent
+ self._name = name
+
+ def __repr__(self):
+ return '<Subtree %r of JsonConfig>' % (self._name,)
+
+ def _attr(self, name):
+ return '.'.join((self._name, name))
+
+ def __getitem__(self, name):
+ return self._parent._lookup(self._name).__getitem__(name)
+
+ def __delitem__(self, name):
+ self._parent._lookup(self._name).__delitem__(name)
+
+ def __setitem__(self, name, value):
+ self._parent._lookup(self._name).__setitem__(name, value)
+
+ def __getattr__(self, name):
+ if name == 'keys':
+ # Kludge for using dict() on a JsonConfigSubtree
+ return getattr(self._parent._lookup(self._name), name)
+
+ return getattr(self._parent, self._attr(name))
+
+ def __setattr__(self, name, value):
+ if name.startswith('_'):
+ object.__setattr__(self, name, value)
+ else:
+ self._parent.__setattr__(self._attr(name), value)
+
+
+class JsonConfig(object):
+ _INDENT = 2
+
+ def __init__(self, data=None, default=None, on_key_changed=None):
+ """
+ Create a new JsonConfig object
+
+ data: A JSON string that contains the data to load (optional)
+ default: A dict that contains default config values (optional)
+ on_key_changed: Callback when a value changes (optional)
+
+ The signature of on_key_changed looks like this:
+
+ func(name, old_value, new_value)
+
+ name: The key name, e.g. "ui.gtk.show_toolbar"
+ old_value: The old value, e.g. False
+ new_value: The new value, e.g. True
+
+ For newly-set keys, on_key_changed is also called. In this case,
+ None will be the old_value:
+
+ >>> def callback(*args): print('callback:', args)
+ >>> c = JsonConfig(on_key_changed=callback)
+ >>> c.a.b = 10
+ callback: ('a.b', None, 10)
+ >>> c.a.b = 11
+ callback: ('a.b', 10, 11)
+ >>> c.x.y.z = [1,2,3]
+ callback: ('x.y.z', None, [1, 2, 3])
+ >>> c.x.y.z = 42
+ callback: ('x.y.z', [1, 2, 3], 42)
+
+ Please note that dict-style access will not call on_key_changed:
+
+ >>> def callback(*args): print('callback:', args)
+ >>> c = JsonConfig(on_key_changed=callback)
+ >>> c.a.b = 1 # This works as expected
+ callback: ('a.b', None, 1)
+ >>> c.a['c'] = 10 # This doesn't call on_key_changed!
+ >>> del c.a['c'] # This also doesn't call on_key_changed!
+ """
+ self._default = default
+ self._data = copy.deepcopy(self._default) or {}
+ self._on_key_changed = on_key_changed
+ if data is not None:
+ self._restore(data)
+
+ def _restore(self, backup):
+ """
+ Restore a previous state saved with repr()
+
+ This function allows you to "snapshot" the current values of
+ the configuration and reload them later on. Any missing
+ default values will be added on top of the restored config.
+
+ Returns True if new keys from the default config have been added,
+ False if no keys have been added (backup contains all default keys)
+
+ >>> c = JsonConfig()
+ >>> c.a.b = 10
+ >>> backup = repr(c)
+ >>> print(c.a.b)
+ 10
+ >>> c.a.b = 11
+ >>> print(c.a.b)
+ 11
+ >>> c._restore(backup)
+ False
+ >>> print(c.a.b)
+ 10
+ """
+ self._data = json.loads(backup)
+ # Add newly-added default configuration options
+ if self._default is not None:
+ return self._merge_keys(self._default)
+
+ return False
+
+ def _merge_keys(self, merge_source):
+ """Merge keys from merge_source into this config object
+
+ Return True if new keys were merged, False otherwise
+ """
+ added_new_key = False
+ # Recurse into the data and add missing items
+ work_queue = [(self._data, merge_source)]
+ while work_queue:
+ data, default = work_queue.pop()
+ for key, value in default.items():
+ if key not in data:
+ # Copy defaults for missing key
+ data[key] = copy.deepcopy(value)
+ added_new_key = True
+ elif isinstance(value, dict):
+ # Recurse into sub-dictionaries
+ work_queue.append((data[key], value))
+ elif type(value) != type(data[key]):
+ # Type mismatch of current value and default
+ if type(value) == int and type(data[key]) == float:
+ # Convert float to int if default value is int
+ data[key] = int(data[key])
+
+ return added_new_key
+
+ def __repr__(self):
+ """
+ >>> c = JsonConfig('{"a": 1}')
+ >>> print(c)
+ {
+ "a": 1
+ }
+ """
+ return json.dumps(self._data, indent=self._INDENT)
+
+ def _lookup(self, name):
+ return reduce(lambda d, k: d[k], name.split('.'), self._data)
+
+ def _keys_iter(self):
+ work_queue = []
+ work_queue.append(([], self._data))
+ while work_queue:
+ path, data = work_queue.pop(0)
+
+ if isinstance(data, dict):
+ for key in sorted(data.keys()):
+ work_queue.append((path + [key], data[key]))
+ else:
+ yield '.'.join(path)
+
+ def __getattr__(self, name):
+ try:
+ value = self._lookup(name)
+ if not isinstance(value, dict):
+ return value
+ except KeyError:
+ pass
+
+ return JsonConfigSubtree(self, name)
+
+ def __setattr__(self, name, value):
+ if name.startswith('_'):
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/log.py
^
|
@@ -27,14 +27,15 @@
logger = logging.getLogger(__name__)
+
def setup(home=None, verbose=True):
# Configure basic stdout logging
STDOUT_FMT = '%(created)f [%(name)s] %(levelname)s: %(message)s'
- logging.basicConfig(format=STDOUT_FMT,
- level=logging.DEBUG if verbose else logging.WARNING)
+ logging.basicConfig(format=STDOUT_FMT, level=logging.DEBUG if verbose else logging.WARNING)
# Replace except hook with a custom one that logs it as an error
original_excepthook = sys.excepthook
+
def on_uncaught_exception(exctype, value, tb):
message = ''.join(traceback.format_exception(exctype, value, tb))
logger.error('Uncaught exception: %s', message)
@@ -49,8 +50,7 @@
try:
os.makedirs(logging_directory)
except:
- logger.warn('Cannot create output directory: %s',
- logging_directory)
+ logger.warn('Cannot create output directory: %s', logging_directory)
return False
# Keep logs around for 5 days
@@ -77,4 +77,3 @@
logger.debug('==== gPodder starts up ====')
return True
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/model.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
@@ -27,8 +27,7 @@
from gpodder import util
from gpodder import coverart
from gpodder import download
-
-from gpodder.plugins import youtube, vimeo
+from gpodder import registry
import logging
logger = logging.getLogger(__name__)
@@ -39,37 +38,24 @@
import shutil
import time
import datetime
+import itertools
import hashlib
-import collections
import string
-class NoHandlerForURL(Exception): pass
+class NoHandlerForURL(Exception):
+ pass
-class gPodderFetcher:
- def __init__(self):
- self.custom_handlers = []
-
- def fetch_channel(self, channel, max_episodes):
- for handler in self.custom_handlers:
- feed = handler(channel, max_episodes)
- if feed is not None:
- return feed
-
- raise NoHandlerForURL(channel.url)
-
- def _resolve_url(self, url):
- url = youtube.get_real_channel_url(url)
- url = vimeo.get_real_channel_url(url)
- return url
- def register(self, handler):
- self.custom_handlers.append(handler)
+def fetch_channel(channel, max_episodes):
+ for resolver in (registry.feed_handler, registry.fallback_feed_handler):
+ feed = resolver.resolve(channel, None, max_episodes)
+ if feed is not None:
+ return feed
+
+ raise NoHandlerForURL(channel.url)
-# The "register" method is exposed here for external usage
-fetcher = gPodderFetcher()
-register_custom_handler = fetcher.register
# Our podcast model:
#
@@ -154,11 +140,13 @@
"""
pass
+
class PodcastEpisode(PodcastModelObject):
"""holds data for one object in a channel"""
MAX_FILENAME_LENGTH = 200
- UPDATE_KEYS = ('title', 'url', 'description', 'link', 'published', 'guid', 'file_size', 'payment_url')
+ UPDATE_KEYS = ('title', 'url', 'description', 'link', 'published', 'guid', 'file_size',
+ 'payment_url')
__schema__ = EpisodeColumns
__slots__ = __schema__
@@ -231,8 +219,8 @@
return title
# "#001: Title" -> "001: Title"
- if (not self.parent._common_prefix and re.match('^#\d+: ',
- self.title) and len(self.title)-1 > LEFTOVER_MIN):
+ if (not self.parent._common_prefix and re.match('^#\d+: ', self.title) and
+ len(self.title)-1 > LEFTOVER_MIN):
return self.title[1:]
if (self.parent._common_prefix is not None and
@@ -246,7 +234,9 @@
task = download.DownloadTask(self)
task.add_progress_callback(progress_callback)
task.status = download.DownloadTask.QUEUED
- return task.run()
+ result = task.run()
+ task.recycle()
+ return result
def download_progress(self):
task = self.download_task
@@ -272,8 +262,8 @@
return False
return task.status in (download.DownloadTask.DOWNLOADING,
- download.DownloadTask.QUEUED,
- download.DownloadTask.PAUSED)
+ download.DownloadTask.QUEUED,
+ download.DownloadTask.PAUSED)
def save(self):
self.db.save_episode(self)
@@ -293,8 +283,7 @@
return self.is_new and self.state == gpodder.STATE_NORMAL
def age_in_days(self):
- return util.file_age_in_days(self.local_filename(create=False,
- check_only=True))
+ return util.file_age_in_days(self.local_filename(create=False, check_only=True))
def delete(self):
filename = self.local_filename(create=False, check_only=True)
@@ -305,7 +294,7 @@
self.is_new = False
self.save()
- def get_playback_url(self, fmt_ids=None, allow_partial=False):
+ def get_playback_url(self, allow_partial=False):
"""Local (or remote) playback/streaming filename/URL
Returns either the local filename or a streaming URL that
@@ -321,9 +310,7 @@
return url + '.partial'
if url is None or not os.path.exists(url):
- url = self.url
- url = youtube.get_real_download_url(url, fmt_ids)
- url = vimeo.get_real_download_url(url)
+ url = registry.download_url.resolve(self, self.url, self.parent.model.core.config)
return url
@@ -332,16 +319,15 @@
filename = filename.strip('.' + string.whitespace) + extension
# Existing download folder names must not be used
- existing_names = [episode.download_filename
- for episode in self.parent.episodes
- if episode is not self]
+ existing_names = [episode.download_filename for episode in self.parent.episodes
+ if episode is not self]
for name in util.generate_names(filename):
if name not in existing_names:
return name
- def local_filename(self, create, force_update=False, check_only=False,
- template=None, return_wanted_filename=False):
+ def local_filename(self, create, force_update=False, check_only=False, template=None,
+ return_wanted_filename=False):
"""Get (and possibly generate) the local saving filename
Pass create=True if you want this function to generate a
@@ -381,8 +367,7 @@
if not check_only and (force_update or not self.download_filename):
# Avoid and catch gPodder bug 1440 and similar situations
if template == '':
- logger.warn('Empty template. Report this podcast URL %s',
- self.channel.url)
+ logger.warn('Empty template. Report this podcast URL %s', self.channel.url)
template = None
# Try to find a new filename for the current file
@@ -405,21 +390,16 @@
episode_filename, _ = util.filename_from_url(resolved_url)
fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)
except Exception as e:
- logger.warn('Cannot resolve redirection for %s', self.url,
- exc_info=True)
+ logger.warn('Cannot resolve redirection for %s', self.url, exc_info=True)
- # Use title for YouTube, Vimeo and Soundcloud downloads
- if (youtube.is_video_link(self.url) or
- vimeo.is_video_link(self.url) or
- fn_template == 'stream'):
- sanitized = util.sanitize_filename(self.title, self.MAX_FILENAME_LENGTH)
- if sanitized:
- fn_template = sanitized
+ sanitized_title = util.sanitize_filename(self.title, self.MAX_FILENAME_LENGTH)
+ if fn_template == 'stream' and sanitized_title:
+ fn_template = sanitized_title
+ fn_template = registry.episode_basename.resolve(self, fn_template, sanitized_title)
# If the basename is empty, use the md5 hexdigest of the URL
if not fn_template or fn_template.startswith('redirect.'):
- logger.error('Report this feed: Podcast %s, episode %s',
- self.channel.url, self.url)
+ logger.error('Report this feed: Podcast %s, episode %s', self.channel.url, self.url)
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/opml.py
^
|
@@ -23,9 +23,9 @@
"""OPML import and export functionality
-This module contains helper classes to import subscriptions
-from OPML files on the web and to export a list of channel
-objects to valid OPML 1.1 files that can be used to backup
+This module contains helper classes to import subscriptions
+from OPML files on the web and to export a list of channel
+objects to valid OPML 1.1 files that can be used to backup
or distribute gPodder's channel subscriptions.
"""
@@ -47,7 +47,7 @@
class Importer(object):
"""
Helper class to import an OPML feed from protocols
- supported by urllib2 (e.g. HTTP) and return a GTK
+ supported by urllib2 (e.g. HTTP) and return a GTK
ListStore that can be displayed in the GUI.
This class should support standard OPML feeds and
@@ -56,9 +56,9 @@
VALID_TYPES = ('rss', 'link')
- def __init__( self, url):
+ def __init__(self, url):
"""
- Parses the OPML feed from the given URL into
+ Parses the OPML feed from the given URL into
a local data structure containing channel metadata.
"""
self.items = []
@@ -75,26 +75,30 @@
continue
if outline.getAttribute('xmlUrl') or outline.getAttribute('url'):
- channel = {
- 'url': outline.getAttribute('xmlUrl') or outline.getAttribute('url'),
- 'title': outline.getAttribute('title') or outline.getAttribute('text') or outline.getAttribute('xmlUrl') or outline.getAttribute('url'),
- 'description': outline.getAttribute('text') or outline.getAttribute('xmlUrl') or outline.getAttribute('url'),
- }
+ channel = {'url': (outline.getAttribute('xmlUrl') or
+ outline.getAttribute('url')),
+ 'title': (outline.getAttribute('title') or
+ outline.getAttribute('text') or
+ outline.getAttribute('xmlUrl') or
+ outline.getAttribute('url')),
+ 'description': (outline.getAttribute('text') or
+ outline.getAttribute('xmlUrl') or
+ outline.getAttribute('url')),
+ }
if channel['description'] == channel['title']:
channel['description'] = channel['url']
- for attr in ( 'url', 'title', 'description' ):
+ for attr in ('url', 'title', 'description'):
channel[attr] = channel[attr].strip()
- self.items.append( channel)
+ self.items.append(channel)
if not len(self.items):
logger.info('OPML import finished, but no items found: %s', url)
except:
logger.error('Cannot import OPML from URL: %s', url, exc_info=True)
-
class Exporter(object):
"""
Helper class to export a list of channel objects
@@ -105,45 +109,45 @@
FEED_TYPE = 'rss'
- def __init__( self, filename):
+ def __init__(self, filename):
if filename is None:
self.filename = None
- elif filename.endswith( '.opml') or filename.endswith( '.xml'):
+ elif filename.endswith('.opml') or filename.endswith('.xml'):
self.filename = filename
else:
- self.filename = '%s.opml' % ( filename, )
+ self.filename = '%s.opml' % (filename,)
- def create_node( self, doc, name, content):
+ def create_node(self, doc, name, content):
"""
- Creates a simple XML Element node in a document
- with tag name "name" and text content "content",
+ Creates a simple XML Element node in a document
+ with tag name "name" and text content "content",
as in <name>content</name> and returns the element.
"""
- node = doc.createElement( name)
- node.appendChild( doc.createTextNode( content))
+ node = doc.createElement(name)
+ node.appendChild(doc.createTextNode(content))
return node
- def create_outline( self, doc, channel):
+ def create_outline(self, doc, channel):
"""
Creates a OPML outline as XML Element node in a
document for the supplied channel.
"""
- outline = doc.createElement( 'outline')
- outline.setAttribute( 'title', channel.title)
- outline.setAttribute( 'text', channel.description)
- outline.setAttribute( 'xmlUrl', channel.url)
- outline.setAttribute( 'type', self.FEED_TYPE)
+ outline = doc.createElement('outline')
+ outline.setAttribute('title', channel.title)
+ outline.setAttribute('text', channel.description)
+ outline.setAttribute('xmlUrl', channel.url)
+ outline.setAttribute('type', self.FEED_TYPE)
return outline
- def write( self, channels):
+ def write(self, channels):
"""
- Creates a XML document containing metadata for each
- channel object in the "channels" parameter, which
+ Creates a XML document containing metadata for each
+ channel object in the "channels" parameter, which
should be a list of channel objects.
OPML 2.0 specification: http://www.opml.org/spec2
- Returns True on success or False when there was an
+ Returns True on success or False when there was an
error writing the file.
"""
if self.filename is None:
@@ -155,24 +159,22 @@
opml.setAttribute('version', '2.0')
doc.appendChild(opml)
- head = doc.createElement( 'head')
- head.appendChild( self.create_node( doc, 'title', 'gPodder subscriptions'))
- head.appendChild( self.create_node( doc, 'dateCreated', formatdate(localtime=True)))
- opml.appendChild( head)
+ head = doc.createElement('head')
+ head.appendChild(self.create_node(doc, 'title', 'gPodder subscriptions'))
+ head.appendChild(self.create_node(doc, 'dateCreated', formatdate(localtime=True)))
+ opml.appendChild(head)
- body = doc.createElement( 'body')
+ body = doc.createElement('body')
for channel in channels:
- body.appendChild( self.create_outline( doc, channel))
- opml.appendChild( body)
+ body.appendChild(self.create_outline(doc, channel))
+ opml.appendChild(body)
try:
with util.update_file_safely(self.filename) as temp_filename:
with open(temp_filename, 'w') as fp:
fp.write(doc.toprettyxml(indent=' ', newl=os.linesep))
except:
- logger.error('Could not open file for writing: %s',
- self.filename, exc_info=True)
+ logger.error('Could not open file for writing: %s', self.filename, exc_info=True)
return False
return True
-
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/itunes.py
^
|
@@ -0,0 +1,64 @@
+
+#
+# gpodder.plugins.itunes: Resolve iTunes feed URLs (based on a gist by Yepoleb, 2014-03-09)
+# Copyright (c) 2014, Thomas Perl <m@thp.io>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+
+
+import gpodder
+
+from gpodder import util
+from gpodder import registry
+
+import re
+import logging
+
+logger = logging.getLogger(__name__)
+
+ITUNES_DEFAULT_VERSION = '11.1.5'
+
+ITUNES_FEEDURL_RE = {'10.7': r'feed-url="([^"]+)"',
+ '11.1.5': r'"feedUrl":\s*"([^"]+)"'}
+
+
+class ITunesFeedException(BaseException):
+ pass
+
+
+@registry.feed_handler.register
+def itunes_feed_handler(channel, max_episodes):
+ m = re.match(r'https?://itunes.apple.com/(?:[^/]*/)?podcast/.+$', channel.url, re.I)
+ if m is None:
+ return None
+
+ logger.debug('Detected iTunes feed.')
+ version = ITUNES_DEFAULT_VERSION
+ headers = {'User-agent': 'iTunes/{}'.format(version)}
+ try:
+ data = util.urlopen(channel.url, headers).read().decode('utf-8')
+ m = re.search(ITUNES_FEEDURL_RE[version], data)
+ if m is None:
+ raise ITunesFeedException('Could not resolve real feed URL from iTunes feed.')
+
+ url = m.group(1)
+ logger.info('Resolved iTunes feed URL: {} -> {}'.format(channel.url, url))
+ channel.url = url
+
+ # Delegate further processing of the feed to the normal podcast parser
+ # by returning None (will try the next handler in the resolver chain)
+ return None
+ except Exception as ex:
+ logger.warn('Cannot resolve iTunes feed: {}'.format(str(ex)))
+ raise
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/podcast.py
^
|
@@ -18,21 +18,21 @@
import gpodder
-from gpodder import model
+from gpodder import registry
from gpodder import util
-# XXX: Avoid "cross-importing" of plugins
-from gpodder.plugins import youtube
-from gpodder.plugins import vimeo
-
import podcastparser
-import urllib.request, urllib.error, urllib.parse
+import urllib.request
+import urllib.error
+import urllib.parse
+import re
import logging
logger = logging.getLogger(__name__)
+
class PodcastParserFeed(object):
def __init__(self, channel, max_episodes):
url = channel.authenticate_url(channel.url)
@@ -91,13 +91,6 @@
def _pick_enclosure(self, episode_dict):
if not episode_dict['enclosures']:
del episode_dict['enclosures']
-
- # XXX: Move special cases to {youtube,vimeo}.py as subclass
- if (youtube.is_video_link(episode_dict['link']) or
- vimeo.is_video_link(episode_dict['link'])):
- episode_dict['url'] = episode_dict['link']
- return True
-
return False
# FIXME: Pick the right enclosure from multiple ones
@@ -127,7 +120,29 @@
return new_episodes, seen_guids
-@model.register_custom_handler
+
+class PodcastParserEnclosureFallbackFeed(PodcastParserFeed):
+ # Implement this in a subclass to determine a fallback enclosure
+ # for feeds that don't list their media files as enclosures
+ def _get_enclosure_url(self, episode_dict):
+ return None
+
+ def _pick_enclosure(self, episode_dict):
+ if not episode_dict['enclosures']:
+ url = self._get_enclosure_url(episode_dict)
+ if url is not None:
+ del episode_dict['enclosures']
+ episode_dict['url'] = url
+ return True
+
+ return super(PodcastParserEnclosureFallbackFeed, self)._pick_enclosure(episode_dict)
+
+
+@registry.fallback_feed_handler.register
def podcast_parser_handler(channel, max_episodes):
return PodcastParserFeed(channel, max_episodes)
+
+@registry.url_shortcut.register
+def podcast_resolve_url_shortcut():
+ return {'fb': 'http://feeds.feedburner.com/%s'}
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/soundcloud.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@
import gpodder
-from gpodder import model
+from gpodder import registry
from gpodder import util
import json
@@ -36,9 +36,11 @@
# gPodder's consumer key for the Soundcloud API
CONSUMER_KEY = 'zrweghtEtnZLpXf3mlm8mQ'
+
def fetch_json(url):
return json.loads(str(util.urlopen(url).read().decode('utf-8')))
+
def soundcloud_parsedate(s):
"""Parse a string into a unix timestamp
@@ -48,6 +50,7 @@
m = re.match(r'(\d{4})/(\d{2})/(\d{2}) (\d{2}):(\d{2}):(\d{2})', s)
return time.mktime(tuple([int(x) for x in m.groups()]+[0, 0, -1]))
+
def get_param(s, param='filename', header='content-disposition'):
"""Get a parameter from a string of headers
@@ -69,6 +72,7 @@
return None
+
def get_metadata(url):
"""Get file download metadata
@@ -80,7 +84,7 @@
headers = track_fp.info()
filesize = headers['content-length'] or '0'
filetype = headers['content-type'] or 'application/octet-stream'
- headers_s = '\n'.join('%s:%s'%(k,v) for k, v in list(headers.items()))
+ headers_s = '\n'.join('%s:%s' % (k, v) for k, v in list(headers.items()))
filename = get_param(headers_s) or os.path.basename(os.path.dirname(url))
track_fp.close()
return filesize, filetype, filename
@@ -93,7 +97,8 @@
def get_coverart(self):
global CONSUMER_KEY
- json_url = 'http://api.soundcloud.com/users/%s.json?consumer_key=%s' % (self.username, CONSUMER_KEY)
+ json_url = 'http://api.soundcloud.com/users/%s.json?consumer_key=%s' %\
+ (self.username, CONSUMER_KEY)
user_info = fetch_json(json_url)
image = user_info.get('avatar_url', None)
@@ -106,15 +111,15 @@
track it can find for its user."""
global CONSUMER_KEY
- json_url = 'http://api.soundcloud.com/users/%(user)s/%(feed)s.json?filter=downloadable&consumer_key=%(consumer_key)s' \
- % { "user":self.username, "feed":feed, "consumer_key": CONSUMER_KEY }
+ json_url = 'http://api.soundcloud.com/users/%(user)s/%(feed)s.json?' \
+ 'filter=downloadable&consumer_key=%(consumer_key)s' \
+ % {"user": self.username, "feed": feed, "consumer_key": CONSUMER_KEY}
tracks = (track for track in fetch_json(json_url) if track['downloadable'])
for track in tracks:
# Prefer stream URL (MP3), fallback to download URL
url = track.get('stream_url', track['download_url']) + \
- '?consumer_key=%(consumer_key)s' \
- % { 'consumer_key': CONSUMER_KEY }
+ '?consumer_key=%(consumer_key)s' % {'consumer_key': CONSUMER_KEY}
filesize, filetype, filename = get_metadata(url)
yield {
@@ -128,6 +133,7 @@
'published': soundcloud_parsedate(track.get('created_at', None)),
}
+
class SoundcloudFeed(object):
def __init__(self, username):
self.username = username
@@ -175,6 +181,7 @@
return new_episodes, seen_guids
+
class SoundcloudFavFeed(SoundcloudFeed):
def __init__(self, username):
super(SoundcloudFavFeed, self).__init__(username)
@@ -192,7 +199,7 @@
return self._get_new_episodes(channel, 'favorites')
-@model.register_custom_handler
+@registry.feed_handler.register
def soundcloud_feed_handler(channel, max_episodes):
m = re.match(r'http://([a-z]+\.)?soundcloud\.com/([^/]+)$', channel.url, re.I)
@@ -200,7 +207,8 @@
subdomain, username = m.groups()
return SoundcloudFeed(username)
-@model.register_custom_handler
+
+@registry.feed_handler.register
def soundcloud_fav_feed_handler(channel, max_episodes):
m = re.match(r'http://([a-z]+\.)?soundcloud\.com/([^/]+)/favorites', channel.url, re.I)
@@ -208,3 +216,8 @@
subdomain, username = m.groups()
return SoundcloudFavFeed(username)
+
+@registry.url_shortcut.register
+def soundcloud_resolve_url_shortcut():
+ return {'sc': 'http://soundcloud.com/%s',
+ 'scfav': 'http://soundcloud.com/%s/favorites'}
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/vimeo.py
^
|
@@ -19,46 +19,61 @@
import gpodder
from gpodder import util
+from gpodder import registry
+
+from gpodder.plugins import podcast
import logging
logger = logging.getLogger(__name__)
import re
+import json
VIMEOCOM_RE = re.compile(r'http://vimeo\.com/(\d+)$', re.IGNORECASE)
+VIMEOCHANNEL_RE = re.compile(r'http://vimeo\.com/([a-z0-9]+)[/]?$', re.IGNORECASE)
MOOGALOOP_RE = re.compile(r'http://vimeo\.com/moogaloop\.swf\?clip_id=(\d+)$', re.IGNORECASE)
SIGNATURE_RE = re.compile(r'"timestamp":(\d+),"signature":"([^"]+)"')
+DATA_CONFIG_RE = re.compile(r'data-config-url="([^"]+)"')
+
-class VimeoError(BaseException): pass
+class VimeoError(BaseException):
+ pass
-def get_real_download_url(url):
- quality = 'sd'
- codecs = 'H264,VP8,VP6'
+
+@registry.download_url.register
+def vimeo_resolve_download_url(episode, config):
+ url = episode.url
video_id = get_vimeo_id(url)
if video_id is None:
- return url
+ return None
web_url = 'http://vimeo.com/%s' % video_id
web_data = util.urlopen(web_url).read().decode('utf-8')
- sig_pair = SIGNATURE_RE.search(web_data)
+ data_config_frag = DATA_CONFIG_RE.search(web_data)
+
+ if data_config_frag is None:
+ raise VimeoError('Cannot get data config from Vimeo')
+
+ data_config_url = data_config_frag.group(1).replace('&', '&')
+
+ def get_urls(data_config_url):
+ data_config_data = util.urlopen(data_config_url).read().decode('utf-8')
+ data_config = json.loads(data_config_data)
+ for fileinfo in data_config['request']['files'].values():
+ if not isinstance(fileinfo, dict):
+ continue
- if sig_pair is None:
- raise VimeoError('Cannot get signature pair from Vimeo')
+ for fileformat, keys in fileinfo.items():
+ if not isinstance(keys, dict):
+ continue
+
+ yield (fileformat, keys['url'])
+
+ for quality, url in get_urls(data_config_url):
+ return url
- timestamp, signature = sig_pair.groups()
- params = '&'.join('%s=%s' % i for i in [
- ('clip_id', video_id),
- ('sig', signature),
- ('time', timestamp),
- ('quality', quality),
- ('codecs', codecs),
- ('type', 'moogaloop_local'),
- ('embed_location', ''),
- ])
- player_url = 'http://player.vimeo.com/play_redirect?%s' % params
- return player_url
def get_vimeo_id(url):
result = MOOGALOOP_RE.match(url)
@@ -71,16 +86,57 @@
return None
+
def is_video_link(url):
return (get_vimeo_id(url) is not None)
+
def get_real_channel_url(url):
- result = VIMEOCOM_RE.match(url)
+ result = VIMEOCHANNEL_RE.match(url)
if result is not None:
return 'http://vimeo.com/%s/videos/rss' % result.group(1)
- return url
+ return None
+
def get_real_cover(url):
return None
+
+class PodcastParserVimeoFeed(podcast.PodcastParserEnclosureFallbackFeed):
+ def _get_enclosure_url(self, episode_dict):
+ if is_video_link(episode_dict['link']):
+ return episode_dict['link']
+
+ return None
+
+
+@registry.feed_handler.register
+def vimeo_feed_handler(channel, max_episodes):
+ url = get_real_channel_url(channel.url)
+ if url is None:
+ return None
+
+ logger.info('Vimeo feed resolved: {} -> {}'.format(channel.url, url))
+ channel.url = url
+
+ return PodcastParserVimeoFeed(channel, max_episodes)
+
+
+@registry.episode_basename.register
+def vimeo_resolve_episode_basename(episode, sanitized):
+ if sanitized and is_video_link(episode.url):
+ return sanitized
+
+
+@registry.podcast_title.register
+def vimeo_resolve_podcast_title(podcast, new_title):
+ VIMEO_PREFIX = 'Vimeo / '
+ if new_title.startswith(VIMEO_PREFIX):
+ return new_title[len(VIMEO_PREFIX):] + ' on Vimeo'
+
+
+@registry.content_type.register
+def vimeo_resolve_content_type(episode):
+ if is_video_link(episode.url):
+ return 'video'
|
[-]
[+]
|
Deleted |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/xspf.py
^
|
@@ -1,168 +0,0 @@
-#
-# gpodder.plugins.xspf: XSPF playlist parser module for gPodder (2010-08-07)
-# Copyright (c) 2010-2013, Thomas Perl <m@thp.io>
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-
-
-# Currently, this is restricted to FM4 On Demand content, as the XSPF parser
-# here isn't generic enough to parse all other feeds reliably. Please get in
-# touch if you want support for other feeds - you can use the existing parser
-# as a template for your own! :)
-#
-# See http://fm4.orf.at/radio/stories/audio for available feeds
-
-
-import gpodder
-
-from gpodder import model
-from gpodder import util
-
-import podcastparser
-
-import os
-import time
-
-import re
-
-from xml.dom import minidom
-
-
-def get_metadata(url):
- """Get file download metadata
-
- Returns a (size, type, name) from the given download
- URL. Will use the network connection to determine the
- metadata via the HTTP header fields.
- """
- track_fp = util.urlopen(url)
- headers = track_fp.info()
- filesize = headers['content-length'] or '0'
- filetype = headers['content-type'] or 'application/octet-stream'
-
- if 'last-modified' in headers:
- filedate = podcastparser.parse_date(headers['last-modified'])
- else:
- filedate = None
-
- filename = os.path.basename(os.path.dirname(url))
- track_fp.close()
- return filesize, filetype, filedate, filename
-
-
-class FM4OnDemandPlaylist(object):
- CONTENT = {
- 'spezialmusik': (
- 'FM4 Sendungen',
- 'http://onapp1.orf.at/webcam/fm4/fod/SOD_Bild_Spezialmusik.jpg',
- 'http://fm4.orf.at/',
- 'Sendungen jeweils sieben Tage zum Nachhören.',
- ),
- 'unlimited': (
- 'FM4 Unlimited',
- 'http://onapp1.orf.at/webcam/fm4/fod/SOD_Bild_Unlimited.jpg',
- 'http://fm4.orf.at/unlimited',
- 'Montag bis Freitag (14-15 Uhr)',
- ),
- 'soundpark': (
- 'FM4 Soundpark',
- 'http://onapp1.orf.at/webcam/fm4/fod/SOD_Bild_Soundpark.jpg',
- 'http://fm4.orf.at/soundpark',
- 'Nacht von Sonntag auf Montag (1-6 Uhr)',
- ),
- }
-
- @classmethod
- def get_text_contents(cls, node):
- if hasattr(node, '__iter__'):
- return ''.join(cls.get_text_contents(x) for x in node)
- elif node.nodeType == node.TEXT_NODE:
- return node.data
- else:
- return ''.join(cls.get_text_contents(c) for c in node.childNodes)
-
- def __init__(self, url, category):
- self.url = url
- self.category = category
- # TODO: Use proper caching of contents with support for
- # conditional GETs (If-Modified-Since, ETag, ...)
- self.data = minidom.parse(util.urlopen(url))
- self.playlist = self.data.getElementsByTagName('playlist')[0]
-
- def was_updated(self):
- return True
-
- def get_etag(self, default):
- return default
-
- def get_modified(self, default):
- return default
-
- def get_title(self):
- title = self.playlist.getElementsByTagName('title')[0]
- default = self.get_text_contents(title)
- return self.CONTENT.get(self.category, \
- (default, None, None, None))[0]
-
- def get_image(self):
- return self.CONTENT.get(self.category, \
- (None, None, None, None))[1]
-
- def get_link(self):
- return self.CONTENT.get(self.category, \
- (None, None, 'http://fm4.orf.at/', None))[2]
-
- def get_description(self):
- return self.CONTENT.get(self.category, \
- (None, None, None, 'XSPF playlist'))[3]
-
- def get_payment_url(self):
- return None
-
- def get_new_episodes(self, channel):
- tracks = []
- existing_guids = [episode.guid for episode in channel.children]
- seen_guids = []
-
- for track in self.playlist.getElementsByTagName('track'):
- title = self.get_text_contents(track.getElementsByTagName('title'))
- url = self.get_text_contents(track.getElementsByTagName('location'))
- seen_guids.append(url)
- if url in existing_guids:
- continue
-
- filesize, filetype, filedate, filename = get_metadata(url)
- episode = channel.episode_factory({
- 'title': title,
- 'link': '',
- 'description': '',
- 'url': url,
- 'file_size': int(filesize),
- 'mime_type': filetype,
- 'guid': url,
- 'published': filedate,
- })
- episode.save()
- tracks.append(episode)
-
- return tracks, seen_guids
-
-@model.register_custom_handler
-def fm4_on_demand_playlist_handler(channel, max_episodes):
- m = re.match(r'http://onapp1\.orf\.at/webcam/fm4/fod/([^/]+)\.xspf$', channel.url)
-
- if m is not None:
- category = m.group(1)
- return FM4OnDemandPlaylist(channel.url, category)
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/plugins/youtube.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,10 +19,11 @@
# Justin Forest <justin.forest@gmail.com> 2008-10-13
#
-
import gpodder
from gpodder import util
+from gpodder import registry
+from gpodder.plugins import podcast
import os.path
@@ -35,7 +36,9 @@
import json
import re
-import urllib.request, urllib.parse, urllib.error
+import urllib.request
+import urllib.parse
+import urllib.error
try:
# Python >= 2.6
@@ -51,33 +54,39 @@
# Fallback to an MP4 version of same quality.
# Try 34 (FLV 360p H.264 AAC) if 18 (MP4 360p) fails.
# Fallback to 6 or 5 (FLV Sorenson H.263 MP3) if all fails.
- (46, ([46, 37, 45, 22, 44, 35, 43, 18, 6, 34, 5], '45/1280x720/99/0/0', 'WebM 1080p (1920x1080)')), # N/A, 192 kbps
- (45, ([45, 22, 44, 35, 43, 18, 6, 34, 5], '45/1280x720/99/0/0', 'WebM 720p (1280x720)')), # 2.0 Mbps, 192 kbps
- (44, ([44, 35, 43, 18, 6, 34, 5], '44/854x480/99/0/0', 'WebM 480p (854x480)')), # 1.0 Mbps, 128 kbps
- (43, ([43, 18, 6, 34, 5], '43/640x360/99/0/0', 'WebM 360p (640x360)')), # 0.5 Mbps, 128 kbps
+ (46, ([46, 37, 45, 22, 44, 35, 43, 18, 6, 34, 5], '45/1280x720/99/0/0',
+ 'WebM 1080p (1920x1080)')),
+ (45, ([45, 22, 44, 35, 43, 18, 6, 34, 5], '45/1280x720/99/0/0',
+ 'WebM 720p (1280x720)')),
+ (44, ([44, 35, 43, 18, 6, 34, 5], '44/854x480/99/0/0',
+ 'WebM 480p (854x480)')),
+ (43, ([43, 18, 6, 34, 5], '43/640x360/99/0/0',
+ 'WebM 360p (640x360)')),
# MP4 H.264 video, AAC audio
# Try 35 (FLV 480p H.264 AAC) between 720p and 360p because there's no MP4 480p.
# Try 34 (FLV 360p H.264 AAC) if 18 (MP4 360p) fails.
# Fallback to 6 or 5 (FLV Sorenson H.263 MP3) if all fails.
- (38, ([38, 37, 22, 35, 18, 34, 6, 5], '38/1920x1080/9/0/115', 'MP4 4K 3072p (4096x3072)')), # 5.0 - 3.5 Mbps, 192 kbps
- (37, ([37, 22, 35, 18, 34, 6, 5], '37/1920x1080/9/0/115', 'MP4 HD 1080p (1920x1080)')), # 4.3 - 3.0 Mbps, 192 kbps
- (22, ([22, 35, 18, 34, 6, 5], '22/1280x720/9/0/115', 'MP4 HD 720p (1280x720)')), # 2.9 - 2.0 Mbps, 192 kbps
- (18, ([18, 34, 6, 5], '18/640x360/9/0/115', 'MP4 360p (640x360)')), # 0.5 Mbps, 96 kbps
+ (38, ([38, 37, 22, 35, 18, 34, 6, 5], '38/1920x1080/9/0/115', 'MP4 4K 3072p (4096x3072)')),
+ (37, ([37, 22, 35, 18, 34, 6, 5], '37/1920x1080/9/0/115', 'MP4 HD 1080p (1920x1080)')),
+ (22, ([22, 35, 18, 34, 6, 5], '22/1280x720/9/0/115', 'MP4 HD 720p (1280x720)')),
+ (18, ([18, 34, 6, 5], '18/640x360/9/0/115', 'MP4 360p (640x360)')),
# FLV H.264 video, AAC audio
# Does not check for 360p MP4.
# Fallback to 6 or 5 (FLV Sorenson H.263 MP3) if all fails.
- (35, ([35, 34, 6, 5], '35/854x480/9/0/115', 'FLV 480p (854x480)')), # 1 - 0.80 Mbps, 128 kbps
- (34, ([34, 6, 5], '34/640x360/9/0/115', 'FLV 360p (640x360)')), # 0.50 Mbps, 128 kbps
+ (35, ([35, 34, 6, 5], '35/854x480/9/0/115', 'FLV 480p (854x480)')), # 1 - 0.80 Mbps, 128 kbps
+ (34, ([34, 6, 5], '34/640x360/9/0/115', 'FLV 360p (640x360)')), # 0.50 Mbps, 128 kbps
# FLV Sorenson H.263 video, MP3 audio
- (6, ([6, 5], '5/480x270/7/0/0', 'FLV 270p (480x270)')), # 0.80 Mbps, 64 kbps
- (5, ([5], '5/320x240/7/0/0', 'FLV 240p (320x240)')), # 0.25 Mbps, 64 kbps
+ (6, ([6, 5], '5/480x270/7/0/0', 'FLV 270p (480x270)')), # 0.80 Mbps, 64 kbps
+ (5, ([5], '5/320x240/7/0/0', 'FLV 240p (320x240)')), # 0.25 Mbps, 64 kbps
]
formats_dict = dict(formats)
-class YouTubeError(Exception): pass
+
+class YouTubeError(Exception):
+ pass
def get_fmt_ids(youtube_config):
@@ -91,69 +100,74 @@
return fmt_ids
-def get_real_download_url(url, preferred_fmt_ids=None):
+
+@registry.download_url.register
+def youtube_resolve_download_url(episode, config):
+ url = episode.url
+ preferred_fmt_ids = get_fmt_ids(config.plugins.youtube)
+
if not preferred_fmt_ids:
- preferred_fmt_ids, _, _ = formats_dict[22] # MP4 720p
+ preferred_fmt_ids, _, _ = formats_dict[22] # MP4 720p
vid = get_youtube_id(url)
- if vid is not None:
- page = None
- url = 'http://www.youtube.com/get_video_info?&el=detailpage&video_id=' + vid
-
- while page is None:
- req = util.http_request(url, method='GET')
- if 'location' in req.msg:
- url = req.msg['location']
- else:
- page = req.read().decode('utf-8')
+ if vid is None:
+ return None
+
+ page = None
+ url = 'http://www.youtube.com/get_video_info?&el=detailpage&video_id=' + vid
+
+ while page is None:
+ req = util.http_request(url, method='GET')
+ if 'location' in req.msg:
+ url = req.msg['location']
+ else:
+ page = req.read().decode('utf-8')
- # Try to find the best video format available for this video
- # (http://forum.videohelp.com/topic336882-1800.html#1912972)
- def find_urls(page):
- r4 = re.search('.*&url_encoded_fmt_stream_map=([^&]+)&.*', page)
- if r4 is not None:
- fmt_url_map = urllib.parse.unquote(r4.group(1))
- for fmt_url_encoded in fmt_url_map.split(','):
- video_info = parse_qs(fmt_url_encoded)
- yield int(video_info['itag'][0]), video_info['url'][0] + "&signature=" + video_info['sig'][0]
+ # Try to find the best video format available for this video
+ # (http://forum.videohelp.com/topic336882-1800.html#1912972)
+ def find_urls(page):
+ r4 = re.search('.*&url_encoded_fmt_stream_map=([^&]+)&.*', page)
+ if r4 is not None:
+ fmt_url_map = urllib.parse.unquote(r4.group(1))
+ for fmt_url_encoded in fmt_url_map.split(','):
+ video_info = parse_qs(fmt_url_encoded)
+ yield (int(video_info['itag'][0]), video_info['url'][0])
+ else:
+ error_info = parse_qs(page)
+ error_message = util.remove_html_tags(error_info['reason'][0])
+ raise YouTubeError('Cannot download video: %s' % error_message)
+
+ fmt_id_url_map = sorted(find_urls(page), reverse=True)
+
+ if not fmt_id_url_map:
+ raise YouTubeError('fmt_url_map not found for video ID "%s"' % vid)
+
+ # Default to the highest fmt_id if we don't find a match below
+ _, url = fmt_id_url_map[0]
+
+ formats_available = set(fmt_id for fmt_id, url in fmt_id_url_map)
+ fmt_id_url_map = dict(fmt_id_url_map)
+
+ for id in preferred_fmt_ids:
+ id = int(id)
+ if id in formats_available:
+ format = formats_dict.get(id)
+ if format is not None:
+ _, _, description = format
else:
- error_info = parse_qs(page)
- error_message = util.remove_html_tags(error_info['reason'][0])
- raise YouTubeError('Cannot download video: %s' % error_message)
-
- fmt_id_url_map = sorted(find_urls(page), reverse=True)
-
- if not fmt_id_url_map:
- raise YouTubeError('fmt_url_map not found for video ID "%s"' % vid)
-
- # Default to the highest fmt_id if we don't find a match below
- _, url = fmt_id_url_map[0]
-
- formats_available = set(fmt_id for fmt_id, url in fmt_id_url_map)
- fmt_id_url_map = dict(fmt_id_url_map)
-
- for id in preferred_fmt_ids:
- id = int(id)
- if id in formats_available:
- format = formats_dict.get(id)
- if format is not None:
- _, _, description = format
- else:
- description = 'Unknown'
-
- logger.info('Found YouTube format: %s (fmt_id=%d)',
- description, id)
- url = fmt_id_url_map[id]
- break
+ description = 'Unknown'
+
+ logger.info('Found YouTube format: %s (fmt_id=%d)', description, id)
+ return fmt_id_url_map[id]
- return url
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/query.py
^
|
@@ -21,6 +21,7 @@
import re
import datetime
+
class Matcher(object):
"""Match implementation for EQL
@@ -69,7 +70,8 @@
elif k == 'description':
return episode.description
elif k == 'since':
- return (datetime.datetime.now() - datetime.datetime.fromtimestamp(episode.published)).days
+ return (datetime.datetime.now() -
+ datetime.datetime.fromtimestamp(episode.published)).days
elif k == 'age':
return episode.age_in_days()
elif k in ('minutes', 'min'):
@@ -130,7 +132,6 @@
print(e)
self._query = None
-
def match(self, episode):
if self._query is None:
return False
@@ -161,5 +162,3 @@
return EQL("'%s'" % query)
else:
return EQL(query)
-
-
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/registry.py
^
|
@@ -0,0 +1,75 @@
+#
+# gpodder.registry - Central hub for exchanging plugin resolvers (2014-03-09)
+# Copyright (c) 2014, Thomas Perl <m@thp.io>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class Resolver(object):
+ def __init__(self, name, description):
+ self._name = name
+ self._description = description
+ self._resolvers = []
+
+ def resolve(self, item, default, *args):
+ for resolver in self._resolvers:
+ result = resolver(item, *args)
+ if result is not None:
+ logger.info('{} resolved by {}: {} -> {}'.format(self._name, self._info(resolver),
+ default, result))
+ return result
+
+ return default
+
+ def each(self, *args):
+ for resolver in self._resolvers:
+ result = resolver(*args)
+ if result is not None:
+ yield result
+
+ def register(self, func):
+ logger.debug('Registering {} resolver: {}'.format(self._name, func))
+ self._resolvers.append(func)
+ return func
+
+ def _info(self, resolver):
+ return '%s from %s' % (resolver.__name__, resolver.__module__)
+
+ def _dump(self, indent=''):
+ print('== {} ({}) =='.format(self._name, self._description))
+ print('\n'.join('%s- %s' % (indent, self._info(resolver)) for resolver in self._resolvers))
+ print()
+
+RESOLVER_NAMES = {'cover_art': 'Resolve the real cover art URL of an episode',
+ 'download_url': 'Resolve the real download URL of an episode',
+ 'episode_basename': 'Resolve a good, unique download filename for an episode',
+ 'podcast_title': 'Resolve a good title for a podcast',
+ 'content_type': 'Resolve the content type (audio, video) of an episode',
+ 'feed_handler': 'Handle parsing of a feed',
+ 'fallback_feed_handler': 'Handle parsing of a feed (catch-all)',
+ 'url_shortcut': 'Expand shortcuts when adding a new URL'}
+
+LOCALS = locals()
+
+for name, description in RESOLVER_NAMES.items():
+ LOCALS[name] = Resolver(name, description)
+
+
+def dump(module_dict=LOCALS):
+ for name in RESOLVER_NAMES:
+ module_dict[name]._dump(' ')
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/storage.py
^
|
@@ -23,6 +23,7 @@
from gpodder import util
+
class Database:
def __init__(self, filename):
self.filename = filename + '.jsondb'
@@ -91,4 +92,3 @@
with gzip.open(filename, 'wb') as fp:
data = bytes(json.dumps(self._data, separators=(',', ':')), 'utf-8')
fp.write(data)
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/gpodder/util.py
^
|
@@ -1,6 +1,6 @@
#
# gPodder: Media and podcast aggregator
-# Copyright (c) 2005-2013 Thomas Perl and the gPodder Team
+# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
"""Miscellaneous helper functions for gPodder
-This module provides helper and utility functions for gPodder that
+This module provides helper and utility functions for gPodder that
are not tied to any specific part of gPodder.
"""
@@ -46,6 +46,7 @@
import datetime
import threading
import tempfile
+import platform
import urllib.parse
import urllib.request
@@ -115,17 +116,17 @@
_MIME_TYPES_EXT = dict(_MIME_TYPE_LIST)
-def make_directory( path):
+def make_directory(path):
"""
Tries to create a directory if it does not exist already.
- Returns True if the directory exists after the function
+ Returns True if the directory exists after the function
call, False otherwise.
"""
- if os.path.isdir( path):
+ if os.path.isdir(path):
return True
try:
- os.makedirs( path)
+ os.makedirs(path)
except:
logger.warn('Could not create directory: %s', path)
return False
@@ -135,7 +136,7 @@
def normalize_feed_url(url):
"""
- Converts any URL to http:// or ftp:// so that it can be
+ Converts any URL to http:// or ftp:// so that it can be
used with "wget". If the URL cannot be converted (invalid
or unknown scheme), "None" is returned.
@@ -150,12 +151,6 @@
>>> normalize_feed_url('curry.com')
'http://curry.com/'
- There are even some more shortcuts for advanced users
- and lazy typists (see the source for details).
-
- >>> normalize_feed_url('fb:43FPodcast')
- 'http://feeds.feedburner.com/43FPodcast'
-
It will also take care of converting the domain name to
all-lowercase (because domains are not case sensitive):
@@ -171,24 +166,6 @@
if not url or len(url) < 8:
return None
- # This is a list of prefixes that you can use to minimize the amount of
- # keystrokes that you have to use.
- # Feel free to suggest other useful prefixes, and I'll add them here.
- PREFIXES = {
- 'fb:': 'http://feeds.feedburner.com/%s',
- 'yt:': 'http://www.youtube.com/rss/user/%s/videos.rss',
- 'sc:': 'http://soundcloud.com/%s',
- 'fm4od:': 'http://onapp1.orf.at/webcam/fm4/fod/%s.xspf',
- # YouTube playlists. To get a list of playlists per-user, use:
- # https://gdata.youtube.com/feeds/api/users/<username>/playlists
- 'ytpl:': 'http://gdata.youtube.com/feeds/api/playlists/%s',
- }
-
- for prefix, expansion in PREFIXES.items():
- if url.startswith(prefix):
- url = expansion % (url[len(prefix):],)
- break
-
# Assume HTTP for URLs without scheme
if not '://' in url:
url = 'http://' + url
@@ -202,8 +179,8 @@
if path == '':
path = '/'
- # feed://, itpc:// and itms:// are really http://
- if scheme in ('feed', 'itpc', 'itms'):
+ # feed://, itpc://, itms:// and itmss:// are really http://
+ if scheme in ('feed', 'itpc', 'itms', 'itmss'):
scheme = 'http'
if scheme not in ('http', 'https', 'ftp', 'file'):
@@ -282,24 +259,25 @@
return (username, password)
-def calculate_size( path):
+
+def calculate_size(path):
"""
- Tries to calculate the size of a directory, including any
- subdirectories found. The returned value might not be
- correct if the user doesn't have appropriate permissions
+ Tries to calculate the size of a directory, including any
+ subdirectories found. The returned value might not be
+ correct if the user doesn't have appropriate permissions
to list all subdirectories of the given path.
"""
if path is None:
return 0
- if os.path.dirname( path) == '/':
+ if os.path.dirname(path) == '/':
return 0
- if os.path.isfile( path):
- return os.path.getsize( path)
+ if os.path.isfile(path):
+ return os.path.getsize(path)
- if os.path.isdir( path) and not os.path.islink( path):
- sum = os.path.getsize( path)
+ if os.path.isdir(path) and not os.path.islink(path):
+ sum = os.path.getsize(path)
try:
for item in os.listdir(path):
@@ -367,7 +345,7 @@
return None
try:
- diff = int( (time.time() - timestamp)/seconds_in_a_day )
+ diff = int((time.time() - timestamp)/seconds_in_a_day)
except:
logger.warn('Cannot convert "%s" to date.', timestamp, exc_info=True)
return None
@@ -400,7 +378,7 @@
def remove_html_tags(html):
"""
Remove HTML tags from a string and replace numeric and
- named entities with the corresponding character, so the
+ named entities with the corresponding character, so the
HTML text can be displayed in a simple text view.
"""
if html is None:
@@ -414,7 +392,7 @@
re_listing_tags = re.compile('<li[^>]*>', re.I)
result = html
-
+
# Convert common HTML elements to their text equivalent
result = re_newline_tags.sub('\n', result)
result = re_listing_tags.sub('\n * ', result)
@@ -427,8 +405,8 @@
result = re_unicode_entities.sub(lambda x: chr(int(x.group(1))), result)
# Convert named HTML entities to their unicode character
- result = re_html_entities.sub(lambda x: entitydefs.get(x.group(1),''), result)
-
+ result = re_html_entities.sub(lambda x: entitydefs.get(x.group(1), ''), result)
+
# Convert more than two newlines to two newlines
result = re.sub('([\r\n]{2})([\r\n])+', '\\1', result)
@@ -507,10 +485,10 @@
from a URL, e.g. http://server.com/file.MP3?download=yes
will result in the string ("file", ".mp3") being returned.
- This function will also try to best-guess the "real"
+ This function will also try to best-guess the "real"
extension for a media file (audio, video) by
trying to match an extension to these types and recurse
- into the query string to find better matches, if the
+ into the query string to find better matches, if the
original extension does not resolve to a known type.
http://my.net/redirect.php?my.net/file.ogg => ("file", ".ogg")
@@ -518,13 +496,12 @@
http://s/redirect.mp4?http://serv2/test.mp4 => ("test", ".mp4")
"""
(scheme, netloc, path, para, query, fragid) = urllib.parse.urlparse(url)
- (filename, extension) = os.path.splitext(os.path.basename( urllib.parse.unquote(path)))
+ (filename, extension) = os.path.splitext(os.path.basename(urllib.parse.unquote(path)))
- if file_type_by_extension(extension) is not None and not \
- query.startswith(scheme+'://'):
|
[-]
[+]
|
Deleted |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/src/jsonconfig.py
^
|
@@ -1,220 +0,0 @@
-#
-# jsonconfig - JSON-based configuration backend (2012-01-18)
-# Copyright (c) 2012, 2013, Thomas Perl <m@thp.io>
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-
-import copy
-from functools import reduce
-
-import json
-
-
-class JsonConfigSubtree(object):
- def __init__(self, parent, name):
- self._parent = parent
- self._name = name
-
- def __repr__(self):
- return '<Subtree %r of JsonConfig>' % (self._name,)
-
- def _attr(self, name):
- return '.'.join((self._name, name))
-
- def __getitem__(self, name):
- return self._parent._lookup(self._name).__getitem__(name)
-
- def __delitem__(self, name):
- self._parent._lookup(self._name).__delitem__(name)
-
- def __setitem__(self, name, value):
- self._parent._lookup(self._name).__setitem__(name, value)
-
- def __getattr__(self, name):
- if name == 'keys':
- # Kludge for using dict() on a JsonConfigSubtree
- return getattr(self._parent._lookup(self._name), name)
-
- return getattr(self._parent, self._attr(name))
-
- def __setattr__(self, name, value):
- if name.startswith('_'):
- object.__setattr__(self, name, value)
- else:
- self._parent.__setattr__(self._attr(name), value)
-
-
-class JsonConfig(object):
- _INDENT = 2
-
- def __init__(self, data=None, default=None, on_key_changed=None):
- """
- Create a new JsonConfig object
-
- data: A JSON string that contains the data to load (optional)
- default: A dict that contains default config values (optional)
- on_key_changed: Callback when a value changes (optional)
-
- The signature of on_key_changed looks like this:
-
- func(name, old_value, new_value)
-
- name: The key name, e.g. "ui.gtk.show_toolbar"
- old_value: The old value, e.g. False
- new_value: The new value, e.g. True
-
- For newly-set keys, on_key_changed is also called. In this case,
- None will be the old_value:
-
- >>> def callback(*args): print('callback:', args)
- >>> c = JsonConfig(on_key_changed=callback)
- >>> c.a.b = 10
- callback: ('a.b', None, 10)
- >>> c.a.b = 11
- callback: ('a.b', 10, 11)
- >>> c.x.y.z = [1,2,3]
- callback: ('x.y.z', None, [1, 2, 3])
- >>> c.x.y.z = 42
- callback: ('x.y.z', [1, 2, 3], 42)
-
- Please note that dict-style access will not call on_key_changed:
-
- >>> def callback(*args): print('callback:', args)
- >>> c = JsonConfig(on_key_changed=callback)
- >>> c.a.b = 1 # This works as expected
- callback: ('a.b', None, 1)
- >>> c.a['c'] = 10 # This doesn't call on_key_changed!
- >>> del c.a['c'] # This also doesn't call on_key_changed!
- """
- self._default = default
- self._data = copy.deepcopy(self._default) or {}
- self._on_key_changed = on_key_changed
- if data is not None:
- self._restore(data)
-
- def _restore(self, backup):
- """
- Restore a previous state saved with repr()
-
- This function allows you to "snapshot" the current values of
- the configuration and reload them later on. Any missing
- default values will be added on top of the restored config.
-
- Returns True if new keys from the default config have been added,
- False if no keys have been added (backup contains all default keys)
-
- >>> c = JsonConfig()
- >>> c.a.b = 10
- >>> backup = repr(c)
- >>> print(c.a.b)
- 10
- >>> c.a.b = 11
- >>> print(c.a.b)
- 11
- >>> c._restore(backup)
- False
- >>> print(c.a.b)
- 10
- """
- self._data = json.loads(backup)
- # Add newly-added default configuration options
- if self._default is not None:
- return self._merge_keys(self._default)
-
- return False
-
- def _merge_keys(self, merge_source):
- """Merge keys from merge_source into this config object
-
- Return True if new keys were merged, False otherwise
- """
- added_new_key = False
- # Recurse into the data and add missing items
- work_queue = [(self._data, merge_source)]
- while work_queue:
- data, default = work_queue.pop()
- for key, value in default.items():
- if key not in data:
- # Copy defaults for missing key
- data[key] = copy.deepcopy(value)
- added_new_key = True
- elif isinstance(value, dict):
- # Recurse into sub-dictionaries
- work_queue.append((data[key], value))
- elif type(value) != type(data[key]):
- # Type mismatch of current value and default
- if type(value) == int and type(data[key]) == float:
- # Convert float to int if default value is int
- data[key] = int(data[key])
-
- return added_new_key
-
- def __repr__(self):
- """
- >>> c = JsonConfig('{"a": 1}')
- >>> print(c)
- {
- "a": 1
- }
- """
- return json.dumps(self._data, indent=self._INDENT)
-
- def _lookup(self, name):
- return reduce(lambda d, k: d[k], name.split('.'), self._data)
-
- def _keys_iter(self):
- work_queue = []
- work_queue.append(([], self._data))
- while work_queue:
- path, data = work_queue.pop(0)
-
- if isinstance(data, dict):
- for key in sorted(data.keys()):
- work_queue.append((path + [key], data[key]))
- else:
- yield '.'.join(path)
-
- def __getattr__(self, name):
- try:
- value = self._lookup(name)
- if not isinstance(value, dict):
- return value
- except KeyError:
- pass
-
- return JsonConfigSubtree(self, name)
-
- def __setattr__(self, name, value):
- if name.startswith('_'):
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-core/test/test_gpodder/model.py
^
|
@@ -22,6 +22,7 @@
from gpodder import model
+
class TestEpisodePublishedProperties(unittest.TestCase):
PUBLISHED_UNIXTIME = 1360666744
PUBLISHED_SORT = '2013-02-12'
@@ -34,6 +35,7 @@
def test_sortdate(self):
self.assertEqual(self.episode.sortdate, self.PUBLISHED_SORT)
+
class TestSectionFromContentType(unittest.TestCase):
def setUp(self):
self.podcast = model.PodcastChannel(None)
@@ -54,4 +56,3 @@
def test_more_video_than_audio(self):
self.podcast.children = [self.audio_episode, self.video_episode, self.video_episode]
self.assertEqual(self.podcast._get_content_type(), 'video')
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/README
^
|
@@ -1,7 +1,9 @@
gPodder 4 QML UI Reference Implementation
-----------------------------------------
-Nothing (much) to see here (yet), move along.
+This is the reference implementation of the QML UI for gPodder 4. It contains a
+(work in progress) Desktop UI and a minimal Touch UI for mobile devices. The
+Desktop UI requires Qt Quick Controls, included in Qt 5.2 and newer.
Usage:
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/common/GPodderCore.qml
^
|
@@ -28,6 +28,10 @@
property string progname: 'gpodder'
property bool ready: false
property bool refreshing: false
+
+ property string coreversion
+ property string uiversion
+
signal downloadProgress(int episode_id, real progress)
signal playbackProgress(int episode_id, real progress)
signal podcastListChanged()
@@ -39,8 +43,11 @@
Component.onCompleted: {
setHandler('hello', function (coreversion, uiversion) {
- console.log('gPodder Core ' + coreversion);
- console.log('gPodder QML UI ' + uiversion);
+ py.coreversion = coreversion;
+ py.uiversion = uiversion;
+
+ console.log('gPodder Core ' + py.coreversion);
+ console.log('gPodder QML UI ' + py.uiversion);
console.log('PyOtherSide ' + py.pluginVersion());
console.log('Python ' + py.pythonVersion());
});
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/main.py
^
|
@@ -129,8 +129,9 @@
}
def _get_podcasts_sorted(self):
+ sort_key = self.core.model.podcast_sort_key
return sorted(self.core.model.get_podcasts(),
- key=lambda podcast: (podcast.section, podcast.title))
+ key=lambda podcast: (podcast.section, sort_key(podcast)))
def load_podcasts(self):
podcasts = self._get_podcasts_sorted()
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/makefile
^
|
@@ -0,0 +1,21 @@
+PROJECT := gpodder-ui-qml
+VERSION := 4.0.0
+
+all:
+ @echo ""
+ @echo " make release ..... Build source release"
+ @echo ""
+
+release: dist/$(PROJECT)-$(VERSION).tar.gz
+
+dist/$(PROJECT)-$(VERSION).tar.gz:
+ mkdir -p dist
+ git archive --format=tar --prefix=$(PROJECT)-$(VERSION)/ $(VERSION) | gzip >$@
+
+clean:
+ find . -name '__pycache__' -exec rm {} +
+
+distclean: clean
+ rm -rf dist
+
+.PHONY: all release clean
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/AboutPage.qml
^
|
@@ -0,0 +1,83 @@
+
+/**
+ *
+ * gPodder QML UI Reference Implementation
+ * Copyright (c) 2013, Thomas Perl <m@thp.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+import QtQuick 2.0
+
+SlidePage {
+ id: aboutPage
+
+ Flickable {
+ id: flickable
+ anchors.fill: parent
+ boundsBehavior: Flickable.StopAtBounds
+
+ contentWidth: detailColumn.width
+ contentHeight: detailColumn.height + detailColumn.spacing
+
+ Column {
+ id: detailColumn
+
+ width: aboutPage.width
+ spacing: 5 * pgst.scalef
+
+ SlidePageHeader {
+ title: 'About gPodder'
+ }
+
+ SectionHeader {
+ text: 'How to use'
+ width: parent.width
+ }
+
+ PLabel {
+ width: parent.width * .9
+ font.pixelSize: 30 * pgst.scalef
+ anchors.horizontalCenter: parent.horizontalCenter
+ wrapMode: Text.WordWrap
+ text: 'Swipe left on a page to reveal the menu for that page. Go back by swiping pages to the right.\n\nAdd subscriptions via their feed URL or use gpodder.net to search for podcasts.'
+ }
+
+ SectionHeader {
+ text: 'More information'
+ width: parent.width
+ }
+
+ PLabel {
+ width: parent.width * .9
+ font.pixelSize: 20 * pgst.scalef
+ anchors.horizontalCenter: parent.horizontalCenter
+ wrapMode: Text.WordWrap
+ text: [
+ '© 2005-2014 Thomas Perl and the gPodder Team',
+ 'License: ISC / GPLv3 or later',
+ 'Website: http://gpodder.org/',
+ '',
+ 'gPodder Core ' + py.coreversion,
+ 'gPodder QML UI ' + py.uiversion,
+ 'PyOtherSide ' + py.pluginVersion(),
+ 'Python ' + py.pythonVersion()
+ ].join('\n')
+ }
+ }
+ }
+
+ PScrollDecorator { flickable: flickable }
+}
+
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/EpisodesPage.qml
^
|
@@ -81,6 +81,11 @@
title: episodesPage.title
model: GPodderEpisodeListModel { id: episodeListModel }
+ PPlaceholder {
+ text: 'No episodes'
+ visible: episodeList.count === 0
+ }
+
delegate: EpisodeItem { }
}
}
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/PPlaceholder.qml
^
|
@@ -0,0 +1,30 @@
+
+/**
+ *
+ * gPodder QML UI Reference Implementation
+ * Copyright (c) 2014, Thomas Perl <m@thp.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+import QtQuick 2.0
+
+import 'common/constants.js' as Constants
+
+
+PLabel {
+ anchors.centerIn: parent
+ font.pixelSize: 40 * pgst.scalef
+ color: Constants.colors.text
+}
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/PodcastsPage.qml
^
|
@@ -67,6 +67,11 @@
section.property: 'section'
section.delegate: SectionHeader { text: section }
+ PPlaceholder {
+ text: 'No podcasts'
+ visible: podcastList.count === 0
+ }
+
model: podcastListModel
delegate: PodcastItem {
|
[-]
[+]
|
Deleted |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/Settings.qml
^
|
@@ -1,31 +0,0 @@
-
-/**
- *
- * gPodder QML UI Reference Implementation
- * Copyright (c) 2013, Thomas Perl <m@thp.io>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- *
- */
-
-import QtQuick 2.0
-
-SlidePage {
- SlidePageHeader { title: 'Settings' }
-
- PLabel {
- anchors.centerIn: parent
- text: 'TODO'
- }
-}
-
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/SlidePage.qml
^
|
@@ -49,8 +49,6 @@
Dragging {
id: dragging
stacking: stacking
-
- onPulled: console.log('have pulled it!')
}
Rectangle {
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/StartPage.qml
^
|
@@ -27,6 +27,19 @@
SlidePage {
id: startPage
canClose: false
+ hasPull: true
+
+ PullMenu {
+ PullMenuItem {
+ text: 'Now Playing'
+ color: Constants.colors.playback
+ icon: Icons.play
+ onClicked: {
+ pgst.loadPage('PlayerPage.qml');
+ startPage.unPull();
+ }
+ }
+ }
function update_stats() {
py.call('main.get_stats', [], function (result) {
@@ -120,7 +133,7 @@
}
StartPageButton {
- id: freshEpisodesPage
+ id: freshEpisodes
enabled: freshEpisodesRepeater.count > 0
title: py.refreshing ? 'Refreshing feeds' : 'Fresh episodes'
@@ -182,67 +195,25 @@
}
}
- ButtonArea {
- onClicked: pgst.loadPage('PlayerPage.qml');
-
- anchors {
- left: recommendationsPane.left
- right: recommendationsPane.right
- }
-
- height: 100 * pgst.scalef
-
- PLabel {
- anchors.centerIn: parent
- text: 'Now playing'
- }
- }
-
- ButtonArea {
- onClicked: pgst.loadPage('Settings.qml');
-
- anchors {
- left: recommendationsPane.left
- right: recommendationsPane.right
+ Repeater {
+ model: ListModel {
+ ListElement { caption: 'gpodder.net'; target: 'Directory.qml' }
+ ListElement { caption: 'Help'; target: 'AboutPage.qml' }
}
- height: 100 * pgst.scalef
+ delegate: ButtonArea {
+ onClicked: pgst.loadPage(target)
- PLabel {
- anchors.centerIn: parent
- text: 'Settings'
- }
- }
-
- StartPageButton {
- id: recommendationsPane
-
- title: 'gpodder.net'
- onClicked: pgst.loadPage('Directory.qml');
-
- Row {
anchors {
- horizontalCenter: parent.horizontalCenter
- bottom: parent.bottom
- margins: 40 * pgst.scalef
+ left: freshEpisodes.left
+ right: freshEpisodes.right
}
- spacing: 20 * pgst.scalef
-
- Connections {
- target: py
- onReadyChanged: {
- if (py.ready) {
- py.call('main.load_podcasts', [], function (podcasts) {
- recommendationsRepeater.model = podcasts.splice(0, 4);
- });
- }
- }
- }
+ height: 80 * pgst.scalef
- Repeater {
- id: recommendationsRepeater
- Image { source: modelData.coverart; sourceSize { width: 80 * pgst.scalef; height: 80 * pgst.scalef } }
+ PLabel {
+ anchors.centerIn: parent
+ text: caption
}
}
}
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/gpodder-ui-qml/touch/Subscribe.qml
^
|
@@ -60,19 +60,6 @@
}
}
- ButtonArea {
- id: directory
- width: input.width
- height: input.height
-
- PLabel {
- anchors.centerIn: parent
- text: 'gpodder.net'
- }
-
- onClicked: pgst.loadPage('Directory.qml');
- }
-
PBusyIndicator {
id: loading
visible: false
|
[-]
[+]
|
Added |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/qml/AboutPage.qml
^
|
@@ -0,0 +1,88 @@
+
+/**
+ *
+ * gPodder QML UI Reference Implementation
+ * Copyright (c) 2014, Thomas Perl <m@thp.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+import QtQuick 2.0
+import Sailfish.Silica 1.0
+
+Page {
+ id: aboutPage
+
+ onStatusChanged: pgst.handlePageStatusChange(status)
+
+ SilicaFlickable {
+ id: flickable
+ anchors.fill: parent
+
+ VerticalScrollDecorator { flickable: flickable }
+
+ contentWidth: aboutColumn.width
+ contentHeight: aboutColumn.height + aboutColumn.spacing
+
+ Column {
+ id: aboutColumn
+
+ width: aboutPage.width
+ spacing: Theme.paddingMedium
+
+
+ PageHeader {
+ title: 'About gPodder'
+ }
+
+ Column {
+ spacing: Theme.paddingLarge
+
+ anchors {
+ left: parent.left
+ right: parent.right
+ margins: Theme.paddingLarge
+ }
+
+ Column {
+ Label {
+ text: 'gPodder ' + py.uiversion
+ color: Theme.highlightColor
+ }
+
+ Label {
+ text: 'http://gpodder.org/'
+ font.pixelSize: Theme.fontSizeSmall
+ color: Theme.secondaryColor
+ }
+ }
+
+ Label {
+ width: parent.width
+ wrapMode: Text.WordWrap
+ text: [
+ '© 2005-2014 Thomas Perl and the gPodder Team',
+ 'License: ISC / GPLv3 or later',
+ 'Website: http://gpodder.org/',
+ '',
+ 'gPodder Core ' + py.coreversion,
+ 'gPodder QML UI ' + py.uiversion,
+ 'PyOtherSide ' + py.pluginVersion(),
+ 'Python ' + py.pythonVersion()
+ ].join('\n')
+ }
+ }
+ }
+ }
+}
|
[-]
[+]
|
Changed |
_service:tar_git:harbour-org.gpodder.sailfish-4.0.0.tar.gz/qml/PodcastsPage.qml
^
|
@@ -37,6 +37,11 @@
busy: py.refreshing
MenuItem {
+ text: 'About'
+ onClicked: pgst.loadPage('AboutPage.qml');
+ }
+
+ MenuItem {
text: 'Fresh episodes'
onClicked: pgst.loadPage('FreshEpisodes.qml');
}
|